Dataset schema (113 columns; each data row below lists its values in this order):

| Column | Type |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
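The `qsc_*_quality_signal` columns are per-file quality signals derived from `content`. As a rough guide to what the simpler ones measure, here is a minimal sketch that recomputes a few of them; the semantics are inferred from the column names and the sample rows, the function name `basic_quality_signals` is hypothetical, and the dataset's own tokenization (what counts as a "word", how blank lines are handled) may differ slightly.

```python
def basic_quality_signals(content: str) -> dict:
    """Illustrative recomputation of a few of the simpler per-file columns.

    Assumed semantics only; exact values are not guaranteed to match the
    dataset's own implementation.
    """
    lines = content.splitlines()
    words = content.split()  # assume "words" are whitespace-separated tokens
    n_chars = len(content)

    return {
        "size": n_chars,
        "avg_line_length": n_chars / max(len(lines), 1),
        "max_line_length": max((len(line) for line in lines), default=0),
        "alphanum_fraction": sum(c.isalnum() for c in content) / max(n_chars, 1),
        "qsc_code_num_words_quality_signal": len(words),
        "qsc_code_num_chars_quality_signal": n_chars,
        "qsc_code_mean_word_length_quality_signal": sum(map(len, words)) / max(len(words), 1),
        "qsc_code_frac_words_unique_quality_signal": len(set(words)) / max(len(words), 1),
        "qsc_code_frac_chars_whitespace_quality_signal": sum(c.isspace() for c in content) / max(n_chars, 1),
        "qsc_code_frac_chars_alphabet_quality_signal": sum(c.isalpha() for c in content) / max(n_chars, 1),
        "qsc_code_num_lines_quality_signal": len(lines),
        "qsc_code_frac_lines_import_quality_signal":
            sum(line.lstrip().startswith(("import ", "from ")) for line in lines) / max(len(lines), 1),
    }
```

The data rows follow, one value per line in the schema's column order.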
e16de1d5fc9d4b5797d515da70c2fd9768004bef
| 260
|
py
|
Python
|
py_headless_daw/project/content/clip.py
|
hq9000/py-headless-daw
|
33e08727c25d3f00b2556adf5f25c9f7ff4d4304
|
[
"MIT"
] | 22
|
2020-06-09T18:46:56.000Z
|
2021-09-28T02:11:42.000Z
|
py_headless_daw/project/content/clip.py
|
hq9000/py-headless-daw
|
33e08727c25d3f00b2556adf5f25c9f7ff4d4304
|
[
"MIT"
] | 19
|
2020-06-03T06:34:57.000Z
|
2021-01-26T07:36:17.000Z
|
py_headless_daw/project/content/clip.py
|
hq9000/py-headless-daw
|
33e08727c25d3f00b2556adf5f25c9f7ff4d4304
|
[
"MIT"
] | 1
|
2020-06-18T09:25:21.000Z
|
2020-06-18T09:25:21.000Z
|
class Clip:
def __init__(self, start_time: float, end_time: float):
self.start_time: float = start_time
self.end_time: float = end_time
@property
def length_in_seconds(self) -> float:
return self.end_time - self.start_time
| 28.888889
| 59
| 0.673077
| 37
| 260
| 4.351351
| 0.378378
| 0.223602
| 0.242236
| 0.223602
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.238462
| 260
| 8
| 60
| 32.5
| 0.813131
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| false
| 0
| 0
| 0.142857
| 0.571429
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 6
|
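For readability, the row above (clip.py from hq9000/py-headless-daw) can be re-keyed against the schema like this; the values are copied verbatim from the row, the dict is only an illustrative view, and the remaining `qsc_*`, `effective`, and `hits` columns follow in the same order as the schema listing.

```python
# Leading fields of the first row, keyed by column name (subset shown).
row = {
    "hexsha": "e16de1d5fc9d4b5797d515da70c2fd9768004bef",
    "size": 260,
    "ext": "py",
    "lang": "Python",
    "max_stars_repo_path": "py_headless_daw/project/content/clip.py",
    "max_stars_repo_name": "hq9000/py-headless-daw",
    "max_stars_repo_head_hexsha": "33e08727c25d3f00b2556adf5f25c9f7ff4d4304",
    "max_stars_repo_licenses": ["MIT"],
    "max_stars_count": 22,
    "max_stars_repo_stars_event_min_datetime": "2020-06-09T18:46:56.000Z",
    "max_stars_repo_stars_event_max_datetime": "2021-09-28T02:11:42.000Z",
    "avg_line_length": 28.888889,
    "max_line_length": 59,
    "alphanum_fraction": 0.673077,
}
```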
e190da119472b2580bed48da8f079ad37354b202
| 4,539
|
py
|
Python
|
utils.py
|
MLRC2022FSCS/FSCS
|
24ddffa80ba8f2454e132b0a883d117fc189261c
|
[
"Apache-2.0"
] | null | null | null |
utils.py
|
MLRC2022FSCS/FSCS
|
24ddffa80ba8f2454e132b0a883d117fc189261c
|
[
"Apache-2.0"
] | null | null | null |
utils.py
|
MLRC2022FSCS/FSCS
|
24ddffa80ba8f2454e132b0a883d117fc189261c
|
[
"Apache-2.0"
] | null | null | null |
import torch
from torchvision import transforms
from PIL import Image
class MapAdultDataset(object):
"""
The map-style dataset object for the Adult dataset, which is used
as input for the pytorch Dataloader constructor.
"""
def __init__(self, x, y, d) -> None:
"""
Initialization of the dataset object.
Args:
x: The dataframe containing the data for the Adult dataset.
y: The target values dataframe.
d: The protected attribute dataframe.
"""
# Turn the provided dataframes into tensors.
self.x = torch.tensor(x.values, dtype=torch.float32)
self.y = torch.tensor(y, dtype=torch.float32)
self.d = torch.tensor(d, dtype=torch.long)
def __len__(self):
"""
Function that detemines the length of the object.
Returns:
lenght: The lenght of the object.
"""
return len(self.y)
def __getitem__(self, idx):
"""
Function that gathers the requested data and returns it.
Args:
idx: The index of the object that is requested.
Returns:
x: The data that corresponds to the index.
y: The target value of the requested index.
d: The protected attribute that corresponds to the index.
"""
return self.x[idx], self.y[idx], self.d[idx]
class MapCelebADataset(object):
"""
The map-style dataset object for the CelebA dataset, which is used
as input for the pytorch Dataloader constructor.
"""
def __init__(self, x_dir, y, d) -> None:
"""
Initialization of the dataset object.
Args:
x_dir: Direction of the data image files for CelebA.
y: The target values dataframe.
d: The protected attribute dataframe.
"""
# Save the directory.
self.x_dir = x_dir
# Turn the provided dataframes into tensors.
self.y = torch.tensor(y, dtype=torch.float32)
self.d = torch.tensor(d, dtype=torch.long)
# Transformation for the dataset as replied by the authors.
self.transform = transforms.Compose([
transforms.Resize((224, 224)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.5, 0.5, 0.5],
std=[0.5, 0.5, 0.5])])
def __len__(self):
"""
Function that detemines the length of the object.
Returns:
lenght: The lenght of the object.
"""
return len(self.y)
def __getitem__(self, idx):
"""
Function that gathers the requested data and returns it.
Args:
idx: The index of the object that is requested.
Returns:
img: The transformed image that corresponds to the index.
y: The target value of the requested index.
d: The protected attribute that corresponds to the index.
"""
# Grab the image from the directory and apply the transformation.
img = Image.open(self.x_dir + str(idx+1).zfill(6) + '.jpg')
img = self.transform(img)
return img, self.y[idx], self.d[idx]
class MapCivilCommentsDataset(object):
"""
The map-style dataset object for the Civil dataset, which is used
as input for the pytorch Dataloader constructor.
"""
def __init__(self, x, y, d) -> None:
"""
Initialization of the dataset object.
Args:
x: Tensor of the data for the Civil dataset.
y: The target values dataframe.
d: The protected attribute dataframe.
"""
self.x = x
self.y = torch.tensor(y, dtype=torch.float32)
self.d = torch.tensor(d, dtype=torch.long)
def __len__(self):
"""
Function that detemines the length of the object.
Returns:
lenght: The lenght of the object.
"""
return len(self.y)
def __getitem__(self, idx):
"""
Function that gathers the requested data and returns it.
Args:
idx: The index of the object that is requested.
Returns:
x: The data that corresponds to the index.
y: The target value of the requested index.
d: The protected attribute that corresponds to the index.
"""
return self.x[idx], self.y[idx], self.d[idx]
| 31.303448
| 73
| 0.573474
| 558
| 4,539
| 4.591398
| 0.181004
| 0.033177
| 0.038642
| 0.051522
| 0.766979
| 0.766979
| 0.762295
| 0.722482
| 0.680328
| 0.680328
| 0
| 0.009412
| 0.344569
| 4,539
| 144
| 74
| 31.520833
| 0.851765
| 0.505838
| 0
| 0.513514
| 0
| 0
| 0.002372
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.243243
| false
| 0
| 0.081081
| 0
| 0.567568
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
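The row above (utils.py from MLRC2022FSCS/FSCS) scores high on the duplicate n-gram signals (roughly 0.68-0.77 of characters in duplicated 5-10-grams), which likely reflects its three near-identical dataset classes. As a hedged sketch of what the `frac_chars_top_*grams` and `frac_chars_dupe_*grams` columns could measure — inferred from the names and typical text-quality heuristics, not necessarily the dataset's exact implementation:

```python
from collections import Counter

def frac_chars_top_ngram(content: str, n: int) -> float:
    """Assumed meaning: share of word characters covered by the single
    most frequent word n-gram."""
    words = content.split()
    if len(words) < n:
        return 0.0
    counts = Counter(tuple(words[i:i + n]) for i in range(len(words) - n + 1))
    gram, freq = counts.most_common(1)[0]
    total = sum(len(w) for w in words)
    return freq * sum(len(w) for w in gram) / total if total else 0.0

def frac_chars_dupe_ngrams(content: str, n: int) -> float:
    """Assumed meaning: share of word characters that fall inside any
    word n-gram occurring more than once (each word counted once)."""
    words = content.split()
    if len(words) < n:
        return 0.0
    ngrams = [tuple(words[i:i + n]) for i in range(len(words) - n + 1)]
    counts = Counter(ngrams)
    covered = [False] * len(words)
    for i, gram in enumerate(ngrams):
        if counts[gram] > 1:
            for j in range(i, i + n):
                covered[j] = True
    total = sum(len(w) for w in words)
    dup_chars = sum(len(w) for w, hit in zip(words, covered) if hit)
    return dup_chars / total if total else 0.0
```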
e1aafa821dfd042eeb08c8c60babc1fccfa642a2
| 27
|
py
|
Python
|
package/zimagi/__init__.py
|
zimagi/zima
|
d87b3f91e2fa669a77145413582d636d783a0c71
|
[
"Apache-2.0"
] | null | null | null |
package/zimagi/__init__.py
|
zimagi/zima
|
d87b3f91e2fa669a77145413582d636d783a0c71
|
[
"Apache-2.0"
] | null | null | null |
package/zimagi/__init__.py
|
zimagi/zima
|
d87b3f91e2fa669a77145413582d636d783a0c71
|
[
"Apache-2.0"
] | null | null | null |
from .facade import Client
| 13.5
| 26
| 0.814815
| 4
| 27
| 5.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148148
| 27
| 1
| 27
| 27
| 0.956522
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
83326c1fb923b98fce53579f155946ac3c01721f
| 15
|
py
|
Python
|
tests/db1/__init__.py
|
daqing15/elixir
|
53fe515f76d31dc816e9ab99ddd0ceda1d9d574f
|
[
"MIT"
] | 1
|
2015-08-25T14:23:17.000Z
|
2015-08-25T14:23:17.000Z
|
tests/db1/__init__.py
|
daqing15/elixir
|
53fe515f76d31dc816e9ab99ddd0ceda1d9d574f
|
[
"MIT"
] | null | null | null |
tests/db1/__init__.py
|
daqing15/elixir
|
53fe515f76d31dc816e9ab99ddd0ceda1d9d574f
|
[
"MIT"
] | null | null | null |
import a, b, c
| 7.5
| 14
| 0.6
| 4
| 15
| 2.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.266667
| 15
| 1
| 15
| 15
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
83386c5a6d3731879553d2f0946a65f2bd7f3b3f
| 3,972
|
py
|
Python
|
backend/tests/baserow/contrib/database/migrations/test_remove_field_by_id_migration.py
|
6ix-Inc/baserow
|
63268349d787577cf5773a77daf06f50e2acf8a5
|
[
"MIT"
] | 1
|
2022-01-24T15:12:02.000Z
|
2022-01-24T15:12:02.000Z
|
backend/tests/baserow/contrib/database/migrations/test_remove_field_by_id_migration.py
|
rasata/baserow
|
c6e1d7842c53f801e1c96b49f1377da2a06afaa9
|
[
"MIT"
] | null | null | null |
backend/tests/baserow/contrib/database/migrations/test_remove_field_by_id_migration.py
|
rasata/baserow
|
c6e1d7842c53f801e1c96b49f1377da2a06afaa9
|
[
"MIT"
] | null | null | null |
import pytest
# noinspection PyPep8Naming
from django.db import connection
from django.db.migrations.executor import MigrationExecutor
# noinspection PyPep8Naming
@pytest.mark.django_db
def test_forwards_migration(data_fixture, transactional_db, migrate_to_latest_at_end):
migrate_from = [("database", "0039_formulafield")]
migrate_to = [("database", "0040_formulafield_remove_field_by_id")]
old_state = migrate(migrate_from)
# The models used by the data_fixture below are not touched by this migration so
# it is safe to use the latest version in the test.
user = data_fixture.create_user()
table = data_fixture.create_database_table(user=user)
text_field = data_fixture.create_text_field(user=user, table=table, name="text")
FormulaField = old_state.apps.get_model("database", "FormulaField")
ContentType = old_state.apps.get_model("contenttypes", "ContentType")
content_type_id = ContentType.objects.get_for_model(FormulaField).id
formula_field = FormulaField.objects.create(
table_id=table.id,
formula_type="text",
formula=f"field_by_id({text_field.id})",
content_type_id=content_type_id,
order=0,
name="a",
)
unknown_field_by_id = FormulaField.objects.create(
table_id=table.id,
formula_type="text",
formula=f"field_by_id(9999)",
content_type_id=content_type_id,
order=0,
name="b",
)
new_state = migrate(migrate_to)
NewFormulaField = new_state.apps.get_model("database", "FormulaField")
new_formula_field = NewFormulaField.objects.get(id=formula_field.id)
assert new_formula_field.formula == "field('text')"
assert (
new_formula_field.old_formula_with_field_by_id
== f"field_by_id({text_field.id})"
)
new_unknown_field_by_id = NewFormulaField.objects.get(id=unknown_field_by_id.id)
assert new_unknown_field_by_id.formula == "field('unknown field 9999')"
assert new_unknown_field_by_id.old_formula_with_field_by_id == f"field_by_id(9999)"
# noinspection PyPep8Naming
@pytest.mark.django_db
def test_backwards_migration(data_fixture, transactional_db, migrate_to_latest_at_end):
migrate_from = [("database", "0040_formulafield_remove_field_by_id")]
migrate_to = [("database", "0039_formulafield")]
old_state = migrate(migrate_from)
# The models used by the data_fixture below are not touched by this migration so
# it is safe to use the latest version in the test.
user = data_fixture.create_user()
table = data_fixture.create_database_table(user=user)
text_field = data_fixture.create_text_field(user=user, table=table, name="text")
FormulaField = old_state.apps.get_model("database", "FormulaField")
ContentType = old_state.apps.get_model("contenttypes", "ContentType")
content_type_id = ContentType.objects.get_for_model(FormulaField).id
formula_field = FormulaField.objects.create(
table_id=table.id,
formula_type="text",
formula=f"field('text')",
content_type_id=content_type_id,
order=0,
name="a",
)
unknown_field = FormulaField.objects.create(
table_id=table.id,
formula_type="text",
formula=f"field('unknown')",
content_type_id=content_type_id,
order=0,
name="b",
)
new_state = migrate(migrate_to)
NewFormulaField = new_state.apps.get_model("database", "FormulaField")
new_formula_field = NewFormulaField.objects.get(id=formula_field.id)
assert new_formula_field.formula == f"field_by_id({text_field.id})"
new_unknown_field_by_id = NewFormulaField.objects.get(id=unknown_field.id)
assert new_unknown_field_by_id.formula == "field('unknown')"
def migrate(target):
executor = MigrationExecutor(connection)
executor.loader.build_graph() # reload.
executor.migrate(target)
new_state = executor.loader.project_state(target)
return new_state
| 38.563107
| 87
| 0.725831
| 529
| 3,972
| 5.119093
| 0.1569
| 0.041359
| 0.053176
| 0.041359
| 0.850074
| 0.84712
| 0.837888
| 0.812038
| 0.771787
| 0.771787
| 0
| 0.010635
| 0.17145
| 3,972
| 102
| 88
| 38.941176
| 0.812215
| 0.086354
| 0
| 0.55
| 0
| 0
| 0.13674
| 0.043094
| 0
| 0
| 0
| 0
| 0.075
| 1
| 0.0375
| false
| 0
| 0.0375
| 0
| 0.0875
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
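The `qsc_codepython_*` columns are Python-specific and appear to come from parsing the file; for instance, the row above has `frac_lines_func_ratio` of 0.0375 and `cate_ast` of 1. A minimal sketch of two of them, assuming `cate_ast` flags whether the file parses and `frac_lines_func_ratio` is the share of non-empty lines that open a function definition (these semantics are inferred, not confirmed by the source):

```python
import ast

def python_signals(content: str) -> dict:
    """Illustrative recomputation of two Python-specific columns."""
    lines = [line for line in content.splitlines() if line.strip()]
    try:
        ast.parse(content)
        parses = 1
    except (SyntaxError, ValueError):
        parses = 0
    defs = sum(line.lstrip().startswith(("def ", "async def ")) for line in lines)
    return {
        "qsc_codepython_cate_ast_quality_signal": parses,
        "qsc_codepython_frac_lines_func_ratio_quality_signal": defs / max(len(lines), 1),
    }
```

On the first row's clip.py (two `def` lines out of seven non-empty lines), this gives 0.2857, matching the reported 0.285714, though the match is not guaranteed for every file.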
83499959d7ee1e41f9a07d63fc1524cb62ef8476
| 104
|
py
|
Python
|
utilities/downScale.py
|
yangzhao-666/Potential-based-Reward-Shaping-in-Sokoban
|
f7bd090e75cf394033fe95e7b15ce83e331c5ad9
|
[
"MIT"
] | 5
|
2021-10-30T17:59:17.000Z
|
2022-02-03T17:31:51.000Z
|
utilities/downScale.py
|
yangzhao-666/PbRSS
|
f7bd090e75cf394033fe95e7b15ce83e331c5ad9
|
[
"MIT"
] | null | null | null |
utilities/downScale.py
|
yangzhao-666/PbRSS
|
f7bd090e75cf394033fe95e7b15ce83e331c5ad9
|
[
"MIT"
] | null | null | null |
from skimage.transform import resize
def downScale(state):
return 255 * resize(state, (80, 80, 3))
| 20.8
| 43
| 0.711538
| 15
| 104
| 4.933333
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.093023
| 0.173077
| 104
| 4
| 44
| 26
| 0.767442
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
3608ae216e2d1715e5f0f045ae38a16f1f2fbb4c
| 38
|
py
|
Python
|
tests/test_config2.py
|
kpedro88/magiconfig
|
0f4e23f3670dd9dc33fe60163f5706129184a421
|
[
"MIT"
] | 2
|
2022-03-31T21:24:58.000Z
|
2022-03-31T21:33:12.000Z
|
tests/test_config2.py
|
kpedro88/magiconfig
|
0f4e23f3670dd9dc33fe60163f5706129184a421
|
[
"MIT"
] | null | null | null |
tests/test_config2.py
|
kpedro88/magiconfig
|
0f4e23f3670dd9dc33fe60163f5706129184a421
|
[
"MIT"
] | null | null | null |
from test_config import config as cfg
| 19
| 37
| 0.842105
| 7
| 38
| 4.428571
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.157895
| 38
| 1
| 38
| 38
| 0.96875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
363098510b2bedcd4ace94e968f6dca21b53316e
| 225
|
py
|
Python
|
tests/test_init.py
|
kalaspuff/ready
|
e56aec53b93f00181d92eaf071d8abe1d87bc33f
|
[
"MIT"
] | null | null | null |
tests/test_init.py
|
kalaspuff/ready
|
e56aec53b93f00181d92eaf071d8abe1d87bc33f
|
[
"MIT"
] | null | null | null |
tests/test_init.py
|
kalaspuff/ready
|
e56aec53b93f00181d92eaf071d8abe1d87bc33f
|
[
"MIT"
] | null | null | null |
import ready
def test_init() -> None:
assert ready
assert isinstance(ready.__version_info__, tuple)
assert ready.__version_info__
assert isinstance(ready.__version__, str)
assert len(ready.__version__)
| 20.454545
| 52
| 0.742222
| 27
| 225
| 5.481481
| 0.481481
| 0.324324
| 0.283784
| 0.378378
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.182222
| 225
| 10
| 53
| 22.5
| 0.804348
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.714286
| 1
| 0.142857
| true
| 0
| 0.142857
| 0
| 0.285714
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
36f778383bf607e0db2e417631759db43797dc86
| 17
|
py
|
Python
|
tenable/base/__init__.py
|
csanders-git/pyTenable
|
dea25ba02b049bfe3a8cd151c155c3ccf9b2a285
|
[
"MIT"
] | null | null | null |
tenable/base/__init__.py
|
csanders-git/pyTenable
|
dea25ba02b049bfe3a8cd151c155c3ccf9b2a285
|
[
"MIT"
] | 1
|
2021-06-02T01:10:50.000Z
|
2021-06-02T01:10:50.000Z
|
tenable/base/__init__.py
|
csanders-git/pyTenable
|
dea25ba02b049bfe3a8cd151c155c3ccf9b2a285
|
[
"MIT"
] | null | null | null |
from .v1 import *
| 17
| 17
| 0.705882
| 3
| 17
| 4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.071429
| 0.176471
| 17
| 1
| 17
| 17
| 0.785714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7fd40560aaab3684eab48aadb6b53f69f08a41df
| 4,583
|
py
|
Python
|
tests/test_skymap.py
|
LBJ-Wade/hasasia
|
062ea8282595411fbec8d63e7931e9076816b9a1
|
[
"MIT"
] | 9
|
2019-08-13T22:48:25.000Z
|
2022-01-19T14:42:39.000Z
|
tests/test_skymap.py
|
LBJ-Wade/hasasia
|
062ea8282595411fbec8d63e7931e9076816b9a1
|
[
"MIT"
] | 3
|
2019-08-20T18:23:21.000Z
|
2021-01-31T07:17:41.000Z
|
tests/test_skymap.py
|
LBJ-Wade/hasasia
|
062ea8282595411fbec8d63e7931e9076816b9a1
|
[
"MIT"
] | 5
|
2019-07-30T14:36:22.000Z
|
2022-01-19T14:42:30.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `hasasia.sensitivity` module and `hasasia.sim` module."""
import pytest
import numpy as np
import hasasia.sensitivity as hsen
import hasasia.sim as hsim
import hasasia.skymap as hsky
@pytest.fixture
def sm_simple():
'''Test and keep a simple sensitivity skymap'''
#Make a set of random sky positions
phi = np.random.uniform(0, 2*np.pi,size=33)
cos_theta = np.random.uniform(-1,1,size=33)
theta = np.arccos(cos_theta)
#Adding one well-placed sky position for plots.
phi = np.append(np.array(np.deg2rad(60)),phi)
theta = np.append(np.array(np.deg2rad(50)),theta)
#Define the timsespans and TOA errors for the pulsars
timespans = np.random.uniform(3.0,11.4,size=34)
Tspan = timespans.max()*365.25*24*3600
sigma = 1e-7 # 100 ns
#Simulate a set of identical pulsars, with different sky positions.
psrs = hsim.sim_pta(timespan=11.4, cad=23, sigma=sigma,
phi=phi, theta=theta)
freqs = np.logspace(np.log10(1/(5*Tspan)),np.log10(2e-7),500)
spectra = []
for p in psrs:
sp = hsen.Spectrum(p, freqs=freqs)
sp.NcalInv
spectra.append(sp)
#Normally use the healpy functions to get the sky coordinates
#Here just pull random coordinates using numpy to avoid needing healpy
phi_gw = np.random.uniform(0, 2*np.pi,size=1000)
cos_theta_gw = np.random.uniform(-1,1,size=1000)
theta_gw = np.arccos(cos_theta_gw)
SM = hsky.SkySensitivity(spectra,theta_gw, phi_gw)
return SM
@pytest.fixture
def spectra_theta_phi():
'''Test and keep a simple sensitivity skymap'''
#Make a set of random sky positions
phi = np.random.uniform(0, 2*np.pi,size=33)
cos_theta = np.random.uniform(-1,1,size=33)
theta = np.arccos(cos_theta)
#Adding one well-placed sky position for plots.
phi = np.append(np.array(np.deg2rad(60)),phi)
theta = np.append(np.array(np.deg2rad(50)),theta)
#Define the timsespans and TOA errors for the pulsars
timespans = np.random.uniform(3.0,11.4,size=34)
Tspan = timespans.max()*365.25*24*3600
sigma = 1e-7 # 100 ns
#Simulate a set of identical pulsars, with different sky positions.
psrs = hsim.sim_pta(timespan=11.4, cad=23, sigma=sigma,
phi=phi, theta=theta)
freqs = np.logspace(np.log10(1/(5*Tspan)),np.log10(2e-7),500)
spectra = []
for p in psrs:
sp = hsen.Spectrum(p, freqs=freqs)
sp.NcalInv
spectra.append(sp)
#Normally use the healpy functions to get the sky coordinates
#Here just pull random coordinates using numpy to avoid needing healpy
phi_gw = np.random.uniform(0, 2*np.pi,size=1000)
cos_theta_gw = np.random.uniform(-1,1,size=1000)
theta_gw = np.arccos(cos_theta_gw)
return spectra, theta_gw, phi_gw
def test_skymap(sm_simple):
'''test sky map functionality.'''
hCirc = hsky.h0_circ(1e9,200,5e-9).to('')
sm_simple.SNR(hCirc.value)
sm_simple.h_thresh(2)
h_divA = (hsky.h_circ(1e9,200,5e-9,sm_simple.Tspan,sm_simple.freqs)
/hsky.h0_circ(1e9,200,5e-9)).value
Amp = sm_simple.A_gwb(h_divA)
def test_pulsar_term_skymap(spectra_theta_phi):
'''scalar test'''
spectra, theta_gw, phi_gw = spectra_theta_phi
SM = hsky.SkySensitivity(spectra, theta_gw, phi_gw, pulsar_term=True)
def test_pulsar_term_snr(spectra_theta_phi):
'''scalar test'''
spectra, theta_gw, phi_gw = spectra_theta_phi
SM_pt = hsky.SkySensitivity(spectra, theta_gw, phi_gw, pulsar_term=True)
SM = hsky.SkySensitivity(spectra, theta_gw, phi_gw, pulsar_term=False)
hCirc = hsky.h0_circ(1e9,200,5e-9).to('')
assert (SM_pt.SNR(hCirc.value)/SM.SNR(hCirc.value)/np.sqrt(2)).all()
def test_explicit_pulsar_term_skymap(spectra_theta_phi):
'''scalar test'''
spectra, theta_gw, phi_gw = spectra_theta_phi
SM = hsky.SkySensitivity(spectra, theta_gw, phi_gw, pulsar_term='explicit')
def test_scalar_long_skymap(spectra_theta_phi):
'''scalar test'''
spectra, theta_gw, phi_gw = spectra_theta_phi
SM = hsky.SkySensitivity(spectra, theta_gw, phi_gw, pol='scalar-long')
def test_vector_long_skymap(spectra_theta_phi):
'''scalar test'''
spectra, theta_gw, phi_gw = spectra_theta_phi
SM = hsky.SkySensitivity(spectra, theta_gw, phi_gw, pol='vector-long')
def test_scalar_trans_skymap(spectra_theta_phi):
'''scalar test'''
spectra, theta_gw, phi_gw = spectra_theta_phi
SM = hsky.SkySensitivity(spectra, theta_gw, phi_gw, pol='scalar-trans')
| 32.735714
| 79
| 0.689068
| 728
| 4,583
| 4.179945
| 0.203297
| 0.110417
| 0.069011
| 0.083799
| 0.8163
| 0.805784
| 0.805784
| 0.79954
| 0.786724
| 0.769635
| 0
| 0.04398
| 0.186341
| 4,583
| 139
| 80
| 32.971223
| 0.772057
| 0.209252
| 0
| 0.615385
| 0
| 0
| 0.011804
| 0
| 0
| 0
| 0
| 0
| 0.012821
| 1
| 0.115385
| false
| 0
| 0.064103
| 0
| 0.205128
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
3d2d70434ec7a140190843424d7f5964f90cb78e
| 20
|
py
|
Python
|
recognise_district/__init__.py
|
waterblas/ROIBase-lite
|
935c201c40d82310f4af8a62faf03302cf5d1a5b
|
[
"Apache-2.0"
] | 2
|
2020-09-04T08:18:46.000Z
|
2021-08-02T12:25:35.000Z
|
recognise_district/__init__.py
|
waterblas/ROIBase-lite
|
935c201c40d82310f4af8a62faf03302cf5d1a5b
|
[
"Apache-2.0"
] | null | null | null |
recognise_district/__init__.py
|
waterblas/ROIBase-lite
|
935c201c40d82310f4af8a62faf03302cf5d1a5b
|
[
"Apache-2.0"
] | null | null | null |
from . import recog
| 10
| 19
| 0.75
| 3
| 20
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 20
| 1
| 20
| 20
| 0.9375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e9e3d1771458953b0a0fbb89637d70bcbd3fda6f
| 70
|
py
|
Python
|
currency_converter/__init__.py
|
hoou/currency-converter
|
d77a33f6dd8be42b0de213ab53bec0195369e017
|
[
"MIT"
] | null | null | null |
currency_converter/__init__.py
|
hoou/currency-converter
|
d77a33f6dd8be42b0de213ab53bec0195369e017
|
[
"MIT"
] | null | null | null |
currency_converter/__init__.py
|
hoou/currency-converter
|
d77a33f6dd8be42b0de213ab53bec0195369e017
|
[
"MIT"
] | null | null | null |
import currency_converter
def main():
currency_converter.main()
| 11.666667
| 29
| 0.757143
| 8
| 70
| 6.375
| 0.625
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.157143
| 70
| 5
| 30
| 14
| 0.864407
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
1806b9527aca8dc67e45ea300814d0a5ea6439d8
| 20
|
py
|
Python
|
brainframe_qt/constants/__init__.py
|
aotuai/brainframe-qt
|
082cfd0694e569122ff7c63e56dd0ec4b62d5bac
|
[
"BSD-3-Clause"
] | 17
|
2021-02-11T18:19:22.000Z
|
2022-02-08T06:12:50.000Z
|
brainframe_qt/constants/__init__.py
|
aotuai/brainframe-qt
|
082cfd0694e569122ff7c63e56dd0ec4b62d5bac
|
[
"BSD-3-Clause"
] | 80
|
2021-02-11T08:27:31.000Z
|
2021-10-13T21:33:22.000Z
|
brainframe_qt/constants/__init__.py
|
aotuai/brainframe-qt
|
082cfd0694e569122ff7c63e56dd0ec4b62d5bac
|
[
"BSD-3-Clause"
] | 5
|
2021-02-12T09:51:34.000Z
|
2022-02-08T09:25:15.000Z
|
from . import oauth
| 10
| 19
| 0.75
| 3
| 20
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 20
| 1
| 20
| 20
| 0.9375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
186d46db50f92a5805ee9464dbb38b3c3881a20f
| 780
|
py
|
Python
|
ensembl_prodinf/__init__.py
|
dbolser-ebi/ensembl-prodinf-core
|
1de431fd98741e9dde9a60737d33cd3e51853594
|
[
"Apache-2.0"
] | null | null | null |
ensembl_prodinf/__init__.py
|
dbolser-ebi/ensembl-prodinf-core
|
1de431fd98741e9dde9a60737d33cd3e51853594
|
[
"Apache-2.0"
] | null | null | null |
ensembl_prodinf/__init__.py
|
dbolser-ebi/ensembl-prodinf-core
|
1de431fd98741e9dde9a60737d33cd3e51853594
|
[
"Apache-2.0"
] | null | null | null |
from ensembl_prodinf.hive import Analysis
from ensembl_prodinf.hive import Result
from ensembl_prodinf.hive import LogMessage
from ensembl_prodinf.hive import Job
from ensembl_prodinf.hive import HiveInstance
from ensembl_prodinf.utils import dict_to_perl_string, list_to_perl_string, escape_perl_string
from ensembl_prodinf.db_utils import list_databases
from ensembl_prodinf.server_utils import get_status
from ensembl_prodinf.reporting import get_logger, set_logger_context
from ensembl_prodinf.email_celery_app import app as email_celery_app
from ensembl_prodinf.handover_celery_app import app as handover_celery_app
from ensembl_prodinf.event_celery_app import app as event_celery_app
from ensembl_prodinf.reporting import QueueAppenderHandler, ContextFilter, JsonFormatter
| 55.714286
| 94
| 0.894872
| 116
| 780
| 5.672414
| 0.301724
| 0.217325
| 0.355623
| 0.167173
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.082051
| 780
| 13
| 95
| 60
| 0.918994
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a13d2b6f28f99ac6eb8512048d87b83e252b3b01
| 4,872
|
py
|
Python
|
Coursera/Google_IT_Automation_with_Python/02_Using_Python_to_Interact_with_the_Operating_System/Week_2/wk2_mod3_pquiz.py
|
ssolomon2020/Self_Study_Python_Training
|
b253093b185f4a0d98cb8565f5fcf2b0e4a99556
|
[
"MIT"
] | null | null | null |
Coursera/Google_IT_Automation_with_Python/02_Using_Python_to_Interact_with_the_Operating_System/Week_2/wk2_mod3_pquiz.py
|
ssolomon2020/Self_Study_Python_Training
|
b253093b185f4a0d98cb8565f5fcf2b0e4a99556
|
[
"MIT"
] | null | null | null |
Coursera/Google_IT_Automation_with_Python/02_Using_Python_to_Interact_with_the_Operating_System/Week_2/wk2_mod3_pquiz.py
|
ssolomon2020/Self_Study_Python_Training
|
b253093b185f4a0d98cb8565f5fcf2b0e4a99556
|
[
"MIT"
] | null | null | null |
# Specialization: Google IT Automation with Python
# Course 02: Using Python to Interact with the Operating System
# Week 2 Module Part 3 - Practice Quiz
# Student: Shawn Solomon
# Learning Platform: Coursera.org
# Scripting examples encountered during the Module Part 3 Practice Quiz:
# 01. We're working with a list of flowers and some information about each one.
# The create_file function writes this information to a CSV file. The contents_of_file
# function reads this file into records and returns the information in a nicely formatted
# block. Fill in the gaps of the contents_of_file function to turn the data in the CSV
# file into a dictionary using DictReader.
# import os
# import csv
#
# # Create a file with data in it
# def create_file(filename):
# with open(filename, "w") as file:
# file.write("name,color,type\n")
# file.write("carnation,pink,annual\n")
# file.write("daffodil,yellow,perennial\n")
# file.write("iris,blue,perennial\n")
# file.write("poinsettia,red,perennial\n")
# file.write("sunflower,yellow,annual\n")
#
# # Read the file contents and format the information about each row
# def contents_of_file(filename):
# return_string = ""
#
# # Call the function to create the file
# create_file(filename)
#
# # Open the file
# ___
# # Read the rows of the file into a dictionary
# ___
# # Process each item of the dictionary
# for ___:
# return_string += "a {} {} is {}\n".format(row["color"], row["name"], row["type"])
# return return_string
#
# #Call the function
# print(contents_of_file("flowers.csv"))
import os
import csv
# Create a file with data in it
def create_file(filename):
with open(filename, "w") as file:
file.write("name,color,type\n")
file.write("carnation,pink,annual\n")
file.write("daffodil,yellow,perennial\n")
file.write("iris,blue,perennial\n")
file.write("poinsettia,red,perennial\n")
file.write("sunflower,yellow,annual\n")
# Read the file contents and format the information about each row
def contents_of_file(filename):
return_string = ""
# Call the function to create the file
create_file(filename)
# Open the file
with open(filename, "r") as csv_file:
csv_reader = csv.DictReader(csv_file)
# Read the rows of the file into a dictionary
for row in csv_reader:
# Process each item of the dictionary
return_string += "a {} {} is {}\n".format(row["color"], row["name"], row["type"])
return return_string
#Call the function
print(contents_of_file("flowers.csv"))
# 02. Using the CSV file of flowers again, fill in the gaps of the contents_of_file
# function to process the data without turning it into a dictionary. How do you skip
# over the header record with the field names?
# import os
# import csv
#
# # Create a file with data in it
# def create_file(filename):
# with open(filename, "w") as file:
# file.write("name,color,type\n")
# file.write("carnation,pink,annual\n")
# file.write("daffodil,yellow,perennial\n")
# file.write("iris,blue,perennial\n")
# file.write("poinsettia,red,perennial\n")
# file.write("sunflower,yellow,annual\n")
#
# # Read the file contents and format the information about each row
# def contents_of_file(filename):
# return_string = ""
#
# # Call the function to create the file
# create_file(filename)
#
# # Open the file
# ___
# # Read the rows of the file
# rows = ___
# # Process each row
# for row in rows:
# ___ = row
# # Format the return string for data rows only
#
# return_string += "a {} {} is {}\n".format(___)
# return return_string
#
# #Call the function
# print(contents_of_file("flowers.csv"))
import os
import csv
# Create a file with data in it
def create_file(filename):
with open(filename, "w") as file:
file.write("name,color,type\n")
file.write("carnation,pink,annual\n")
file.write("daffodil,yellow,perennial\n")
file.write("iris,blue,perennial\n")
file.write("poinsettia,red,perennial\n")
file.write("sunflower,yellow,annual\n")
# Read the file contents and format the information about each row
def contents_of_file(filename):
return_string = ""
# Call the function to create the file
create_file(filename)
# Open the file
with open(filename, "r") as csv_file:
reader = csv.reader(csv_file)
# Read the rows of the file
rows = reader
# Process each row
for row in rows:
name, color, typ = row
# Format the return string for data rows only
if name != "name":
return_string += "a {} {} is {}\n".format(color, name, typ)
return return_string
#Call the function
print(contents_of_file("flowers.csv"))
| 31.843137
| 91
| 0.667898
| 701
| 4,872
| 4.542083
| 0.169757
| 0.067839
| 0.062814
| 0.071608
| 0.799309
| 0.77701
| 0.744347
| 0.728015
| 0.71608
| 0.691583
| 0
| 0.002375
| 0.222291
| 4,872
| 153
| 92
| 31.843137
| 0.837952
| 0.6328
| 0
| 0.780488
| 0
| 0
| 0.229712
| 0.159686
| 0
| 0
| 0
| 0
| 0
| 1
| 0.097561
| false
| 0
| 0.097561
| 0
| 0.243902
| 0.04878
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a15835e23aae0e2964798080b8b4619831698462
| 14,975
|
py
|
Python
|
mstrio/api/objects.py
|
LLejoly/mstrio-py
|
497fb041318d0def12cf72917ede2c02c1808067
|
[
"Apache-2.0"
] | null | null | null |
mstrio/api/objects.py
|
LLejoly/mstrio-py
|
497fb041318d0def12cf72917ede2c02c1808067
|
[
"Apache-2.0"
] | null | null | null |
mstrio/api/objects.py
|
LLejoly/mstrio-py
|
497fb041318d0def12cf72917ede2c02c1808067
|
[
"Apache-2.0"
] | null | null | null |
from mstrio.utils.helper import response_handler
def get_object_info(connection, id, type, error_msg=None):
"""Get information for a specific object in a specific project; if you do
not specify a project ID, you get information for the object in all
projects.
You identify the object with the object ID and object type. You specify
the object type as a query parameter; possible values for object type are
provided in EnumDSSXMLObjectTypes.
Args:
connection(object): MicroStrategy connection object returned by
`connection.Connection()`.
id (str): Object ID
type (int): One of EnumDSSXMLObjectTypes. Ex. 34 (User or usergroup),
44 (Security Role), 32 (Project), 8 (Folder), 36 (type of I-Server
configuration)
error_msg (string, optional): Custom Error Message for Error Handling
Returns:
HTTP response object returned by the MicroStrategy REST server.
"""
headers = {}
if type == "project":
headers = {'X-MSTR-ProjectID': None}
response = connection.session.get(url=connection.base_url + '/api/objects/' + id,
headers=headers,
params={'type': type})
if not response.ok:
if error_msg is None:
error_msg = "Error getting information for the specific object."
response_handler(response, error_msg, whitelist=[('ERR001', 500)])
return response
def delete_object(connection, id, type, error_msg=None):
"""Get information for a specific object in a specific project; if you do
not specify a project ID, you get information for the object in all
projects.
You identify the object with the object ID and object type. You specify
the object type as a query parameter; possible values for object type are
provided in EnumDSSXMLObjectTypes.
Args:
connection(object): MicroStrategy connection object returned by
`connection.Connection()`.
id (str): Object ID
type (int): One of EnumDSSXMLObjectTypes. Ex. 34 (User or usergroup),
44 (Security Role), 32 (Project), 8 (Folder), 36 (type of I-Server
configuration)
error_msg (string, optional): Custom Error Message for Error Handling
Returns:
HTTP response object returned by the MicroStrategy REST server.
"""
headers = {}
if type == 32:
headers = {'X-MSTR-ProjectID': None}
response = connection.session.delete(url=connection.base_url + '/api/objects/' + id,
headers=headers,
params={'type': type})
if not response.ok:
if error_msg is None:
error_msg = "Error deleting object {}".format(id)
response_handler(response, error_msg)
return response
def update_object(connection, id, body, type, error_msg=None, verbose=True):
"""Get information for a specific object in a specific project; if you do
not specify a project ID, you get information for the object in all
projects.
You identify the object with the object ID and object type. You specify
the object type as a query parameter; possible values for object type are
provided in EnumDSSXMLObjectTypes.
Args:
connection(object): MicroStrategy connection object returned by
`connection.Connection()`.
id (str): Object ID
body: (object): body of the response
type (int): One of EnumDSSXMLObjectTypes. Ex. 34 (User or usergroup),
44 (Security Role), 32 (Project), 8 (Folder), 36 (type of I-Server
configuration)
error_msg (string, optional): Custom Error Message for Error Handling
Returns:
HTTP response object returned by the MicroStrategy REST server.
"""
headers = {}
if type == 32:
headers = {'X-MSTR-ProjectID': str(id)}
response = connection.session.put(url=connection.base_url + '/api/objects/' + id,
headers=headers,
params={'type': type},
json=body)
if not response.ok:
if error_msg is None:
error_msg = "Error updating object {}".format(id)
response_handler(response, error_msg, verbose=verbose)
return response
def copy_object(connection, id, name, folder_id, type, error_msg=None):
"""Create a copy of a specific object.
You identify the object with the object ID and object type. You obtain the
authorization token needed to execute the request using POST /auth/login;
you obtain the project ID using GET /projects. You pass the authorization
token and the project ID in the request header. You specify the object ID in
the path of the request and object type as a query parameter; possible
values for object type are provided in EnumDSSXMLObjectTypes. You specify
the name and location (folder ID) of the new object in the body of the
request. If you do not specify a new name, a default name is generated, such
as 'Old Name (1)'. If you do not specify a folder ID, the object is saved in
the same folder as the source object.
Args:
connection(object): MicroStrategy connection object returned by
`connection.Connection()`.
id (str): Object ID
type (int): One of EnumDSSXMLObjectTypes. Ex. 34 (User or usergroup),
44 (Security Role), 32 (Project), 8 (Folder), 36 (type of I-Server
configuration)
error_msg (string, optional): Custom Error Message for Error Handling
Returns:
HTTP response object returned by the MicroStrategy REST server.
"""
connection._validate_project_selected()
body = {
"name": name,
"folderId": folder_id
}
response = connection.session.post(url=connection.base_url + '/api/objects/' + id + '/copy',
params={'type': type},
json=body)
if not response.ok:
if error_msg is None:
error_msg = "Error creating a copy of object {}".format(id)
response_handler(response, error_msg)
return response
def get_vldb_settings(connection, id, type, project_id=None, error_msg=None):
"""Get vldb settings for an object.
Args:
connection(object): MicroStrategy connection object returned by
`connection.Connection()`.
id (str): Object ID
type (int): DssXmlTypeReportDefinition(3) for Dataset and
DssXmlTypeDocumentDefinition(55) for document/dossier
project_id: project ID
error_msg (string, optional): Custom Error Message for Error Handling
Returns:
HTTP response object returned by the MicroStrategy REST server
"""
headers = {}
if project_id:
headers = {'X-MSTR-ProjectID': project_id}
else:
connection._validate_project_selected()
headers = {'X-MSTR-ProjectID': connection.project_id}
response = connection.session.get(url=f"{connection.base_url}/api/objects/{id}/vldb/propertySets",
params={'type': type},
headers=headers)
if not response.ok:
if error_msg is None:
error_msg = "Error getting VLDB settings for object '{}'".format(id)
response_handler(response, error_msg)
return response
def delete_vldb_settings(connection, id, type, project_id=None, error_msg=None):
"""Delete all customized vldb settings in one object, this operation will
reset all vldb settings to default.
Args:
connection(object): MicroStrategy connection object returned by
`connection.Connection()`.
id (str): Object ID
type (int): DssXmlTypeReportDefinition(3) for Dataset and
DssXmlTypeDocumentDefinition(55) for document/dossier
project_id: project ID
error_msg (string, optional): Custom Error Message for Error Handling
Returns:
HTTP response object returned by the MicroStrategy REST server
"""
headers = {}
if project_id:
headers = {'X-MSTR-ProjectID': project_id}
else:
connection._validate_project_selected()
headers = {'X-MSTR-ProjectID': connection.project_id}
response = connection.session.delete(url=f"{connection.base_url}/api/objects/{id}/vldb/propertySets",
params={'type': type},
headers=headers)
if not response.ok:
if error_msg is None:
error_msg = "Error resetting all custom vldb settings to default for object '{}'".format(id)
response_handler(response, error_msg)
return response
def set_vldb_settings(connection, id, type, name, body, project_id=None, error_msg=None):
"""Set vldb settings for one property set in one object.
Args:
connection(object): MicroStrategy connection object returned by
`connection.Connection()`.
id (str): Object ID
type (int): DssXmlTypeReportDefinition(3) for Dataset and
DssXmlTypeDocumentDefinition(55) for document/dossier
name: property set name
project_id: project ID
body: [{"name": "string",
"value": {}}]
error_msg (string, optional): Custom Error Message for Error Handling
Returns:
HTTP response object returned by the MicroStrategy REST server
"""
headers = {}
if project_id:
headers = {'X-MSTR-ProjectID': project_id}
else:
connection._validate_project_selected()
headers = {'X-MSTR-ProjectID': connection.project_id}
response = connection.session.put(url=f"{connection.base_url}/api/objects/{id}/vldb/propertySets/{name}",
params={'type': type},
headers=headers,
json=body)
if not response.ok:
if error_msg is None:
error_msg = "Error setting vldb settings for object '{}'".format(id)
response_handler(response, error_msg)
return response
def create_search_objects_instance(connection, name=None, pattern=4, domain=2, root=None, object_type=None, error_msg=None):
"""Create a search instance.
Args:
connection(object): MicroStrategy connection object returned by
`connection.Connection()`.
name: expression used with the pattern to do the search
pattern: specifies the nature of the search. Possible values are defined
in the EnumDSSXMLSearchTypes javadoc
domain: search domain. specifies the domain/scope of the search.
Possible values are defined in the EnumDSSXMLSearchDomain javadoc
root: folder ID of the root in which the search is done
object_type: specifies the type of objects to be searched. Possible
values are defined in the EnumDSSObjectType javadoc
error_msg (string, optional): Custom Error Message for Error Handling
Returns:
HTTP response returned by the MicroStrategy REST server
"""
connection._validate_project_selected()
response = connection.session.post(url=f"{connection.base_url}/api/objects",
headers={'X-MSTR-ProjectID': connection.project_id},
params={'name': name,
'pattern': pattern,
'domain': domain,
'root': root,
'type': object_type})
if not response.ok:
if error_msg is None:
error_msg = "Error getting objects."
response_handler(response, error_msg)
return response
def get_objects(connection, search_id, offset=0, limit=-1, get_tree=False, error_msg=None):
"""Get list of objects from metadata.
Args:
connection(object): MicroStrategy connection object returned by
`connection.Connection()`.
search_id: ID for the results of a previous search stored in I-Server
memory
offset: starting point within the collection of returned results. Used
to control paging behavior.
limit: maximum number of items returned for a single request. Used to
control paging behavior
get_tree: specifies that the search results should be displayed in
a tree structure instead of a list. The ancestors of the searched
objects are the nodes and the searched objects are the leaves of
the tree.
error_msg (string, optional): Custom Error Message for Error Handling
Returns:
HTTP response returned by the MicroStrategy REST server
"""
connection._validate_project_selected
response = connection.session.get(url=f"{connection.base_url}/api/objects",
headers={'X-MSTR-ProjectID': connection.project_id},
params={'searchId': search_id,
'offset': offset,
'limit': limit,
'getTree': get_tree})
if not response.ok:
if error_msg is None:
error_msg = "Error getting objects."
response_handler(response, error_msg)
return response
def get_objects_async(future_session, connection, search_id, offset=0, limit=-1, get_tree=False, error_msg=None):
"""Get list of objects from metadata asynchronously.
Args:
connection(object): MicroStrategy connection object returned by
`connection.Connection()`.
search_id: ID for the results of a previous search stored in I-Server
memory
offset: starting point within the collection of returned results. Used
to control paging behavior.
limit: maximum number of items returned for a single request. Used to
control paging behavior.
get_tree: specifies that the search results should be displayed in
a tree structure instead of a list. The ancestors of the searched
objects are the nodes and the searched objects are the leaves of
the tree.
Returns:
HTTP response returned by the MicroStrategy REST server
"""
connection._validate_project_selected()
url = connection.base_url + '/api/objects'
headers = {'X-MSTR-ProjectID': connection.project_id}
params = {'searchId': search_id,
'offset': offset,
'limit': limit,
'getTree': get_tree}
future = future_session.get(url=url, headers=headers, params=params)
return future
| 43.031609
| 124
| 0.628581
| 1,768
| 14,975
| 5.244344
| 0.118778
| 0.039689
| 0.029336
| 0.027179
| 0.832938
| 0.813632
| 0.800043
| 0.796592
| 0.783218
| 0.774267
| 0
| 0.005881
| 0.29596
| 14,975
| 347
| 125
| 43.15562
| 0.873565
| 0.506778
| 0
| 0.659259
| 0
| 0
| 0.143326
| 0.035943
| 0
| 0
| 0
| 0
| 0
| 1
| 0.074074
| false
| 0
| 0.007407
| 0
| 0.155556
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a1d483359e5106fa5ea4e0ae4b7b51be72a4d5dc
| 156
|
py
|
Python
|
candas/__init__.py
|
JulianWgs/candas
|
7543c018cd1d17d35e4d1f20445d6910b9796430
|
[
"MIT"
] | 2
|
2020-04-17T09:35:13.000Z
|
2020-04-17T12:48:15.000Z
|
candas/__init__.py
|
JulianWgs/candas
|
7543c018cd1d17d35e4d1f20445d6910b9796430
|
[
"MIT"
] | null | null | null |
candas/__init__.py
|
JulianWgs/candas
|
7543c018cd1d17d35e4d1f20445d6910b9796430
|
[
"MIT"
] | null | null | null |
""" __init__ module for candas package """
from . dataframe import from_file, from_database, load_dbc, from_fake
from . database import initialize_database
| 39
| 69
| 0.801282
| 21
| 156
| 5.52381
| 0.666667
| 0.206897
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.128205
| 156
| 3
| 70
| 52
| 0.852941
| 0.217949
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
62ae74edd15738b15a4b29f768cbf11d09378bbc
| 199
|
py
|
Python
|
src/apps/sitio/forms.py
|
yrrodriguezb/wellsolutions
|
f66304167f12275bc3e427e3a813e28b56ef0d19
|
[
"MIT"
] | null | null | null |
src/apps/sitio/forms.py
|
yrrodriguezb/wellsolutions
|
f66304167f12275bc3e427e3a813e28b56ef0d19
|
[
"MIT"
] | null | null | null |
src/apps/sitio/forms.py
|
yrrodriguezb/wellsolutions
|
f66304167f12275bc3e427e3a813e28b56ef0d19
|
[
"MIT"
] | 1
|
2021-07-30T02:31:29.000Z
|
2021-07-30T02:31:29.000Z
|
from django import forms
class ContactForm(forms.Form):
nombre = forms.CharField(max_length=100)
email = forms.CharField( max_length=100)
mensaje = forms.CharField(widget=forms.Textarea)
| 33.166667
| 52
| 0.758794
| 26
| 199
| 5.730769
| 0.615385
| 0.281879
| 0.228188
| 0.308725
| 0.348993
| 0
| 0
| 0
| 0
| 0
| 0
| 0.035088
| 0.140704
| 199
| 6
| 52
| 33.166667
| 0.836257
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.2
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
62d93a69ed62940f2768e72afa87698bc6c78599
| 88
|
py
|
Python
|
naked/funcs/sklearn/__init__.py
|
MaxHalford/naked
|
f1990a22903db61e6ac74ce1eccf5d43537ebfc4
|
[
"MIT"
] | 26
|
2021-02-05T09:46:44.000Z
|
2021-11-14T19:40:47.000Z
|
naked/funcs/sklearn/__init__.py
|
MaxHalford/naked
|
f1990a22903db61e6ac74ce1eccf5d43537ebfc4
|
[
"MIT"
] | null | null | null |
naked/funcs/sklearn/__init__.py
|
MaxHalford/naked
|
f1990a22903db61e6ac74ce1eccf5d43537ebfc4
|
[
"MIT"
] | 1
|
2021-08-19T06:21:28.000Z
|
2021-08-19T06:21:28.000Z
|
from . import feature_extraction
from . import linear_model
from . import preprocessing
| 22
| 32
| 0.829545
| 11
| 88
| 6.454545
| 0.636364
| 0.422535
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.136364
| 88
| 3
| 33
| 29.333333
| 0.934211
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
62f7bb0ffcc20e55e7a0ac1b837e2c03d15355e4
| 3,280
|
py
|
Python
|
scripts/3_postprocess_baseline_with_mw/3b_plot_tps_rt_two_mw.py
|
gokul-uf/asl-fall-2017
|
83e882d9d4c52bdd279b4e3eed8cd7ac768e88d7
|
[
"MIT"
] | 1
|
2018-06-13T16:57:59.000Z
|
2018-06-13T16:57:59.000Z
|
scripts/3_postprocess_baseline_with_mw/3b_plot_tps_rt_two_mw.py
|
gokul-uf/asl-fall-2017
|
83e882d9d4c52bdd279b4e3eed8cd7ac768e88d7
|
[
"MIT"
] | null | null | null |
scripts/3_postprocess_baseline_with_mw/3b_plot_tps_rt_two_mw.py
|
gokul-uf/asl-fall-2017
|
83e882d9d4c52bdd279b4e3eed8cd7ac768e88d7
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
from __future__ import division
from glob import glob
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
thread_range = [8, 16, 32, 64]
def extract_n_plot(filename, label):
num_vc = []
val = []
std = []
with open(filename) as f:
for line in f:
vc, v, s = [float(x) for x in line.strip().split(",")][:3]
num_vc.append(vc)
val.append(v)
std.append(s)
plt.errorbar(x = num_vc, y = val, yerr = std, label = label, capsize = 2)
plt.figure()
plt.title("Response Time vs. Num of Virtual Clients \n (Read-Only Two Middleware Baseline)")
for num_thread in thread_range:
file = "csvs/two_mw_baseline_r-o_t_{}_rt.csv".format(num_thread)
extract_n_plot(filename = file, label = "{} Worker Threads".format(num_thread))
plt.legend()
plt.grid(linestyle = "dotted")
plt.ylabel("Response Time (ms)")
plt.xlabel("Number of Virtual Clients")
plt.ylim(ymin=0)
plt.savefig("3b_two-mw_r-o_rt.png")
plt.figure()
plt.title("Throughput vs. Num of Virtual Clients \n (Read-Only Two Middleware Baseline)")
for num_thread in thread_range:
file = "csvs/two_mw_baseline_r-o_t_{}_tps.csv".format(num_thread)
extract_n_plot(filename = file, label = "{} Worker Threads".format(num_thread))
plt.legend()
plt.grid(linestyle = "dotted")
plt.ylabel("Throughput (request / second)")
plt.xlabel("Number of Virtual Clients")
plt.ylim(ymin=0)
plt.savefig("3b_two-mw_r-o_tps.png")
plt.figure()
plt.title("Queueing Time vs. Num of Virtual Clients \n (Read-Only Two Middleware Baseline)")
for num_thread in thread_range:
file = "csvs/two_mw_baseline_r-o_t_{}_wt.csv".format(num_thread)
extract_n_plot(filename = file, label = "{} Worker Threads".format(num_thread))
plt.legend()
plt.grid(linestyle = "dotted")
plt.ylabel("Queueing Time (ms)")
plt.xlabel("Number of Virtual Clients")
plt.ylim(ymin=0)
plt.savefig("3b_two-mw_r-o_wt.png")
plt.figure()
plt.title("Response Time vs. Num of Virtual Clients \n (Write-Only Two Middleware Baseline)")
for num_thread in thread_range:
file = "csvs/two_mw_baseline_w-o_t_{}_rt.csv".format(num_thread)
extract_n_plot(filename = file, label = "{} Worker Threads".format(num_thread))
plt.legend()
plt.grid(linestyle = "dotted")
plt.ylim(ymin=0)
plt.ylabel("Response Time (ms)")
plt.xlabel("Number of Virtual Clients")
plt.savefig("3b_two-mw_w-o_rt.png")
plt.figure()
plt.title("Throughput vs. Num of Virtual Clients \n (Write-Only Two Middleware Baseline)")
for num_thread in thread_range:
file = "csvs/two_mw_baseline_w-o_t_{}_tps.csv".format(num_thread)
extract_n_plot(filename = file, label = "{} Worker Threads".format(num_thread))
plt.legend()
plt.grid(linestyle = "dotted")
plt.ylim(ymin=0)
plt.ylabel("Throughput (request / second)")
plt.xlabel("Number of Virtual Clients")
plt.savefig("3b_two-mw_w-o_tps.png")
plt.figure()
plt.title("Queueing Time vs. Num of Virtual Clients \n (Write-Only Two Middleware Baseline)")
for num_thread in thread_range:
file = "csvs/two_mw_baseline_w-o_t_{}_wt.csv".format(num_thread)
extract_n_plot(filename = file, label = "{} Worker Threads".format(num_thread))
plt.legend()
plt.grid(linestyle = "dotted")
plt.ylim(ymin=0)
plt.ylabel("Queueing Time (ms)")
plt.xlabel("Number of Virtual Clients")
plt.savefig("3b_two-mw_w-o_wt.png")
| 35.268817
| 93
| 0.741159
| 548
| 3,280
| 4.244526
| 0.175182
| 0.069647
| 0.082545
| 0.060189
| 0.849527
| 0.848237
| 0.848237
| 0.848237
| 0.848237
| 0.848237
| 0
| 0.007199
| 0.110671
| 3,280
| 93
| 94
| 35.268817
| 0.790195
| 0
| 0
| 0.564706
| 0
| 0
| 0.374886
| 0.079244
| 0
| 0
| 0
| 0
| 0
| 1
| 0.011765
| false
| 0
| 0.082353
| 0
| 0.094118
| 0.011765
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
1a0b88396ec45679c26704e6e4ae73bf1859b917
| 34
|
py
|
Python
|
amazing_semantic_segmentation/builders/__init__.py
|
jarodhanko/Amazing-Semantic-Segmentation
|
b92596d4c17a72492e848af9e8d7152386f035b2
|
[
"Apache-2.0"
] | null | null | null |
amazing_semantic_segmentation/builders/__init__.py
|
jarodhanko/Amazing-Semantic-Segmentation
|
b92596d4c17a72492e848af9e8d7152386f035b2
|
[
"Apache-2.0"
] | null | null | null |
amazing_semantic_segmentation/builders/__init__.py
|
jarodhanko/Amazing-Semantic-Segmentation
|
b92596d4c17a72492e848af9e8d7152386f035b2
|
[
"Apache-2.0"
] | null | null | null |
from .model_builder import builder
| 34
| 34
| 0.882353
| 5
| 34
| 5.8
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.088235
| 34
| 1
| 34
| 34
| 0.935484
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
1a1e53004a8d9a9d7b9267dea467d002209664bd
| 39
|
py
|
Python
|
backend/backend/settings/__init__.py
|
swang192/portunus
|
1cc7243bcde4af756b460d306755505d23c9b949
|
[
"MIT"
] | null | null | null |
backend/backend/settings/__init__.py
|
swang192/portunus
|
1cc7243bcde4af756b460d306755505d23c9b949
|
[
"MIT"
] | null | null | null |
backend/backend/settings/__init__.py
|
swang192/portunus
|
1cc7243bcde4af756b460d306755505d23c9b949
|
[
"MIT"
] | null | null | null |
from .zygoat_settings import * # noqa
| 19.5
| 38
| 0.74359
| 5
| 39
| 5.6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.179487
| 39
| 1
| 39
| 39
| 0.875
| 0.102564
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
c56dd416ed7662fdd6c992d30c1bb403d770a290
| 22
|
py
|
Python
|
gwr/__init__.py
|
yurytsoy/gwr
|
0258ccc6b8f36a28bee2b79e82703c463ebbcc8f
|
[
"MIT"
] | null | null | null |
gwr/__init__.py
|
yurytsoy/gwr
|
0258ccc6b8f36a28bee2b79e82703c463ebbcc8f
|
[
"MIT"
] | 1
|
2019-07-19T04:06:34.000Z
|
2019-09-07T14:08:03.000Z
|
gwr/__init__.py
|
yurytsoy/gwr
|
0258ccc6b8f36a28bee2b79e82703c463ebbcc8f
|
[
"MIT"
] | null | null | null |
from .gwr import GWR
| 7.333333
| 20
| 0.727273
| 4
| 22
| 4
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.227273
| 22
| 2
| 21
| 11
| 0.941176
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
3db4c621880004583373b10c13764d57fa27c0bb
| 202
|
py
|
Python
|
tests/views.py
|
geelweb/geelweb-django-contactform
|
f4f83e285a77c38205b5097cc22aa1354e3b618d
|
[
"MIT"
] | 2
|
2020-12-13T00:10:51.000Z
|
2021-03-07T10:35:08.000Z
|
tests/views.py
|
geelweb/geelweb-django-contactform
|
f4f83e285a77c38205b5097cc22aa1354e3b618d
|
[
"MIT"
] | null | null | null |
tests/views.py
|
geelweb/geelweb-django-contactform
|
f4f83e285a77c38205b5097cc22aa1354e3b618d
|
[
"MIT"
] | 2
|
2016-04-09T14:10:26.000Z
|
2016-10-30T00:40:53.000Z
|
from django.http import HttpResponse
from django.shortcuts import render
def index(request):
return HttpResponse('Page content')
def custom(request):
return render(request, 'custom.html', {})
| 22.444444
| 45
| 0.752475
| 25
| 202
| 6.08
| 0.6
| 0.131579
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.143564
| 202
| 8
| 46
| 25.25
| 0.878613
| 0
| 0
| 0
| 0
| 0
| 0.113861
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
ad0069b8182e1e872e838f597163cbabe5c2a596
| 64
|
py
|
Python
|
Online-Judges/CodingBat/Python/String-01/04-make_out_word.py
|
shihab4t/Competitive-Programming
|
e8eec7d4f7d86bfa1c00b7fbbedfd6a1518f19be
|
[
"Unlicense"
] | 3
|
2021-06-15T01:19:23.000Z
|
2022-03-16T18:23:53.000Z
|
Online-Judges/CodingBat/Python/String-01/04-make_out_word.py
|
shihab4t/Competitive-Programming
|
e8eec7d4f7d86bfa1c00b7fbbedfd6a1518f19be
|
[
"Unlicense"
] | null | null | null |
Online-Judges/CodingBat/Python/String-01/04-make_out_word.py
|
shihab4t/Competitive-Programming
|
e8eec7d4f7d86bfa1c00b7fbbedfd6a1518f19be
|
[
"Unlicense"
] | null | null | null |
def make_out_word(out, word):
return (out[:2]+word+out[2:])
| 21.333333
| 33
| 0.640625
| 12
| 64
| 3.25
| 0.5
| 0.358974
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.036364
| 0.140625
| 64
| 2
| 34
| 32
| 0.672727
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
9a9e544102bbe4fbf970784079363ede606b8b87
| 53
|
py
|
Python
|
nineml/abstraction/componentclass/visitors/__init__.py
|
INCF/nineml-python
|
062a2ac8a9be97cee4dad02938e3858e051cf07c
|
[
"BSD-3-Clause"
] | 6
|
2017-12-26T14:15:28.000Z
|
2021-10-10T22:45:51.000Z
|
nineml/abstraction/componentclass/visitors/__init__.py
|
INCF/nineml-python
|
062a2ac8a9be97cee4dad02938e3858e051cf07c
|
[
"BSD-3-Clause"
] | 25
|
2017-07-05T03:53:53.000Z
|
2021-01-19T14:14:05.000Z
|
nineml/abstraction/componentclass/visitors/__init__.py
|
INCF/nineml-python
|
062a2ac8a9be97cee4dad02938e3858e051cf07c
|
[
"BSD-3-Clause"
] | 5
|
2017-12-26T14:15:12.000Z
|
2021-10-10T22:45:39.000Z
|
from .queriers import ComponentClassInterfaceInferer
| 26.5
| 52
| 0.90566
| 4
| 53
| 12
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.075472
| 53
| 1
| 53
| 53
| 0.979592
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
9ab3201bd77e250379aa092712abd533d3ad7a62
| 22,893
|
py
|
Python
|
conbench/tests/api/test_benchmarks.py
|
ursa-labs/conbench
|
67efab4d11885796bb012b9ab8b003abce7a8d43
|
[
"MIT"
] | 31
|
2021-03-26T20:07:03.000Z
|
2021-08-18T14:58:08.000Z
|
conbench/tests/api/test_benchmarks.py
|
ursacomputing/conbench
|
67efab4d11885796bb012b9ab8b003abce7a8d43
|
[
"MIT"
] | 10
|
2021-05-12T20:17:02.000Z
|
2021-09-10T14:22:24.000Z
|
conbench/tests/api/test_benchmarks.py
|
ursa-labs/conbench
|
67efab4d11885796bb012b9ab8b003abce7a8d43
|
[
"MIT"
] | 3
|
2021-05-06T16:00:11.000Z
|
2021-09-10T18:48:19.000Z
|
import copy
import decimal
import pytest
from ...api._examples import _api_benchmark_entity
from ...entities._entity import NotFound
from ...entities.distribution import Distribution
from ...entities.summary import Summary
from ...tests.api import _asserts, _fixtures
from ...tests.helpers import _uuid
ARROW_REPO = "https://github.com/apache/arrow"
CONBENCH_REPO = "https://github.com/conbench/conbench"
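# Build the expected API entity for a benchmark summary; reused by the assertions in the test classes below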
def _expected_entity(summary):
return _api_benchmark_entity(
summary.id,
summary.case_id,
summary.info_id,
summary.context_id,
summary.batch_id,
summary.run_id,
summary.case.name,
)
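# Tests for GET /api/benchmarks/{id}/ -- the regression/improvement cases build a short result history and check the z_score / z_regression / z_improvement fields in the stats block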
class TestBenchmarkGet(_asserts.GetEnforcer):
url = "/api/benchmarks/{}/"
public = True
def _create(self, name=None, results=None, unit=None, sha=None):
return _fixtures.summary(
name=name,
results=results,
unit=unit,
sha=sha,
)
def test_get_benchmark(self, client):
self.authenticate(client)
summary = self._create()
response = client.get(f"/api/benchmarks/{summary.id}/")
self.assert_200_ok(response, _expected_entity(summary))
def test_get_benchmark_regression(self, client):
self.authenticate(client)
name = _uuid()
# create a distribution history & a regression
self._create(
name=name,
results=_fixtures.RESULTS_DOWN[0],
unit="i/s",
sha=_fixtures.GRANDPARENT,
)
self._create(
name=name,
results=_fixtures.RESULTS_DOWN[1],
unit="i/s",
sha=_fixtures.PARENT,
)
summary = self._create(
name=name,
results=_fixtures.RESULTS_DOWN[2],
unit="i/s",
)
expected = _expected_entity(summary)
expected["stats"].update(
{
"data": ["1.000000", "2.000000", "3.000000"],
"iqr": "1.000000",
"iterations": 3,
"max": "3.000000",
"mean": "2.000000",
"median": "2.000000",
"min": "1.000000",
"q1": "1.500000",
"q3": "2.500000",
"stdev": "1.000000",
"times": [],
"z_score": "-{:.6f}".format(abs(_fixtures.Z_SCORE_DOWN)),
"z_regression": True,
"unit": "i/s",
}
)
response = client.get(f"/api/benchmarks/{summary.id}/")
self.assert_200_ok(response, expected)
def test_get_benchmark_regression_less_is_better(self, client):
self.authenticate(client)
name = _uuid()
# create a distribution history & a regression
self._create(
name=name,
results=_fixtures.RESULTS_UP[0],
unit="s",
sha=_fixtures.GRANDPARENT,
)
self._create(
name=name,
results=_fixtures.RESULTS_UP[1],
unit="s",
sha=_fixtures.PARENT,
)
summary = self._create(
name=name,
results=_fixtures.RESULTS_UP[2],
unit="s",
)
expected = _expected_entity(summary)
expected["stats"].update(
{
"data": ["10.000000", "20.000000", "30.000000"],
"iqr": "10.000000",
"iterations": 3,
"max": "30.000000",
"mean": "20.000000",
"median": "20.000000",
"min": "10.000000",
"q1": "15.000000",
"q3": "25.000000",
"stdev": "10.000000",
"times": [],
"z_score": "-{:.6f}".format(abs(_fixtures.Z_SCORE_UP)),
"z_regression": True,
}
)
response = client.get(f"/api/benchmarks/{summary.id}/")
self.assert_200_ok(response, expected)
def test_get_benchmark_improvement(self, client):
self.authenticate(client)
name = _uuid()
# create a distribution history & an improvement
self._create(
name=name,
results=_fixtures.RESULTS_UP[0],
unit="i/s",
sha=_fixtures.GRANDPARENT,
)
self._create(
name=name,
results=_fixtures.RESULTS_UP[1],
unit="i/s",
sha=_fixtures.PARENT,
)
summary = self._create(
name=name,
results=_fixtures.RESULTS_UP[2],
unit="i/s",
)
expected = _expected_entity(summary)
expected["stats"].update(
{
"data": ["10.000000", "20.000000", "30.000000"],
"iqr": "10.000000",
"iterations": 3,
"max": "30.000000",
"mean": "20.000000",
"median": "20.000000",
"min": "10.000000",
"q1": "15.000000",
"q3": "25.000000",
"stdev": "10.000000",
"times": [],
"z_score": "{:.6f}".format(abs(_fixtures.Z_SCORE_UP)),
"z_improvement": True,
"unit": "i/s",
}
)
response = client.get(f"/api/benchmarks/{summary.id}/")
self.assert_200_ok(response, expected)
def test_get_benchmark_improvement_less_is_better(self, client):
self.authenticate(client)
name = _uuid()
# create a distribution history & an improvement
self._create(
name=name,
results=_fixtures.RESULTS_DOWN[0],
unit="s",
sha=_fixtures.GRANDPARENT,
)
self._create(
name=name,
results=_fixtures.RESULTS_DOWN[1],
unit="s",
sha=_fixtures.PARENT,
)
summary = self._create(
name=name,
results=_fixtures.RESULTS_DOWN[2],
unit="s",
)
expected = _expected_entity(summary)
expected["stats"].update(
{
"data": ["1.000000", "2.000000", "3.000000"],
"iqr": "1.000000",
"iterations": 3,
"max": "3.000000",
"mean": "2.000000",
"median": "2.000000",
"min": "1.000000",
"q1": "1.500000",
"q3": "2.500000",
"stdev": "1.000000",
"times": [],
"z_score": "{:.6f}".format(abs(_fixtures.Z_SCORE_DOWN)),
"z_improvement": True,
}
)
response = client.get(f"/api/benchmarks/{summary.id}/")
self.assert_200_ok(response, expected)
class TestBenchmarkDelete(_asserts.DeleteEnforcer):
url = "/api/benchmarks/{}/"
def test_delete_benchmark(self, client):
self.authenticate(client)
summary = _fixtures.summary()
# can get before delete
Summary.one(id=summary.id)
# delete
response = client.delete(f"/api/benchmarks/{summary.id}/")
self.assert_204_no_content(response)
# cannot get after delete
with pytest.raises(NotFound):
Summary.one(id=summary.id)
class TestBenchmarkList(_asserts.ListEnforcer):
url = "/api/benchmarks/"
public = True
def test_benchmark_list(self, client):
self.authenticate(client)
summary = _fixtures.summary()
response = client.get("/api/benchmarks/")
self.assert_200_ok(response, contains=_expected_entity(summary))
def test_benchmark_list_filter_by_name(self, client):
self.authenticate(client)
_fixtures.summary(name="aaa")
summary = _fixtures.summary(name="bbb")
_fixtures.summary(name="ccc")
response = client.get("/api/benchmarks/?name=bbb")
self.assert_200_ok(response, [_expected_entity(summary)])
def test_benchmark_list_filter_by_batch_id(self, client):
self.authenticate(client)
_fixtures.summary(batch_id="10")
summary = _fixtures.summary(batch_id="20")
_fixtures.summary(batch_id="30")
response = client.get("/api/benchmarks/?batch_id=20")
self.assert_200_ok(response, [_expected_entity(summary)])
def test_benchmark_list_filter_by_run_id(self, client):
self.authenticate(client)
_fixtures.summary(run_id="100")
summary = _fixtures.summary(run_id="200")
_fixtures.summary(run_id="300")
response = client.get("/api/benchmarks/?run_id=200")
self.assert_200_ok(response, [_expected_entity(summary)])
class TestBenchmarkPost(_asserts.PostEnforcer):
url = "/api/benchmarks/"
valid_payload = _fixtures.VALID_PAYLOAD
required_fields = [
"batch_id",
"context",
"info",
"machine_info",
"run_id",
"stats",
"tags",
"timestamp",
]
def test_create_benchmark(self, client):
self.authenticate(client)
response = client.post("/api/benchmarks/", json=self.valid_payload)
new_id = response.json["id"]
summary = Summary.one(id=new_id)
location = "http://localhost/api/benchmarks/%s/" % new_id
self.assert_201_created(response, _expected_entity(summary), location)
def test_create_benchmark_normalizes_data(self, client):
self.authenticate(client)
response = client.post("/api/benchmarks/", json=self.valid_payload)
summary_1 = Summary.one(id=response.json["id"])
data = copy.deepcopy(self.valid_payload)
data["run_id"] = data["run_id"] + "_X"
response = client.post("/api/benchmarks/", json=data)
summary_2 = Summary.one(id=response.json["id"])
assert summary_1.id != summary_2.id
assert summary_1.case_id == summary_2.case_id
assert summary_1.info_id == summary_2.info_id
assert summary_1.context_id == summary_2.context_id
assert summary_1.run.machine_id == summary_2.run.machine_id
assert summary_1.run_id != summary_2.run_id
assert summary_1.run.commit_id == summary_2.run.commit_id
def test_create_benchmark_can_specify_run_and_batch_id(self, client):
self.authenticate(client)
data = copy.deepcopy(self.valid_payload)
run_id, batch_id = _uuid(), _uuid()
data["run_id"] = run_id
data["batch_id"] = batch_id
response = client.post("/api/benchmarks/", json=data)
summary = Summary.one(id=response.json["id"])
assert summary.run_id == run_id
assert summary.batch_id == batch_id
def test_create_benchmark_cannot_omit_batch_id(self, client):
self.authenticate(client)
data = copy.deepcopy(self.valid_payload)
# omit
del data["batch_id"]
response = client.post("/api/benchmarks/", json=data)
message = {
"batch_id": ["Missing data for required field."],
}
self.assert_400_bad_request(response, message)
# null
data["batch_id"] = None
response = client.post("/api/benchmarks/", json=data)
message = {
"batch_id": ["Field may not be null."],
}
self.assert_400_bad_request(response, message)
def test_create_benchmark_cannot_omit_run_id(self, client):
self.authenticate(client)
data = copy.deepcopy(self.valid_payload)
# omit
del data["run_id"]
response = client.post("/api/benchmarks/", json=data)
message = {
"run_id": ["Missing data for required field."],
}
self.assert_400_bad_request(response, message)
# null
data["run_id"] = None
response = client.post("/api/benchmarks/", json=data)
message = {
"run_id": ["Field may not be null."],
}
self.assert_400_bad_request(response, message)
def test_nested_schema_validation(self, client):
self.authenticate(client)
data = copy.deepcopy(self.valid_payload)
del data["stats"]["iterations"]
del data["github"]["commit"]
del data["machine_info"]["os_name"]
data["machine_info"]["os_version"] = None
data["stats"]["extra"] = "field"
data["github"]["extra"] = "field"
data["machine_info"]["extra"] = "field"
response = client.post("/api/benchmarks/", json=data)
message = {
"github": {
"extra": ["Unknown field."],
"commit": ["Missing data for required field."],
},
"machine_info": {
"extra": ["Unknown field."],
"os_name": ["Missing data for required field."],
"os_version": ["Field may not be null."],
},
"stats": {
"extra": ["Unknown field."],
"iterations": ["Missing data for required field."],
},
}
self.assert_400_bad_request(response, message)
def _assert_none_commit(self, response):
new_id = response.json["id"]
summary = Summary.one(id=new_id)
assert summary.run.commit.sha == ""
assert summary.run.commit.repository == ""
assert summary.run.commit.parent is None
return summary, new_id
def test_create_no_commit_context(self, client):
self.authenticate(client)
data = copy.deepcopy(self.valid_payload)
data["run_id"] = _uuid()
del data["github"]
# create benchmark without commit context
response = client.post("/api/benchmarks/", json=data)
summary, new_id = self._assert_none_commit(response)
location = "http://localhost/api/benchmarks/%s/" % new_id
self.assert_201_created(response, _expected_entity(summary), location)
# create another benchmark without commit context
# (test duplicate key -- commit_index)
response = client.post("/api/benchmarks/", json=data)
summary, new_id = self._assert_none_commit(response)
location = "http://localhost/api/benchmarks/%s/" % new_id
self.assert_201_created(response, _expected_entity(summary), location)
def test_create_empty_commit_context(self, client):
self.authenticate(client)
data = copy.deepcopy(self.valid_payload)
data["run_id"] = _uuid()
data["github"]["commit"] = ""
data["github"]["repository"] = ""
# create benchmark without commit context
response = client.post("/api/benchmarks/", json=data)
summary, new_id = self._assert_none_commit(response)
location = "http://localhost/api/benchmarks/%s/" % new_id
self.assert_201_created(response, _expected_entity(summary), location)
# create another benchmark without commit context
# (test duplicate key -- commit_index)
response = client.post("/api/benchmarks/", json=data)
summary, new_id = self._assert_none_commit(response)
location = "http://localhost/api/benchmarks/%s/" % new_id
self.assert_201_created(response, _expected_entity(summary), location)
def test_create_unknown_commit_context(self, client):
self.authenticate(client)
data = copy.deepcopy(self.valid_payload)
data["run_id"] = _uuid()
data["github"]["commit"] = "unknown commit"
data["github"]["repository"] = ARROW_REPO
# create benchmark with unknown commit context
response = client.post("/api/benchmarks/", json=data)
new_id = response.json["id"]
summary = Summary.one(id=new_id)
assert summary.run.commit.sha == "unknown commit"
assert summary.run.commit.repository == ARROW_REPO
assert summary.run.commit.parent is None
location = "http://localhost/api/benchmarks/%s/" % new_id
self.assert_201_created(response, _expected_entity(summary), location)
# create another benchmark with unknown commit context
# (test duplicate key -- commit_index)
response = client.post("/api/benchmarks/", json=data)
new_id = response.json["id"]
summary = Summary.one(id=new_id)
assert summary.run.commit.sha == "unknown commit"
assert summary.run.commit.repository == ARROW_REPO
assert summary.run.commit.parent is None
location = "http://localhost/api/benchmarks/%s/" % new_id
self.assert_201_created(response, _expected_entity(summary), location)
def test_create_different_git_repo_format(self, client):
self.authenticate(client)
data = copy.deepcopy(self.valid_payload)
data["run_id"] = _uuid()
data["github"]["commit"] = "testing repository with git@g"
data["github"]["repository"] = "git@github.com:apache/arrow"
response = client.post("/api/benchmarks/", json=data)
new_id = response.json["id"]
summary = Summary.one(id=new_id)
assert summary.run.commit.sha == "testing repository with git@g"
assert summary.run.commit.repository == ARROW_REPO
assert summary.run.commit.parent is None
location = "http://localhost/api/benchmarks/%s/" % new_id
self.assert_201_created(response, _expected_entity(summary), location)
def test_create_repo_not_full_url(self, client):
self.authenticate(client)
data = copy.deepcopy(self.valid_payload)
data["run_id"] = _uuid()
data["github"]["commit"] = "testing repository with just org/repo"
data["github"]["repository"] = "apache/arrow"
response = client.post("/api/benchmarks/", json=data)
new_id = response.json["id"]
summary = Summary.one(id=new_id)
assert summary.run.commit.sha == "testing repository with just org/repo"
assert summary.run.commit.repository == ARROW_REPO
assert summary.run.commit.parent is None
location = "http://localhost/api/benchmarks/%s/" % new_id
self.assert_201_created(response, _expected_entity(summary), location)
def test_create_allow_just_repository(self, client):
self.authenticate(client)
data = copy.deepcopy(self.valid_payload)
data["run_id"] = _uuid()
data["github"]["commit"] = ""
data["github"]["repository"] = ARROW_REPO
response = client.post("/api/benchmarks/", json=data)
new_id = response.json["id"]
summary = Summary.one(id=new_id)
assert summary.run.commit.sha == ""
assert summary.run.commit.repository == ARROW_REPO
assert summary.run.commit.parent is None
location = "http://localhost/api/benchmarks/%s/" % new_id
self.assert_201_created(response, _expected_entity(summary), location)
# And again with a different repository with an empty sha
data["run_id"] = _uuid()
data["github"]["commit"] = ""
data["github"]["repository"] = CONBENCH_REPO
response = client.post("/api/benchmarks/", json=data)
new_id = response.json["id"]
summary = Summary.one(id=new_id)
assert summary.run.commit.sha == ""
assert summary.run.commit.repository == CONBENCH_REPO
assert summary.run.commit.parent is None
location = "http://localhost/api/benchmarks/%s/" % new_id
self.assert_201_created(response, _expected_entity(summary), location)
def test_create_allow_just_sha(self, client):
self.authenticate(client)
data = copy.deepcopy(self.valid_payload)
data["run_id"] = _uuid()
data["github"]["commit"] = "something something"
data["github"]["repository"] = ""
response = client.post("/api/benchmarks/", json=data)
new_id = response.json["id"]
summary = Summary.one(id=new_id)
assert summary.run.commit.sha == "something something"
assert summary.run.commit.repository == ""
assert summary.run.commit.parent is None
location = "http://localhost/api/benchmarks/%s/" % new_id
self.assert_201_created(response, _expected_entity(summary), location)
def test_create_benchmark_distribution(self, client):
self.authenticate(client)
data = copy.deepcopy(self.valid_payload)
data["tags"]["name"] = _uuid()
# first result
response = client.post("/api/benchmarks/", json=data)
new_id = response.json["id"]
summary_1 = Summary.one(id=new_id)
location = "http://localhost/api/benchmarks/%s/" % new_id
self.assert_201_created(response, _expected_entity(summary_1), location)
case_id = summary_1.case_id
# after one result
distributions = Distribution.all(case_id=case_id)
assert len(distributions) == 1
assert distributions[0].unit == "s"
assert distributions[0].observations == 1
assert distributions[0].mean_mean == decimal.Decimal("0.03636900000000000000")
assert distributions[0].mean_sd is None
assert distributions[0].min_mean == decimal.Decimal("0.00473300000000000000")
assert distributions[0].min_sd is None
assert distributions[0].max_mean == decimal.Decimal("0.14889600000000000000")
assert distributions[0].max_sd is None
assert distributions[0].median_mean == decimal.Decimal("0.00898800000000000000")
assert distributions[0].median_sd is None
# second result
response = client.post("/api/benchmarks/", json=data)
new_id = response.json["id"]
summary_2 = Summary.one(id=new_id)
location = "http://localhost/api/benchmarks/%s/" % new_id
self.assert_201_created(response, _expected_entity(summary_2), location)
assert summary_1.case_id == summary_2.case_id
assert summary_1.context_id == summary_2.context_id
assert summary_1.run.machine_id == summary_2.run.machine_id
assert summary_1.run.commit_id == summary_2.run.commit_id
# after two results
distributions = Distribution.all(case_id=case_id)
assert len(distributions) == 1
assert distributions[0].unit == "s"
assert distributions[0].observations == 2
assert distributions[0].mean_mean == decimal.Decimal("0.03636900000000000000")
assert distributions[0].mean_sd == decimal.Decimal("0")
assert distributions[0].min_mean == decimal.Decimal("0.00473300000000000000")
assert distributions[0].min_sd == decimal.Decimal("0")
assert distributions[0].max_mean == decimal.Decimal("0.14889600000000000000")
assert distributions[0].max_sd == decimal.Decimal("0")
assert distributions[0].median_mean == decimal.Decimal("0.00898800000000000000")
assert distributions[0].median_sd == decimal.Decimal("0")
| 37.591133
| 88
| 0.598611
| 2,558
| 22,893
| 5.147772
| 0.077013
| 0.049362
| 0.030377
| 0.047388
| 0.834067
| 0.806577
| 0.78911
| 0.774453
| 0.741798
| 0.7298
| 0
| 0.041481
| 0.273402
| 22,893
| 608
| 89
| 37.652961
| 0.75015
| 0.034902
| 0
| 0.633663
| 0
| 0
| 0.150218
| 0.020709
| 0
| 0
| 0
| 0
| 0.194059
| 1
| 0.053465
| false
| 0
| 0.017822
| 0.00396
| 0.10099
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
9ab4c2c070923e62668e0f7e59ee85e94c0478af
| 9,844
|
py
|
Python
|
stac_fastapi/pgstac/tests/api/test_api.py
|
borism/stac-fastapi
|
81015a153c1d9f36d8e12f17a1bf67370396f472
|
[
"MIT"
] | 64
|
2021-03-27T19:34:29.000Z
|
2022-03-31T07:58:58.000Z
|
stac_fastapi/pgstac/tests/api/test_api.py
|
borism/stac-fastapi
|
81015a153c1d9f36d8e12f17a1bf67370396f472
|
[
"MIT"
] | 218
|
2021-03-27T19:51:54.000Z
|
2022-03-28T12:41:56.000Z
|
stac_fastapi/pgstac/tests/api/test_api.py
|
borism/stac-fastapi
|
81015a153c1d9f36d8e12f17a1bf67370396f472
|
[
"MIT"
] | 44
|
2021-04-05T12:06:25.000Z
|
2022-03-01T12:06:29.000Z
|
from datetime import datetime, timedelta
import pytest
STAC_CORE_ROUTES = [
"GET /",
"GET /collections",
"GET /collections/{collection_id}",
"GET /collections/{collection_id}/items",
"GET /collections/{collection_id}/items/{item_id}",
"GET /conformance",
"GET /search",
"POST /search",
]
STAC_TRANSACTION_ROUTES = [
"DELETE /collections/{collection_id}",
"DELETE /collections/{collection_id}/items/{item_id}",
"POST /collections",
"POST /collections/{collection_id}/items",
"PUT /collections",
"PUT /collections/{collection_id}/items",
]
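# The route lists above are compared against the routes registered on the FastAPI app in test_core_router / test_transactions_router below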
@pytest.mark.asyncio
async def test_post_search_content_type(app_client):
params = {"limit": 1}
resp = await app_client.post("search", json=params)
assert resp.headers["content-type"] == "application/geo+json"
@pytest.mark.asyncio
async def test_get_search_content_type(app_client):
resp = await app_client.get("search")
assert resp.headers["content-type"] == "application/geo+json"
@pytest.mark.asyncio
async def test_api_headers(app_client):
resp = await app_client.get("/api")
assert (
resp.headers["content-type"] == "application/vnd.oai.openapi+json;version=3.0"
)
assert resp.status_code == 200
@pytest.mark.asyncio
async def test_core_router(api_client):
core_routes = set(STAC_CORE_ROUTES)
api_routes = set(
[f"{list(route.methods)[0]} {route.path}" for route in api_client.app.routes]
)
assert not core_routes - api_routes
@pytest.mark.asyncio
async def test_transactions_router(api_client):
transaction_routes = set(STAC_TRANSACTION_ROUTES)
api_routes = set(
[f"{list(route.methods)[0]} {route.path}" for route in api_client.app.routes]
)
assert not transaction_routes - api_routes
@pytest.mark.asyncio
async def test_app_transaction_extension(
app_client, load_test_data, load_test_collection
):
coll = load_test_collection
item = load_test_data("test_item.json")
resp = await app_client.post(f"/collections/{coll.id}/items", json=item)
assert resp.status_code == 200
@pytest.mark.asyncio
async def test_app_query_extension(load_test_data, app_client, load_test_collection):
coll = load_test_collection
item = load_test_data("test_item.json")
resp = await app_client.post(f"/collections/{coll.id}/items", json=item)
assert resp.status_code == 200
params = {"query": {"proj:epsg": {"eq": item["properties"]["proj:epsg"]}}}
resp = await app_client.post("/search", json=params)
assert resp.status_code == 200
resp_json = resp.json()
assert len(resp_json["features"]) == 1
@pytest.mark.asyncio
async def test_app_query_extension_limit_1(
load_test_data, app_client, load_test_collection
):
coll = load_test_collection
item = load_test_data("test_item.json")
resp = await app_client.post(f"/collections/{coll.id}/items", json=item)
assert resp.status_code == 200
params = {"limit": 1}
resp = await app_client.post("/search", json=params)
assert resp.status_code == 200
resp_json = resp.json()
assert len(resp_json["features"]) == 1
@pytest.mark.asyncio
async def test_app_query_extension_limit_eq0(app_client):
params = {"limit": 0}
resp = await app_client.post("/search", json=params)
assert resp.status_code == 400
@pytest.mark.asyncio
async def test_app_query_extension_limit_lt0(
load_test_data, app_client, load_test_collection
):
coll = load_test_collection
item = load_test_data("test_item.json")
resp = await app_client.post(f"/collections/{coll.id}/items", json=item)
assert resp.status_code == 200
params = {"limit": -1}
resp = await app_client.post("/search", json=params)
assert resp.status_code == 400
@pytest.mark.asyncio
async def test_app_query_extension_limit_gt10000(
load_test_data, app_client, load_test_collection
):
coll = load_test_collection
item = load_test_data("test_item.json")
resp = await app_client.post(f"/collections/{coll.id}/items", json=item)
assert resp.status_code == 200
params = {"limit": 10001}
resp = await app_client.post("/search", json=params)
assert resp.status_code == 400
@pytest.mark.asyncio
async def test_app_query_extension_gt(load_test_data, app_client, load_test_collection):
coll = load_test_collection
item = load_test_data("test_item.json")
resp = await app_client.post(f"/collections/{coll.id}/items", json=item)
assert resp.status_code == 200
params = {"query": {"proj:epsg": {"gt": item["properties"]["proj:epsg"]}}}
resp = await app_client.post("/search", json=params)
assert resp.status_code == 200
resp_json = resp.json()
assert len(resp_json["features"]) == 0
@pytest.mark.asyncio
async def test_app_query_extension_gte(
load_test_data, app_client, load_test_collection
):
coll = load_test_collection
item = load_test_data("test_item.json")
resp = await app_client.post(f"/collections/{coll.id}/items", json=item)
assert resp.status_code == 200
params = {"query": {"proj:epsg": {"gte": item["properties"]["proj:epsg"]}}}
resp = await app_client.post("/search", json=params)
assert resp.status_code == 200
resp_json = resp.json()
assert len(resp_json["features"]) == 1
@pytest.mark.asyncio
async def test_app_sort_extension(load_test_data, app_client, load_test_collection):
coll = load_test_collection
first_item = load_test_data("test_item.json")
item_date = datetime.strptime(
first_item["properties"]["datetime"], "%Y-%m-%dT%H:%M:%SZ"
)
resp = await app_client.post(f"/collections/{coll.id}/items", json=first_item)
assert resp.status_code == 200
second_item = load_test_data("test_item.json")
second_item["id"] = "another-item"
another_item_date = item_date - timedelta(days=1)
second_item["properties"]["datetime"] = another_item_date.strftime(
"%Y-%m-%dT%H:%M:%SZ"
)
resp = await app_client.post(f"/collections/{coll.id}/items", json=second_item)
assert resp.status_code == 200
params = {
"collections": [coll.id],
"sortby": [{"field": "datetime", "direction": "desc"}],
}
resp = await app_client.post("/search", json=params)
assert resp.status_code == 200
resp_json = resp.json()
assert resp_json["features"][0]["id"] == first_item["id"]
assert resp_json["features"][1]["id"] == second_item["id"]
params = {
"collections": [coll.id],
"sortby": [{"field": "datetime", "direction": "asc"}],
}
resp = await app_client.post("/search", json=params)
assert resp.status_code == 200
resp_json = resp.json()
assert resp_json["features"][1]["id"] == first_item["id"]
assert resp_json["features"][0]["id"] == second_item["id"]
@pytest.mark.asyncio
async def test_search_invalid_date(load_test_data, app_client, load_test_collection):
coll = load_test_collection
first_item = load_test_data("test_item.json")
resp = await app_client.post(f"/collections/{coll.id}/items", json=first_item)
assert resp.status_code == 200
params = {
"datetime": "2020-XX-01/2020-10-30",
"collections": [coll.id],
}
resp = await app_client.post("/search", json=params)
assert resp.status_code == 400
@pytest.mark.asyncio
async def test_bbox_3d(load_test_data, app_client, load_test_collection):
coll = load_test_collection
first_item = load_test_data("test_item.json")
resp = await app_client.post(f"/collections/{coll.id}/items", json=first_item)
assert resp.status_code == 200
australia_bbox = [106.343365, -47.199523, 0.1, 168.218365, -19.437288, 0.1]
params = {
"bbox": australia_bbox,
"collections": [coll.id],
}
resp = await app_client.post("/search", json=params)
assert resp.status_code == 200
resp_json = resp.json()
assert len(resp_json["features"]) == 1
@pytest.mark.asyncio
async def test_app_search_response(load_test_data, app_client, load_test_collection):
coll = load_test_collection
params = {
"collections": [coll.id],
}
resp = await app_client.post("/search", json=params)
assert resp.status_code == 200
resp_json = resp.json()
assert resp_json.get("type") == "FeatureCollection"
# stac_version and stac_extensions were removed in v1.0.0-beta.3
assert resp_json.get("stac_version") is None
assert resp_json.get("stac_extensions") is None
@pytest.mark.asyncio
async def test_search_point_intersects(
load_test_data, app_client, load_test_collection
):
coll = load_test_collection
item = load_test_data("test_item.json")
resp = await app_client.post(f"/collections/{coll.id}/items", json=item)
assert resp.status_code == 200
point = [150.04, -33.14]
intersects = {"type": "Point", "coordinates": point}
params = {
"intersects": intersects,
"collections": [item["collection"]],
}
resp = await app_client.post("/search", json=params)
assert resp.status_code == 200
resp_json = resp.json()
assert len(resp_json["features"]) == 1
@pytest.mark.asyncio
async def test_search_line_string_intersects(
load_test_data, app_client, load_test_collection
):
coll = load_test_collection
item = load_test_data("test_item.json")
resp = await app_client.post(f"/collections/{coll.id}/items", json=item)
assert resp.status_code == 200
line = [[150.04, -33.14], [150.22, -33.89]]
intersects = {"type": "LineString", "coordinates": line}
params = {
"intersects": intersects,
"collections": [item["collection"]],
}
resp = await app_client.post("/search", json=params)
assert resp.status_code == 200
resp_json = resp.json()
assert len(resp_json["features"]) == 1
| 32.27541
| 88
| 0.687627
| 1,351
| 9,844
| 4.760178
| 0.099926
| 0.064687
| 0.055979
| 0.083968
| 0.825844
| 0.807339
| 0.784948
| 0.753849
| 0.727103
| 0.705023
| 0
| 0.024657
| 0.171881
| 9,844
| 304
| 89
| 32.381579
| 0.76423
| 0.006298
| 0
| 0.621399
| 0
| 0
| 0.189673
| 0.073926
| 0
| 0
| 0
| 0
| 0.193416
| 1
| 0
| false
| 0
| 0.00823
| 0
| 0.00823
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
9af5a4f9df237d47ffaadcc11e42ab8375b11ffb
| 82
|
py
|
Python
|
MiddleKit/Core/BoolAttr.py
|
PeaceWorksTechnologySolutions/w4py
|
74f5a03a63f1a93563502b908474aefaae2abda2
|
[
"MIT"
] | 18
|
2016-08-01T20:15:59.000Z
|
2019-12-24T16:00:03.000Z
|
MiddleKit/Core/BoolAttr.py
|
WebwareForPython/w4py
|
bba08f5974d49f5da7e88abe3eeda1037d0824a3
|
[
"MIT"
] | 6
|
2016-09-13T05:48:45.000Z
|
2020-01-09T18:29:12.000Z
|
MiddleKit/Core/BoolAttr.py
|
WebwareForPython/w4py
|
bba08f5974d49f5da7e88abe3eeda1037d0824a3
|
[
"MIT"
] | 6
|
2016-09-16T14:32:29.000Z
|
2020-01-03T18:52:16.000Z
|
from BasicTypeAttr import BasicTypeAttr
class BoolAttr(BasicTypeAttr):
pass
| 13.666667
| 39
| 0.804878
| 8
| 82
| 8.25
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.158537
| 82
| 5
| 40
| 16.4
| 0.956522
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
b10d0a6f817ca630076f05be80992ec2dc874ed7
| 193
|
py
|
Python
|
src/blip_sdk/extensions/artificial_intelligence/ai_model/content_type.py
|
mirlarof/blip-sdk-python
|
f958149b2524d4340eeafad8739a33db71df45ed
|
[
"MIT"
] | 2
|
2021-07-02T20:10:48.000Z
|
2021-07-13T20:51:18.000Z
|
src/blip_sdk/extensions/artificial_intelligence/ai_model/content_type.py
|
mirlarof/blip-sdk-python
|
f958149b2524d4340eeafad8739a33db71df45ed
|
[
"MIT"
] | 9
|
2021-05-27T21:08:23.000Z
|
2021-06-14T20:10:10.000Z
|
src/blip_sdk/extensions/artificial_intelligence/ai_model/content_type.py
|
mirlarof/blip-sdk-python
|
f958149b2524d4340eeafad8739a33db71df45ed
|
[
"MIT"
] | 3
|
2021-06-23T19:53:20.000Z
|
2022-01-04T17:50:44.000Z
|
class ContentType:
"""AI Model content types."""
MODEL_PUBLISHING = 'application/vnd.iris.ai.model-publishing+json'
MODEL_TRAINING = 'application/vnd.iris.ai.model-training+json'
| 27.571429
| 70
| 0.73057
| 24
| 193
| 5.791667
| 0.5
| 0.151079
| 0.258993
| 0.28777
| 0.359712
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.134715
| 193
| 6
| 71
| 32.166667
| 0.832335
| 0.119171
| 0
| 0
| 0
| 0
| 0.536585
| 0.536585
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
b116f2a82c79adecd67a72b7a044671174148cc9
| 16,205
|
py
|
Python
|
app/tests/api_tests/test_exclusions.py
|
AirWalk-Digital/airview-api
|
1c4ce3569a5e3834d53f937a35ec5d8b0e991cdb
|
[
"Apache-2.0"
] | 2
|
2021-11-29T13:28:42.000Z
|
2021-12-21T14:37:58.000Z
|
app/tests/api_tests/test_exclusions.py
|
AirWalk-Digital/airview-api
|
1c4ce3569a5e3834d53f937a35ec5d8b0e991cdb
|
[
"Apache-2.0"
] | 2
|
2021-11-04T10:24:10.000Z
|
2021-11-12T15:16:20.000Z
|
app/tests/api_tests/test_exclusions.py
|
AirWalk-Digital/airview-api
|
1c4ce3569a5e3834d53f937a35ec5d8b0e991cdb
|
[
"Apache-2.0"
] | null | null | null |
from datetime import datetime
import pytest
from pprint import pprint
from airview_api.models import (
TechnicalControlSeverity,
Exclusion,
ExclusionState,
MonitoredResourceState,
SystemStage,
TechnicalControlAction,
)
from tests.common import client
from tests.factories import *
from dateutil import parser
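# setup() seeds the test database: two systems, an application with a reference, two technical controls and their application links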
def setup():
reset_factories()
SystemFactory(id=1, name="one", stage=SystemStage.BUILD)
SystemFactory(id=2, name="two", stage=SystemStage.BUILD)
EnvironmentFactory(id=1)
ApplicationFactory(id=11, parent_id=None, name="svc 13", environment_id=1)
ApplicationReferenceFactory(
id=311, application_id=11, type="app-ref", reference="app-11"
)
TechnicalControlFactory(
id=22,
name="ctl1",
reference="control_a",
control_action=TechnicalControlAction.LOG,
system_id=1,
severity=TechnicalControlSeverity.HIGH,
)
TechnicalControlFactory(
id=230,
name="ctl2",
reference="control_5",
control_action=TechnicalControlAction.LOG,
system_id=2,
severity=TechnicalControlSeverity.HIGH,
)
ApplicationTechnicalControlFactory(
id=33, application_id=11, technical_control_id=22
)
ApplicationTechnicalControlFactory(
id=340, application_id=11, technical_control_id=230
)
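# add_get_items_to_db() inserts an exclusion with a pending monitored resource, plus data for another application and a second exclusion used by the filter tests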
def add_get_items_to_db():
ExclusionFactory(
id=44,
application_technical_control_id=33,
summary="sss",
mitigation="mmm",
impact=3,
probability=4,
is_limited_exclusion=True,
end_date=datetime(1, 1, 1),
notes="nnn",
)
MonitoredResourceFactory(
id=55,
exclusion_id=44,
reference="res-a",
exclusion_state=ExclusionState.PENDING,
monitoring_state=MonitoredResourceState.FIXED_AUTO,
last_modified=datetime(1, 1, 1),
last_seen=datetime(2, 1, 1),
application_technical_control_id=33,
)
# unrelated data for another application and control
ApplicationFactory(id=12, parent_id=None, name="svc 13", environment_id=1)
ApplicationReferenceFactory(
id=312, application_id=12, type="app-ref", reference="app-svc-13"
)
ApplicationTechnicalControlFactory(
id=34, application_id=12, technical_control_id=22
)
ExclusionFactory(
id=45,
application_technical_control_id=340,
summary="sss",
mitigation="mmm",
impact=3,
probability=4,
is_limited_exclusion=True,
end_date=datetime(1, 1, 1),
notes="nnn",
)
MonitoredResourceFactory(
id=56,
exclusion_id=45,
reference="res-5",
exclusion_state=ExclusionState.PENDING,
monitoring_state=MonitoredResourceState.FIXED_AUTO,
last_modified=datetime(1, 1, 1),
last_seen=datetime(2, 1, 1),
application_technical_control_id=340,
)
MonitoredResourceFactory(
id=57,
exclusion_id=45,
reference="res-6",
exclusion_state=ExclusionState.ACTIVE,
monitoring_state=MonitoredResourceState.FIXED_AUTO,
last_modified=datetime(1, 1, 1),
last_seen=datetime(2, 1, 1),
application_technical_control_id=340,
)
def test_exclusions_post_ok_for_new_resources(client):
"""
Given: An empty exclusions collection, linked app controls, existing resources
When: When the api is called with an exclusion request for new resources
Then: The exclusion request is persisted, new resources are created and linked, 201 status
"""
# Arrange
MonitoredResourceFactory(
application_technical_control_id=33,
reference="res-a",
monitoring_state=MonitoredResourceState.FIXED_AUTO,
last_modified=datetime(1, 1, 1),
last_seen=datetime(2, 1, 1),
)
MonitoredResourceFactory(
application_technical_control_id=33,
reference="res-b",
monitoring_state=MonitoredResourceState.FIXED_AUTO,
last_modified=datetime(1, 1, 1),
last_seen=datetime(2, 1, 1),
)
data = {
"applicationTechnicalControlId": 33,
"summary": "sum a",
"mitigation": "mit b",
"probability": 1,
"impact": 2,
"resources": ["res-c", "res-d"],
"isLimitedExclusion": True,
"endDate": "2022-01-01T00:00:00.000000Z",
"notes": "notes c",
}
# Act
resp = client.post("/exclusions/", json=data)
print(resp.get_json())
# Assert
assert resp.status_code == 201
exclusion = db.session.query(Exclusion).first()
assert exclusion.application_technical_control_id == 33
assert exclusion.summary == data["summary"]
assert exclusion.mitigation == data["mitigation"]
assert exclusion.probability == data["probability"]
assert exclusion.impact == data["impact"]
assert exclusion.is_limited_exclusion == data["isLimitedExclusion"]
assert exclusion.end_date == datetime(2022, 1, 1, 0, 0)
assert exclusion.notes == data["notes"]
assert len(exclusion.resources) == 2
assert exclusion.resources[0].reference == "res-c"
assert exclusion.resources[1].reference == "res-d"
assert exclusion.resources[0].exclusion_state == ExclusionState.PENDING
assert exclusion.resources[1].exclusion_state == ExclusionState.PENDING
assert exclusion.resources[0].exclusion_id == exclusion.id
assert exclusion.resources[1].exclusion_id == exclusion.id
def test_exclusions_post_ok_for_existing_resources(client):
"""
Given: An empty exclusions collection, linked app controls, existing resources
When: When the api is called with an exclusion request
Then: The exclusions request is persisted & linked to existing resources, 201 status
"""
# Arrange
MonitoredResourceFactory(
application_technical_control_id=33,
reference="res-a",
monitoring_state=MonitoredResourceState.FIXED_AUTO,
last_modified=datetime(1, 1, 1),
last_seen=datetime(2, 1, 1),
)
MonitoredResourceFactory(
application_technical_control_id=33,
reference="res-b",
monitoring_state=MonitoredResourceState.FIXED_AUTO,
last_modified=datetime(1, 1, 1),
last_seen=datetime(2, 1, 1),
)
data = {
"applicationTechnicalControlId": 33,
"summary": "sum a",
"mitigation": "mit b",
"probability": 1,
"impact": 2,
"resources": ["res-a", "res-b"],
"isLimitedExclusion": True,
"endDate": "2022-01-01T00:00:00.000000Z",
"notes": "notes c",
}
# Act
resp = client.post("/exclusions/", json=data)
print(resp.get_json())
# Assert
assert resp.status_code == 201
exclusion = db.session.query(Exclusion).first()
assert exclusion.application_technical_control_id == 33
assert exclusion.summary == data["summary"]
assert exclusion.mitigation == data["mitigation"]
assert exclusion.probability == data["probability"]
assert exclusion.impact == data["impact"]
assert exclusion.is_limited_exclusion == data["isLimitedExclusion"]
assert exclusion.end_date == datetime(2022, 1, 1, 0, 0)
assert exclusion.notes == data["notes"]
assert len(exclusion.resources) == 2
assert exclusion.resources[0].reference == "res-a"
assert exclusion.resources[1].reference == "res-b"
assert exclusion.resources[0].exclusion_state == ExclusionState.PENDING
assert exclusion.resources[1].exclusion_state == ExclusionState.PENDING
assert exclusion.resources[0].exclusion_id == exclusion.id
assert exclusion.resources[1].exclusion_id == exclusion.id
def test_exclusions_bad_request_for_missing_app_tech_control(client):
"""
Given: An empty exclusions collection, unlinked app/controls
When: When the api is called with an exclusion request for missing app tech control
Then: 400, no persistence
"""
# Arrange
data = {
"applicationTechnicalControlId": 999,
"summary": "sum a",
"mitigation": "mit b",
"probability": 1,
"impact": 2,
"resources": ["res-a", "res-b"],
"isLimitedExclusion": True,
"endDate": "2022-01-01T00:00:00.000Z",
"notes": "notes c",
}
# Act
resp = client.post("/exclusions/", json=data)
# Assert
assert resp.status_code == 400
assert len(db.session.query(Exclusion).all()) == 0
def test_exclusions_post_bad_request_for_duplicate_resources(client):
"""
Given: An existing exclusion in the db
When: When the api is called with an exclusion request for pre-existing resources
Then: 400, no persistence
"""
# Arrange
ExclusionFactory(
id=44,
application_technical_control_id=33,
summary="sss",
mitigation="mmm",
impact=3,
probability=4,
is_limited_exclusion=True,
end_date=datetime(1, 1, 1),
notes="nnn",
)
MonitoredResourceFactory(
id=55,
exclusion_id=44,
reference="res-a",
exclusion_state=ExclusionState.PENDING,
monitoring_state=MonitoredResourceState.FIXED_AUTO,
last_modified=datetime(1, 1, 1),
last_seen=datetime(2, 1, 1),
application_technical_control_id=33,
)
data = {
"applicationTechnicalControlId": 33,
"summary": "sum a",
"mitigation": "mit b",
"probability": 1,
"impact": 2,
"resources": ["res-a", "res-b"],
"isLimitedExclusion": True,
"endDate": "2022-01-01 00:00:00.000",
"notes": "notes c",
}
# Act
resp = client.post("/exclusions/", json=data)
# Assert
assert resp.status_code == 400
assert len(db.session.query(Exclusion).all()) == 1
assert len(db.session.query(MonitoredResource).all()) == 1
def test_exclusions_post_ok_for_different_resources(client):
"""
Given: An existing exclusion in the db
When: When the api is called with an exclusion request for non-existing resources
Then: 201, new exclusion created
"""
# Arrange
ExclusionFactory(
id=44,
application_technical_control_id=33,
summary="sss",
mitigation="mmm",
impact=3,
probability=4,
is_limited_exclusion=True,
end_date=datetime(1, 1, 1),
notes="nnn",
)
MonitoredResourceFactory(
id=55,
exclusion_id=44,
reference="res-a",
exclusion_state=ExclusionState.PENDING,
monitoring_state=MonitoredResourceState.FIXED_AUTO,
last_modified=datetime(1, 1, 1),
last_seen=datetime(2, 1, 1),
application_technical_control_id=33,
)
data = {
"applicationTechnicalControlId": 33,
"summary": "sum a",
"mitigation": "mit b",
"probability": 1,
"impact": 2,
"resources": ["res-b", "res-c"],
"isLimitedExclusion": True,
"endDate": "2022-01-01T00:00:00.000000Z",
"notes": "notes c",
}
# Act
resp = client.post("/exclusions/", json=data)
# Assert
assert resp.status_code == 201
assert len(db.session.query(Exclusion).all()) == 2
assert len(db.session.query(MonitoredResource).all()) == 3
exclusion = db.session.query(Exclusion).filter(Exclusion.id != 44).first()
assert exclusion.application_technical_control_id == 33
assert exclusion.summary == data["summary"]
assert exclusion.mitigation == data["mitigation"]
assert exclusion.probability == data["probability"]
assert exclusion.impact == data["impact"]
assert exclusion.is_limited_exclusion == data["isLimitedExclusion"]
assert exclusion.end_date == datetime(2022, 1, 1, 0, 0)
assert exclusion.notes == data["notes"]
def test_exclusions_get_returns_correct_response(client):
"""
Given: Existing exclusion & resources in the db
When: When the api is called to get exclusions by system
Then: 200, exclusions returned
"""
# Arrange
add_get_items_to_db()
# Act
resp = client.get("/systems/1/exclusion-resources/")
# Assert
data = resp.get_json()
assert resp.status_code == 200
assert len(data) == 1
item = data[0]
assert item["id"] == 55
assert item["technicalControlReference"] == "control_a"
assert item["reference"] == "res-a"
assert item["state"] == "PENDING"
assert len(item["applicationReferences"]) == 1
assert item["applicationReferences"][0]["type"] == "app-ref"
assert item["applicationReferences"][0]["reference"] == "app-11"
def test_exclusions_get_filters_out_by_state(client):
"""
Given: Existing exclusion & resources in the db
When: When the api is called to get exclusions by system filtered by state
Then: 200, only exclusions matching the state returned
"""
# Arrange
add_get_items_to_db()
# Act
resp = client.get("/systems/2/exclusion-resources/?state=PENDING")
# Assert
data = resp.get_json()
assert resp.status_code == 200
assert len(data) == 1
item = data[0]
assert item["id"] == 56
def test_exclusions_get_handles_invalid_filter(client):
"""
Given: Existing exclusion & resources in the db
When: When the api is called to get exclusions by system with a bad filter
Then: 200, empty array returned
"""
# Arrange
add_get_items_to_db()
# Act
resp = client.get("/systems/2/exclusion-resources/?state=XXXXX")
# Assert
data = resp.get_json()
assert resp.status_code == 200
assert len(data) == 0
def test_exclusion_resources_put_bad_request_for_id_mismatch(client):
"""
Given: Existing exclusion & resources in the db
When: When the api is called to update an exclusion resource with url & payload id mismatch
Then: 400, no data changed
"""
# Arrange
add_get_items_to_db()
data = {
"id": 55,
"technicalControlReference": "control_a",
"reference": "res-a",
"state": "ACTIVE",
}
# Act
resp = client.put("/exclusion-resources/999/", json=data)
# Assert
assert resp.status_code == 400
item = db.session.query(MonitoredResource).get(55)
assert item.exclusion_state == ExclusionState.PENDING
def test_exclusion_resources_put_conflict_for_invalid_exclusion(client):
"""
Given: Existing exclusion & resources in the db
When: When the api is called to update an exclusion which does not yet exist
Then: 409 (conflict), no data changed
"""
# Arrange
add_get_items_to_db()
data = {
"id": 999,
"technicalControlReference": "control_a",
"reference": "res-a",
"state": "ACTIVE",
}
# Act
resp = client.put("/exclusion-resources/999/", json=data)
# Assert
assert resp.status_code == 409
item = db.session.query(MonitoredResource).get(55)
assert item.exclusion_state == ExclusionState.PENDING
def test_exclusion_resources_put_updates_record(client):
"""
Given: Existing exclusion & resources in the db
When: When the api is called to update an exclusion resource state
Then: 204, the resource's exclusion state is updated
"""
# Arrange
add_get_items_to_db()
data = {
"id": 55,
"technicalControlReference": "control_a",
"reference": "res-a",
"state": "ACTIVE",
}
# Act
resp = client.put("/exclusion-resources/55/", json=data)
# Assert
assert resp.status_code == 204
item = db.session.query(MonitoredResource).get(55)
assert item.exclusion_state == ExclusionState.ACTIVE
def test_exclusion_resources_put_updates_record_with_sparse_response(client):
"""
Given: Existing exclusion & resources in the db
When: When the api is called to update an exclusion resource with a sparse payload
Then: 204, the resource's exclusion state is updated
"""
# Arrange
add_get_items_to_db()
data = {
"id": 55,
"state": "ACTIVE",
}
# Act
resp = client.put("/exclusion-resources/55/", json=data)
# Assert
assert resp.status_code == 204
item = db.session.query(MonitoredResource).get(55)
assert item.exclusion_state == ExclusionState.ACTIVE
| 29.679487
| 95
| 0.651466
| 1,858
| 16,205
| 5.530678
| 0.113025
| 0.007396
| 0.033281
| 0.045154
| 0.837485
| 0.813352
| 0.775691
| 0.758661
| 0.758369
| 0.758369
| 0
| 0.038095
| 0.235421
| 16,205
| 545
| 96
| 29.733945
| 0.791283
| 0.137612
| 0
| 0.697548
| 0
| 0
| 0.127727
| 0.046113
| 0
| 0
| 0
| 0
| 0.190736
| 1
| 0.038147
| false
| 0
| 0.019074
| 0
| 0.057221
| 0.008174
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b199e4aa85b331e316ae79f240eb1f10867adc67
| 101
|
py
|
Python
|
jumpscale/data/encryption/exceptions.py
|
zaibon/js-ng
|
8b63c04757d1432ed4aa588500a113610701de14
|
[
"Apache-2.0"
] | 2
|
2021-04-28T10:46:08.000Z
|
2021-12-22T12:33:34.000Z
|
jumpscale/data/encryption/exceptions.py
|
zaibon/js-ng
|
8b63c04757d1432ed4aa588500a113610701de14
|
[
"Apache-2.0"
] | 321
|
2020-06-15T11:48:21.000Z
|
2022-03-29T22:13:33.000Z
|
jumpscale/data/encryption/exceptions.py
|
zaibon/js-ng
|
8b63c04757d1432ed4aa588500a113610701de14
|
[
"Apache-2.0"
] | 4
|
2020-06-18T06:19:29.000Z
|
2021-07-14T12:54:47.000Z
|
from jumpscale.core.exceptions import JSException
class FailedChecksumError(JSException):
pass
| 16.833333
| 49
| 0.821782
| 10
| 101
| 8.3
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.128713
| 101
| 5
| 50
| 20.2
| 0.943182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
493a6d41686d9c19058d1d6518e55e607bd5d8ff
| 49
|
py
|
Python
|
ivadomed/__init__.py
|
AshkanTaghipour/ivadomed
|
84c4e01831265b311c7b053ffdb19fb393fb135d
|
[
"MIT"
] | 87
|
2020-06-19T16:43:36.000Z
|
2022-03-23T03:07:50.000Z
|
ivadomed/__init__.py
|
AshkanTaghipour/ivadomed
|
84c4e01831265b311c7b053ffdb19fb393fb135d
|
[
"MIT"
] | 778
|
2020-06-21T22:42:55.000Z
|
2022-03-31T19:03:54.000Z
|
ivadomed/__init__.py
|
AshkanTaghipour/ivadomed
|
84c4e01831265b311c7b053ffdb19fb393fb135d
|
[
"MIT"
] | 282
|
2020-07-25T18:01:08.000Z
|
2022-02-01T23:35:54.000Z
|
from .utils import __version__, __ivadomed_dir__
| 24.5
| 48
| 0.857143
| 6
| 49
| 5.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.102041
| 49
| 1
| 49
| 49
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4987ea3829b0d9d5d60339b6963bc5015d3183b8
| 200
|
py
|
Python
|
nbdev_try/core.py
|
alinaselega/nbdev_try
|
b596ec513a793079ff11f103319084abe20d2a08
|
[
"Apache-2.0"
] | 2
|
2021-06-10T17:52:03.000Z
|
2021-06-14T13:40:15.000Z
|
nbdev_try/core.py
|
alinaselega/nbdev_try
|
b596ec513a793079ff11f103319084abe20d2a08
|
[
"Apache-2.0"
] | null | null | null |
nbdev_try/core.py
|
alinaselega/nbdev_try
|
b596ec513a793079ff11f103319084abe20d2a08
|
[
"Apache-2.0"
] | null | null | null |
# AUTOGENERATED! DO NOT EDIT! File to edit: 00_core.ipynb (unless otherwise specified).
__all__ = ['print_hello']
# Cell
def print_hello(to):
"Print hello to the user"
return f"Hello, {to}!"
| 25
| 87
| 0.695
| 30
| 200
| 4.4
| 0.7
| 0.227273
| 0.181818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012195
| 0.18
| 200
| 8
| 88
| 25
| 0.792683
| 0.575
| 0
| 0
| 1
| 0
| 0.425926
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0
| 0
| 0.5
| 0.5
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
7728320dc4ede397dce7c48749cf18bc1ac646f1
| 157
|
py
|
Python
|
py2diagrams/parsers/base.py
|
RomAviad/py2diagrams
|
1ec096014d71207f639ca4387786721a974e0d39
|
[
"MIT"
] | 1
|
2019-03-05T11:24:03.000Z
|
2019-03-05T11:24:03.000Z
|
py2diagrams/parsers/base.py
|
RomAviad/py2diagrams
|
1ec096014d71207f639ca4387786721a974e0d39
|
[
"MIT"
] | null | null | null |
py2diagrams/parsers/base.py
|
RomAviad/py2diagrams
|
1ec096014d71207f639ca4387786721a974e0d39
|
[
"MIT"
] | null | null | null |
class BaseAnalyzer(object):
def __init__(self, base_node):
self.base_node = base_node
def analyze(self):
raise NotImplementedError()
| 26.166667
| 35
| 0.681529
| 18
| 157
| 5.555556
| 0.611111
| 0.24
| 0.24
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.229299
| 157
| 6
| 35
| 26.166667
| 0.826446
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
6224a3980ea8af4fdb5796c6a1e55fa0c8db6ed8
| 44
|
py
|
Python
|
app/__main__.py
|
theirix/hilinkmon
|
bac73e5e11ebf132ef33aaede6eb0b2050f8807c
|
[
"BSD-3-Clause"
] | null | null | null |
app/__main__.py
|
theirix/hilinkmon
|
bac73e5e11ebf132ef33aaede6eb0b2050f8807c
|
[
"BSD-3-Clause"
] | null | null | null |
app/__main__.py
|
theirix/hilinkmon
|
bac73e5e11ebf132ef33aaede6eb0b2050f8807c
|
[
"BSD-3-Clause"
] | null | null | null |
from .monitor import main_loop
main_loop()
| 11
| 30
| 0.795455
| 7
| 44
| 4.714286
| 0.714286
| 0.484848
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.136364
| 44
| 3
| 31
| 14.666667
| 0.868421
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
623563756767a196e6978f1ef9e7fcb0e97aa59c
| 26139
|
py
|
Python
|
tests/unit/controller/test_user_otp.py
|
ryanlerch/securitas
|
2eba843ed5e0f6c95a342b8b152bae943216e48c
|
[
"MIT"
] | 1
|
2020-01-16T12:38:53.000Z
|
2020-01-16T12:38:53.000Z
|
tests/unit/controller/test_user_otp.py
|
ryanlerch/securitas
|
2eba843ed5e0f6c95a342b8b152bae943216e48c
|
[
"MIT"
] | 113
|
2020-01-08T10:46:14.000Z
|
2020-02-26T09:51:09.000Z
|
tests/unit/controller/test_user_otp.py
|
ryanlerch/securitas
|
2eba843ed5e0f6c95a342b8b152bae943216e48c
|
[
"MIT"
] | 8
|
2020-01-08T11:46:02.000Z
|
2020-02-21T19:22:41.000Z
|
from unittest import mock
from urllib.parse import parse_qs, urlparse
import pytest
import python_freeipa
from bs4 import BeautifulSoup
from pyotp import TOTP
from noggin.app import ipa_admin
from noggin.representation.otptoken import OTPToken
from ..utilities import (
assert_form_field_error,
assert_form_generic_error,
assert_redirects_with_flash,
get_otp,
otp_secret_from_uri,
)
@pytest.fixture
def dummy_user_with_2_otp(client, logged_in_dummy_user, logged_in_dummy_user_with_otp):
ipa = logged_in_dummy_user
result = ipa.otptoken_add(
o_ipatokenowner="dummy",
o_description="dummy's other token",
)['result']
token = OTPToken(result)
yield logged_in_dummy_user_with_otp, token
try:
ipa_admin.otptoken_del(token.uniqueid)
except python_freeipa.exceptions.NotFound:
pass # already deleted, it's fine.
@pytest.fixture
def totp_token():
return TOTP("BJ3F2NQ2CADX6ZOEDGGKATDQMVTKY3XLC73ASUHIBVGGGWJJOYFXIFIT")
@pytest.mark.vcr()
def test_user_settings_otp(client, logged_in_dummy_user):
"""Test getting the user OTP settings page: /user/<username>/settings/otp/"""
result = client.get("/user/dummy/settings/otp/")
page = BeautifulSoup(result.data, "html.parser")
assert page.title
assert page.title.string == "Settings for dummy - noggin"
# check the pageheading
pageheading = page.select("#pageheading")[0]
assert pageheading.get_text(strip=True) == "OTP Tokens"
# check that there aren't any tokens
tokenlist = page.select("div.list-group")
assert len(tokenlist) == 1
assert (
tokenlist[0].select(".list-group-item")[0].get_text(strip=True)
== "You have no OTP tokensAdd an OTP token to enable two-factor "
"authentication on your account."
)
result = client.get("/user/dummy/settings/otp/")
page = BeautifulSoup(result.data, "html.parser")
assert page.title
assert page.title.string == "Settings for dummy - noggin"
form = page.select("form[action='/user/dummy/settings/otp/']")
assert len(form) == 1
@pytest.mark.vcr()
def test_user_settings_otp_no_permission(client, logged_in_dummy_user):
"""Verify that a user's OTP settings page can't be viewed by another user."""
result = client.get("/user/dudemcpants/settings/otp/")
assert_redirects_with_flash(
result,
expected_url="/user/dudemcpants/",
expected_message="You do not have permission to edit this account.",
expected_category="danger",
)
@pytest.mark.vcr()
def test_user_settings_otp_add(client, logged_in_dummy_user, cleanup_dummy_tokens):
"""Test the first step of OTP creation"""
result = client.post(
"/user/dummy/settings/otp/",
data={
"add-description": "pants token",
"add-password": "dummy_password",
"add-submit": "1",
},
)
page = BeautifulSoup(result.data, "html.parser")
# The token has not been added yet
tokenlist = page.select_one("div.list-group")
assert tokenlist is not None
assert "You have no OTP tokens" in tokenlist.get_text(strip=True)
# check the modal is on the page
modal = page.select_one("#otp-modal")
assert modal is not None
# check the next step form is properly pre-filled
confirm_form = modal.select_one("form")
assert confirm_form is not None
assert (
confirm_form.select_one("input[name='confirm-description']")["value"]
== "pants token"
)
otp_uri = page.select_one("input#otp-uri")
parsed_otp_uri_query = parse_qs(urlparse(otp_uri["value"]).query)
assert (
confirm_form.select_one("input[name='confirm-secret']")["value"]
== parsed_otp_uri_query["secret"][0]
)
@pytest.mark.vcr()
def test_user_settings_otp_confirm(
client, logged_in_dummy_user, cleanup_dummy_tokens, totp_token
):
"""Test OTP creation"""
result = client.post(
"/user/dummy/settings/otp/",
data={
"confirm-description": "pants token",
"confirm-secret": totp_token.secret,
"confirm-code": totp_token.now(),
"confirm-submit": "1",
},
)
assert_redirects_with_flash(
result,
expected_url="/user/dummy/settings/otp/",
expected_message="The token has been created.",
expected_category="success",
)
result = client.get("/user/dummy/settings/otp/")
page = BeautifulSoup(result.data, "html.parser")
tokenlist = page.select_one("div.list-group")
assert tokenlist is not None
# check that this is not the "no tokens" message
assert "You have no OTP tokens" not in tokenlist.get_text(strip=True)
# check we are showing 1 token
tokens = tokenlist.select(".list-group-item .col")
assert len(tokens) == 1
# check the token is in the list
description = tokens[0].select_one("div[data-role='token-description']")
assert description is not None
assert description.get_text(strip=True) == "pants token"
# check the modal is closed
assert page.select_one("#otp-modal") is None
@pytest.mark.vcr()
def test_user_settings_otp_add_second(
client, logged_in_dummy_user_with_otp, cleanup_dummy_tokens
):
"""Test posting to the create OTP endpoint"""
otp = get_otp(otp_secret_from_uri(logged_in_dummy_user_with_otp.uri))
result = client.post(
"/user/dummy/settings/otp/",
data={
"add-description": "pants token 2",
"add-password": "dummy_password",
"add-otp": otp,
"add-submit": "1",
},
)
page = BeautifulSoup(result.data, "html.parser")
tokenlist = page.select_one("div.list-group")
assert tokenlist is not None
tokens = tokenlist.select(".list-group-item div[data-role='token-description']")
assert len(tokens) == 1
modal = page.select_one("#otp-modal")
assert modal is not None
confirm_form = modal.select_one("form")
assert confirm_form is not None
assert (
confirm_form.select_one("input[name='confirm-description']")["value"]
== "pants token 2"
)
otp_uri = page.select_one("input#otp-uri")
parsed_otp_uri_query = parse_qs(urlparse(otp_uri["value"]).query)
assert (
confirm_form.select_one("input[name='confirm-secret']")["value"]
== parsed_otp_uri_query["secret"][0]
)
@pytest.mark.vcr()
def test_user_settings_otp_add_second_confirm(
client,
logged_in_dummy_user_with_otp,
cleanup_dummy_tokens,
totp_token,
):
"""Test posting to the create OTP endpoint"""
result = client.post(
"/user/dummy/settings/otp/",
data={
"confirm-description": "pants token",
"confirm-secret": totp_token.secret,
"confirm-code": totp_token.now(),
"confirm-submit": "1",
},
follow_redirects=True,
)
page = BeautifulSoup(result.data, "html.parser")
tokenlist = page.select_one("div.list-group")
assert tokenlist is not None
# check we are showing 2 tokens
tokens = tokenlist.select(".list-group-item div[data-role='token-description']")
assert len(tokens) == 2
# check the 2nd token is in the list
assert tokens[1].get_text(strip=True) == "pants token"
# check the modal is closed
assert page.select_one("#otp-modal") is None
@pytest.mark.vcr()
def test_user_settings_otp_check_no_description(
client, logged_in_dummy_user, cleanup_dummy_tokens, totp_token
):
"""Test an OTP token without a description"""
result = client.post(
"/user/dummy/settings/otp/",
data={
"confirm-secret": totp_token.secret,
"confirm-code": totp_token.now(),
"confirm-submit": "1",
},
follow_redirects=True,
)
page = BeautifulSoup(result.data, "html.parser")
tokenlist = page.select_one("div.list-group")
assert tokenlist is not None
tokens = tokenlist.select(".list-group-item div[data-role='token-description']")
assert len(tokens) == 1
assert tokens[0].get_text(strip=True) == "(no name)"
@pytest.mark.vcr()
def test_user_settings_otp_check_description_escaping(
client, logged_in_dummy_user, cleanup_dummy_tokens
):
"""Test that we escape the token description when constructing the OTP URI"""
result = client.post(
"/user/dummy/settings/otp/",
data={
"add-description": "pants token",
"add-password": "dummy_password",
"add-submit": "1",
},
follow_redirects=True,
)
page = BeautifulSoup(result.data, "html.parser")
otp_uri = page.select_one("input#otp-uri")
print(page.prettify())
assert otp_uri is not None
parsed_otp_uri = urlparse(otp_uri["value"])
# Not sure we need all of these checked
assert parsed_otp_uri.scheme == "otpauth"
assert parsed_otp_uri.netloc == "totp"
assert parsed_otp_uri.path == "/dummy%40NOGGIN.TEST:pants%20token"
parsed_query = parse_qs(parsed_otp_uri.query)
assert parsed_query["issuer"] == ["dummy@NOGGIN.TEST"]
@pytest.mark.vcr()
def test_user_settings_otp_add_no_permission(client, logged_in_dummy_user, totp_token):
"""Verify that another user can't make an otp token."""
result = client.post(
"/user/dudemcpants/settings/otp/",
data={
"confirm-description": "pants token",
"confirm-secret": totp_token.secret,
"confirm-code": totp_token.now(),
"confirm-submit": "1",
},
)
assert_redirects_with_flash(
result,
expected_url="/user/dudemcpants/",
expected_message="You do not have permission to edit this account.",
expected_category="danger",
)
@pytest.mark.vcr()
def test_user_settings_otp_add_invalid_form(client, logged_in_dummy_user):
"""Test an invalid form when adding an otp token"""
result = client.post("/user/dummy/settings/otp/", data={"add-submit": "1"})
assert_form_field_error(result, "add-password", "You must provide a password")
@pytest.mark.vcr()
def test_user_settings_otp_add_wrong_password(client, logged_in_dummy_user):
"""Test adding an otp token with the wrong password"""
result = client.post(
"/user/dummy/settings/otp/",
data={
"add-description": "pants token",
"add-password": "pants",
"add-submit": "1",
},
)
assert_form_field_error(result, "add-password", "Incorrect password")
@pytest.mark.vcr()
def test_user_settings_otp_add_wrong_code(client, logged_in_dummy_user, totp_token):
"""Test failure when adding an otptoken"""
result = client.post(
"/user/dummy/settings/otp/",
data={
"confirm-description": "pants token",
"confirm-secret": totp_token.secret,
"confirm-code": "123456",
"confirm-submit": "1",
},
)
assert_form_field_error(
result, "confirm-code", "The code is wrong, please try again."
)
@pytest.mark.vcr()
def test_user_settings_otp_add_invalid(client, logged_in_dummy_user, totp_token):
"""Test failure when adding an otptoken"""
with mock.patch("noggin.security.ipa.Client.otptoken_add") as method:
method.side_effect = python_freeipa.exceptions.ValidationError(
message={
"member": {"user": [("testuser", "something went wrong")], "group": []}
},
code="4242",
)
result = client.post(
"/user/dummy/settings/otp/",
data={
"confirm-description": "pants token",
"confirm-secret": totp_token.secret,
"confirm-code": totp_token.now(),
"confirm-submit": "1",
},
)
assert_form_generic_error(result, expected_message="Cannot create the token.")
@pytest.mark.vcr()
def test_user_settings_otp_disable_no_permission(client, logged_in_dummy_user):
"""Verify that another user can't disable an otp token."""
result = client.post(
"/user/dudemcpants/settings/otp/disable/",
data={"description": "pants token", "password": "dummy_password"},
)
assert_redirects_with_flash(
result,
expected_url="/user/dudemcpants/",
expected_message="You do not have permission to edit this account.",
expected_category="danger",
)
@pytest.mark.vcr()
def test_user_settings_otp_disable_invalid_form(client, logged_in_dummy_user):
"""Test an invalid form when disabling an otp token"""
result = client.post("/user/dummy/settings/otp/disable/", data={})
assert_redirects_with_flash(
result,
expected_url="/user/dummy/settings/otp/",
expected_message="Token must not be empty",
expected_category="danger",
)
@pytest.mark.vcr()
def test_user_settings_otp_disable_ipaerror(
client, logged_in_dummy_user, dummy_user_with_2_otp
):
"""Test failure when disabling an otptoken"""
with mock.patch("noggin.security.ipa.Client.otptoken_mod") as method:
method.side_effect = python_freeipa.exceptions.FreeIPAError(
message="Cannot disable the token.", code="4242"
)
result = client.post(
"/user/dummy/settings/otp/disable/",
data={"token": dummy_user_with_2_otp[1].uniqueid},
)
assert_redirects_with_flash(
result,
expected_url="/user/dummy/settings/otp/",
expected_message="Cannot disable the token.",
expected_category="danger",
)
@pytest.mark.vcr()
def test_user_settings_otp_disable(client, logged_in_dummy_user, dummy_user_with_2_otp):
"""Test deleting an otptoken"""
# add another OTP Token
result = client.get("/user/dummy/settings/otp/")
page = BeautifulSoup(result.data, "html.parser")
tokenlist = page.select("div.list-group .list-group-item")
# check we are showing 2 tokens
assert len(tokenlist) == 2
# grab the id of the first token
tokenid = tokenlist[0].select(".text-monospace")[0].get_text(strip=True)
# disable that token
result = client.post(
"/user/dummy/settings/otp/disable/",
data={"token": tokenid},
follow_redirects=True,
)
page = BeautifulSoup(result.data, "html.parser")
tokenlist = page.select("div.list-group .list-group-item")
# check we are still showing 2 items
assert len(tokenlist) == 2
@pytest.mark.vcr()
def test_user_settings_otp_disable_lasttoken(client, logged_in_dummy_user_with_otp):
"""Test trying to disable the last token"""
result = client.post(
"/user/dummy/settings/otp/disable/",
data={"token": logged_in_dummy_user_with_otp.uniqueid},
)
assert_redirects_with_flash(
result,
expected_url="/user/dummy/settings/otp/",
expected_message="Sorry, You cannot disable your last active token.",
expected_category="warning",
)
@pytest.mark.vcr()
def test_user_settings_otp_disable_ipabadrequest(
client, logged_in_dummy_user, dummy_user_with_2_otp
):
"""Test IPA badrequest failure when disabling an otptoken"""
with mock.patch("noggin.security.ipa.Client.otptoken_mod") as method:
method.side_effect = python_freeipa.exceptions.BadRequest(
message="Cannot delete the token.", code="4242"
)
result = client.post(
"/user/dummy/settings/otp/disable/",
data={"token": "0be795bd-b7d3-49b2-89d7-889522d7f1ba"},
)
assert_redirects_with_flash(
result,
expected_url="/user/dummy/settings/otp/",
expected_message="Cannot disable the token.",
expected_category="danger",
)
@pytest.mark.vcr()
def test_user_settings_otp_delete_no_permission(client, logged_in_dummy_user):
"""Verify that another user can't delete an otp token."""
result = client.post(
"/user/dudemcpants/settings/otp/delete/", data={"token": "aabbcc-aabbcc"}
)
assert_redirects_with_flash(
result,
expected_url="/user/dudemcpants/",
expected_message="You do not have permission to edit this account.",
expected_category="danger",
)
@pytest.mark.vcr()
def test_user_settings_otp_delete_invalid_form(client, logged_in_dummy_user):
"""Test an invalid form when deleting an otp token"""
result = client.post("/user/dummy/settings/otp/delete/", data={})
assert_redirects_with_flash(
result,
expected_url="/user/dummy/settings/otp/",
expected_message="Token must not be empty",
expected_category="danger",
)
@pytest.mark.vcr()
def test_user_settings_otp_delete_ipafailure(
client, logged_in_dummy_user, dummy_user_with_2_otp
):
"""Test IPA failure when deleting an otptoken"""
with mock.patch("noggin.security.ipa.Client.otptoken_del") as method:
method.side_effect = python_freeipa.exceptions.FreeIPAError(
message="Cannot delete the token.", code="4242"
)
result = client.post(
"/user/dummy/settings/otp/delete/",
data={"token": "0be795bd-b7d3-49b2-89d7-889522d7f1ba"},
)
assert_redirects_with_flash(
result,
expected_url="/user/dummy/settings/otp/",
expected_message="Cannot delete the token.",
expected_category="danger",
)
@pytest.mark.vcr()
def test_user_settings_otp_delete_ipabadrequest(
client, logged_in_dummy_user, dummy_user_with_2_otp
):
"""Test IPA badrequest failure when deleting an otptoken"""
with mock.patch("noggin.security.ipa.Client.otptoken_del") as method:
method.side_effect = python_freeipa.exceptions.BadRequest(
message="Cannot delete the token.", code="4242"
)
result = client.post(
"/user/dummy/settings/otp/delete/",
data={"token": "0be795bd-b7d3-49b2-89d7-889522d7f1ba"},
)
assert_redirects_with_flash(
result,
expected_url="/user/dummy/settings/otp/",
expected_message="Cannot delete the token.",
expected_category="danger",
)
@pytest.mark.vcr()
def test_user_settings_otp_delete(client, logged_in_dummy_user, dummy_user_with_2_otp):
"""Test deleting an otptoken"""
result = client.get("/user/dummy/settings/otp/")
page = BeautifulSoup(result.data, "html.parser")
tokenlist = page.select("div.list-group .list-group-item")
# check we are showing 2 tokens
assert len(tokenlist) == 2
# grab the id of the first token
tokenid = tokenlist[0].select(".text-monospace")[0].get_text(strip=True)
# delete that token
result = client.post(
"/user/dummy/settings/otp/delete/",
data={"token": tokenid},
follow_redirects=True,
)
page = BeautifulSoup(result.data, "html.parser")
tokenlist = page.select("div.list-group .list-group-item")
# check we are showing 1 item
assert len(tokenlist) == 1
# check that the one item is not the "no tokens" message
assert "You have no OTP tokens" not in tokenlist[0].get_text(strip=True)
@pytest.mark.vcr()
def test_user_settings_otp_delete_lasttoken(
client, logged_in_dummy_user, logged_in_dummy_user_with_otp
):
"""Test trying to delete the last token"""
result = client.get("/user/dummy/settings/otp/")
page = BeautifulSoup(result.data, "html.parser")
tokenlist = page.select("div.list-group .list-group-item")
# check we are showing 1 token
assert len(tokenlist) == 1
# check that the one item is not the "no tokens" message
assert "You have no OTP tokens" not in tokenlist[0].get_text(strip=True)
# grab the id of the token
tokenid = tokenlist[0].select(".text-monospace")[0].get_text(strip=True)
# try to delete that token
result = client.post("/user/dummy/settings/otp/delete/", data={"token": tokenid})
assert_redirects_with_flash(
result,
expected_url="/user/dummy/settings/otp/",
expected_message="Sorry, You cannot delete your last active token.",
expected_category="warning",
)
@pytest.mark.vcr()
def test_user_settings_otp_enable_no_permission(client, logged_in_dummy_user):
"""Verify that another user can't enable an otp token."""
result = client.post(
"/user/dudemcpants/settings/otp/enable/",
data={"description": "pants token", "password": "dummy_password"},
)
assert_redirects_with_flash(
result,
expected_url="/user/dudemcpants/",
expected_message="You do not have permission to edit this account.",
expected_category="danger",
)
@pytest.mark.vcr()
def test_user_settings_otp_enable_invalid_form(client, logged_in_dummy_user):
"""Test an invalid form when enabling an otp token"""
result = client.post("/user/dummy/settings/otp/enable/", data={})
assert_redirects_with_flash(
result,
expected_url="/user/dummy/settings/otp/",
expected_message="Token must not be empty",
expected_category="danger",
)
@pytest.mark.vcr()
def test_user_settings_otp_enable_ipaerror(
client, logged_in_dummy_user, dummy_user_with_2_otp
):
"""Test failure when enabling an otptoken"""
with mock.patch("noggin.security.ipa.Client.otptoken_mod") as method:
method.side_effect = python_freeipa.exceptions.FreeIPAError(
message="Cannot enable the token.", code="4242"
)
result = client.post(
"/user/dummy/settings/otp/enable/",
data={"token": dummy_user_with_2_otp[1].uniqueid},
)
assert_redirects_with_flash(
result,
expected_url="/user/dummy/settings/otp/",
expected_message="Cannot enable the token. Cannot enable the token.",
expected_category="danger",
)
@pytest.mark.vcr()
def test_user_settings_otp_enable(client, logged_in_dummy_user, dummy_user_with_2_otp):
"""Test enabling an otptoken"""
# add another OTP Token
result = client.get("/user/dummy/settings/otp/")
page = BeautifulSoup(result.data, "html.parser")
tokenlist = page.select("div.list-group .list-group-item")
# check we are showing 2 tokens
assert len(tokenlist) == 2
# grab the id of the first token
tokenid = tokenlist[0].select(".text-monospace")[0].get_text(strip=True)
# disable that token
result = client.post(
"/user/dummy/settings/otp/disable/",
data={"token": tokenid},
follow_redirects=True,
)
page = BeautifulSoup(result.data, "html.parser")
# select all the tokens, disabled and enabled
tokenlist = page.select("div.list-group .list-group-item")
# check we are showing 2 tokens
assert len(tokenlist) == 2
# select just the disabled tokens
tokenlist = page.select("div.list-group .list-group-item.text-muted")
# check we are showing 1 disabled item
assert len(tokenlist) == 1
# enable that token
result = client.post(
"/user/dummy/settings/otp/enable/",
data={"token": tokenid},
follow_redirects=True,
)
page = BeautifulSoup(result.data, "html.parser")
# select all the tokens, disabled and enabled
tokenlist = page.select("div.list-group .list-group-item")
# check we are showing 2 tokens
assert len(tokenlist) == 2
# try to select just the disabled tokens
tokenlist = page.select("div.list-group .list-group-item.text-muted")
# check we are showing 0 disabled tokens
assert len(tokenlist) == 0
@pytest.mark.vcr()
def test_user_settings_otp_rename(client, logged_in_dummy_user_with_otp):
"""Test renaming an otp token"""
tokenid = logged_in_dummy_user_with_otp.uniqueid
# rename the token
result = client.post(
"/user/dummy/settings/otp/rename/",
data={"token": tokenid, "description": "the new name"},
follow_redirects=True,
)
page = BeautifulSoup(result.data, "html.parser")
tokenlist = page.select("div.list-group .list-group-item")
assert len(tokenlist) == 1
desc = (
tokenlist[0]
.select("div[data-role='token-description']")[0]
.get_text(strip=True)
)
assert desc == "the new name"
@pytest.mark.vcr()
def test_user_settings_otp_rename_no_change(client, logged_in_dummy_user_with_otp):
"""Test renaming an otp token with no actual change"""
tokenid = logged_in_dummy_user_with_otp.uniqueid
desc = logged_in_dummy_user_with_otp.description
result = client.post(
"/user/dummy/settings/otp/rename/",
data={"token": tokenid, "description": desc},
follow_redirects=True,
)
page = BeautifulSoup(result.data, "html.parser")
tokenlist = page.select("div.list-group .list-group-item")
assert len(tokenlist) == 1
new_desc = (
tokenlist[0]
.select("div[data-role='token-description']")[0]
.get_text(strip=True)
)
assert new_desc == desc
@pytest.mark.vcr()
def test_user_settings_otp_rename_ipaerror(client, logged_in_dummy_user_with_otp):
"""Test failure when renaming an otptoken"""
tokenid = logged_in_dummy_user_with_otp.uniqueid
with mock.patch("noggin.security.ipa.Client.otptoken_mod") as method:
method.side_effect = python_freeipa.exceptions.FreeIPAError(
message="Whoops", code="4242"
)
result = client.post(
"/user/dummy/settings/otp/rename/",
data={"token": tokenid},
)
assert_redirects_with_flash(
result,
expected_url="/user/dummy/settings/otp/",
expected_message="Cannot rename the token.",
expected_category="danger",
)
@pytest.mark.vcr()
def test_user_settings_otp_rename_invalid_form(client, logged_in_dummy_user_with_otp):
"""Test an invalid form when renaming an otp token"""
result = client.post("/user/dummy/settings/otp/rename/", data={})
assert_redirects_with_flash(
result,
expected_url="/user/dummy/settings/otp/",
expected_message="Token must not be empty",
expected_category="danger",
)
| 33.640927
| 88
| 0.664869
| 3337
| 26139
| 5.007192
| 0.077015
| 0.057933
| 0.049853
| 0.058651
| 0.860375
| 0.84643
| 0.823867
| 0.807828
| 0.786283
| 0.747202
| 0
| 0.008994
| 0.213053
| 26139
| 776
| 89
| 33.684278
| 0.803306
| 0.105551
| 0
| 0.639794
| 0
| 0
| 0.247357
| 0.103569
| 0
| 0
| 0
| 0
| 0.135506
| 1
| 0.060034
| false
| 0.017153
| 0.015437
| 0.001715
| 0.077187
| 0.001715
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
6265b676f2cd910da3dc0fd5da5f393a3e86998d
| 92
|
py
|
Python
|
scaffold/views.py
|
gauribhoite/myblogproject
|
1716a332978bc6b7a74ae566c4964d104341b69d
|
[
"Apache-2.0"
] | null | null | null |
scaffold/views.py
|
gauribhoite/myblogproject
|
1716a332978bc6b7a74ae566c4964d104341b69d
|
[
"Apache-2.0"
] | null | null | null |
scaffold/views.py
|
gauribhoite/myblogproject
|
1716a332978bc6b7a74ae566c4964d104341b69d
|
[
"Apache-2.0"
] | null | null | null |
from django.shortcuts import render
def home(request):
return render(request, "base.html")
| 23
| 36
| 0.782609
| 13
| 92
| 5.538462
| 0.846154
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108696
| 92
| 4
| 36
| 23
| 0.878049
| 0
| 0
| 0
| 0
| 0
| 0.096774
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
6282fc7dca38eb2aa625315f335ac11bdd1fa26a
| 14749
|
py
|
Python
|
tests/test_tsdf_integrate_warped.py
|
Algomorph/NeuralTracking
|
6312be8e18828344c65e25a423c239efcd3428dd
|
[
"Apache-2.0"
] | 3
|
2021-04-18T04:23:08.000Z
|
2022-02-01T08:37:51.000Z
|
tests/test_tsdf_integrate_warped.py
|
Algomorph/NeuralTracking
|
6312be8e18828344c65e25a423c239efcd3428dd
|
[
"Apache-2.0"
] | 24
|
2021-05-28T21:59:11.000Z
|
2022-02-03T16:09:41.000Z
|
tests/test_tsdf_integrate_warped.py
|
Algomorph/NeuralTracking
|
6312be8e18828344c65e25a423c239efcd3428dd
|
[
"Apache-2.0"
] | 5
|
2021-03-10T02:56:16.000Z
|
2021-12-14T06:04:50.000Z
|
import math
from typing import Tuple
import numpy as np
import open3d as o3d
import open3d.core as o3c
import pytest
from dq3d import quat, dualquat
import nnrt
import nnrt.geometry as nnrt_geom
from image_processing import compute_normals
from image_processing.numba_cuda.preprocessing import cuda_compute_normal
from image_processing.numpy_cpu.preprocessing import cpu_compute_normal
def generate_xy_plane_depth_image(resolution: Tuple[int, int], depth: int) -> np.ndarray:
image = np.ones(resolution, dtype=np.uint16) * depth
return image
def generate_xy_plane_color_image(resolution: Tuple[int, int], value: Tuple[int, int, int]) -> np.ndarray:
image = np.ndarray((resolution[0], resolution[1], 3), dtype=np.uint8)
image[:, :] = value
return image
def construct_intrinsic_matrix1_3x3():
intrinsics = np.eye(3, dtype=np.float32)
intrinsics[0, 0] = 100.0
intrinsics[1, 1] = 100.0
intrinsics[0, 2] = 50.0
intrinsics[1, 2] = 50.0
return intrinsics
def construct_intrinsic_matrix1_4x4():
intrinsics = np.eye(4, dtype=np.float32)
intrinsics[0, 0] = 100.0
intrinsics[1, 1] = 100.0
intrinsics[0, 2] = 50.0
intrinsics[1, 2] = 50.0
return intrinsics
def construct_test_volume1(device=o3d.core.Device('cuda:0')):
# initialize volume
voxel_size = 0.01 # 1 cm voxel size
sdf_truncation_distance = 0.02 # truncation distance = 2cm
block_resolution = 8 # 8^3 voxel blocks
initial_block_count = 128 # initially allocated number of voxel blocks
volume = nnrt.geometry.WarpableTSDFVoxelGrid(
{
'tsdf': o3d.core.Dtype.Float32,
'weight': o3d.core.Dtype.UInt16,
'color': o3d.core.Dtype.UInt16
},
voxel_size=voxel_size,
sdf_trunc=sdf_truncation_distance,
block_resolution=block_resolution,
block_count=initial_block_count,
device=device)
# generate image
image_width = 100
image_height = 100
image_resolution = (image_width, image_height)
depth = 50 # mm
depth_image = generate_xy_plane_depth_image(image_resolution, depth)
depth_image_gpu = o3d.t.geometry.Image(o3c.Tensor(depth_image, device=device))
value = (100, 100, 100)
color_image = generate_xy_plane_color_image(image_resolution, value)
color_image_gpu = o3d.t.geometry.Image(o3c.Tensor(color_image, device=device))
# set up matrix parameters
intrinsics = construct_intrinsic_matrix1_3x3()
intrinsics_open3d_gpu = o3c.Tensor(intrinsics, device=device)
extrinsics_open3d_gpu = o3c.Tensor(np.eye(4, dtype=np.float32), device=device)
# integrate volume
volume.integrate(depth_image_gpu, color_image_gpu, intrinsics_open3d_gpu, extrinsics_open3d_gpu, 1000.0, 3.0)
return volume
@pytest.mark.parametrize("device", [o3d.core.Device('cuda:0'), o3d.core.Device('cpu:0')])
def test_integrate_warped_simple_motion_dq(device):
camera_rotation = np.ascontiguousarray(np.eye(3, dtype=np.float32))
camera_translation = np.ascontiguousarray(np.zeros(3, dtype=np.float32))
# we need at least four nodes this time, otherwise the psdf computation will consider the voxel invalid and produce "NaN".
# Make it five.
nodes = np.array([[0.0, 0.0, 0.05],
[0.02, 0.0, 0.05],
[-0.02, 0.0, 0.05],
[0.00, 0.02, 0.05],
[0.00, -0.02, 0.05]],
dtype=np.float32)
volume = construct_test_volume1(device)
voxel_tsdf_and_weights: o3c.Tensor = volume.extract_tsdf_values_and_weights()
voxel_tsdf_and_weights_np_originals = voxel_tsdf_and_weights.cpu().numpy()
# the first node moves 1 cm along the negative z axis (towards the camera).
node_dual_quaternions_dq3d = [dualquat(quat.identity(), quat(1.0, 0.0, 0.0, -0.005))] + [dualquat(quat.identity())] * (len(nodes) - 1)
node_dual_quaternions = np.array([np.concatenate((dq.real.data, dq.dual.data)) for dq in node_dual_quaternions_dq3d])
depth = 50 # mm
image_width = 100
image_height = 100
image_resolution = (image_width, image_height)
depth_image = generate_xy_plane_depth_image(image_resolution, depth)
# let's imagine that the central surface point is 1 cm closer to the camera as well, so we alter the depth
# to 40 mm there. Make the motion cease at the other four nodes, i.e. their depth should remain at 50.
# We can make a radial "pinch" in the center of the depth image.
# For our predefined camera at this depth, 1 px = 0.0005 m, and the nodes are around the 0.02 m radius,
# which puts our pixel radius at 0.02 / 0.0005 = 40 px
pinch_diameter = 40
pinch_radius = pinch_diameter // 2
pinch_height = 10
y_coordinates = np.linspace(-1, 1, pinch_diameter)[None, :] * pinch_height
x_coordinates = np.linspace(-1, 1, pinch_diameter)[:, None] * pinch_height
delta = -pinch_height + np.sqrt(x_coordinates ** 2 + y_coordinates ** 2)
half_image_width = image_width // 2
half_image_height = image_height // 2
# @formatter:off
depth_image[half_image_height - pinch_radius:half_image_height + pinch_radius,
half_image_width - pinch_radius:half_image_width + pinch_radius] += np.round(delta).astype(np.uint16)
# @formatter:on
# ---- compute normals ----
intrinsic_matrix = construct_intrinsic_matrix1_3x3()
fx, fy, cx, cy = intrinsic_matrix[0, 0], intrinsic_matrix[1, 1], intrinsic_matrix[0, 2], intrinsic_matrix[1, 2]
point_image = nnrt.backproject_depth_ushort(depth_image, fx, fy, cx, cy, 1000.0)
normals = compute_normals(device, point_image)
# ---- compute updates ----
truncation_distance = 0.02 # same value as in construct_test_volume1
node_coverage = 0.05
depth_image_o3d = o3d.t.geometry.Image.from_legacy_image(o3d.geometry.Image(depth_image), device=device)
normals_o3d = o3c.Tensor(normals, dtype=o3c.Dtype.Float32, device=device)
intrinsic_matrix_o3d = o3c.Tensor(intrinsic_matrix, dtype=o3c.Dtype.Float32, device=device)
extrinsic_matrix_o3d = o3c.Tensor.eye(4, dtype=o3c.Dtype.Float32, device=device)
node_dual_quaternions_o3d = o3c.Tensor(node_dual_quaternions, dtype=o3c.Dtype.Float32, device=device)
nodes_o3d = o3c.Tensor(nodes, dtype=o3c.Dtype.Float32, device=device)
node_edges_o3d = o3c.Tensor((1, 1))
cos_voxel_ray_to_normal = volume.integrate_warped_dq(
depth_image_o3d, normals_o3d, intrinsic_matrix_o3d, extrinsic_matrix_o3d,
nodes_o3d, node_edges_o3d, node_dual_quaternions_o3d, node_coverage,
anchor_count=4, minimum_valid_anchor_count=3, depth_scale=1000.0, depth_max=3.0,
compute_anchors_using=nnrt_geom.AnchorComputationMethod.EUCLIDEAN, use_node_distance_thresholding=False)
cos_voxel_ray_to_normal = np.squeeze(cos_voxel_ray_to_normal.cpu().numpy())
voxel_tsdf_and_weights: o3c.Tensor = volume.extract_tsdf_values_and_weights()
voxel_tsdf_and_weights_np = voxel_tsdf_and_weights.cpu().numpy()
# voxel in the center of the plane is at 0, 0, 0.05,
# which should coincide with the first node
# voxel global position is (0, 0, 5) (in voxels)
# voxel is, presumably, in block 3
# voxel's index in block 0 is 5 * (8*8) = 320
# each block holds 512 voxels
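# i.e. assuming block 3, skip the three preceding 8^3 = 512-voxel blocks and add the in-block offset 320: 3 * 512 + 320 = 1856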
center_plane_voxel_index = 512 + 512 + 512 + 320
indices_to_test = [center_plane_voxel_index,
center_plane_voxel_index + 1,
center_plane_voxel_index + 8,
center_plane_voxel_index + 16,
center_plane_voxel_index + 64]
# generated using the above function.
# Note: if anything about the reference implementation changes, these residuals need to be re-computed.
# each array row contains:
# u, v, cosine, tsdf, weight
ground_truth_data = np.array([
[50, 50, 0.4970065653324127, 0.0, 0.0],
[71, 50, 0.9784621335214618, 0.06499883711342021, 2.0],
[50, 71, 0.9784621335214618, 0.06499883711342021, 2.0],
[50, 92, 0.9215041958391356, 0.06362117264804237, 2.0],
[50, 50, 0.4970065653324127, 0.0, 0.0]
])
def check_voxel_at(index, ground_truth):
assert math.isclose(cos_voxel_ray_to_normal[int(ground_truth[0]), int(ground_truth[1])], ground_truth[2], abs_tol=1e-7)
if ground_truth[2] > 0.5:
assert np.allclose(voxel_tsdf_and_weights_np[index], ground_truth[3:])
for index, ground_truth in zip(indices_to_test, ground_truth_data):
check_voxel_at(index, ground_truth)
@pytest.mark.parametrize("device", [o3d.core.Device('cuda:0'), o3d.core.Device('cpu:0')])
def test_integrate_warped_simple_motion_mat(device):
camera_rotation = np.ascontiguousarray(np.eye(3, dtype=np.float32))
camera_translation = np.ascontiguousarray(np.zeros(3, dtype=np.float32))
# we need at least four nodes this time, otherwise the psdf computation will consider the voxel invalid and produce "NaN".
# Make it five.
nodes = np.array([[0.0, 0.0, 0.05],
[0.02, 0.0, 0.05],
[-0.02, 0.0, 0.05],
[0.00, 0.02, 0.05],
[0.00, -0.02, 0.05]],
dtype=np.float32)
# voxel size = 0.01 m
volume = construct_test_volume1(device)
voxel_tsdf_and_weights: o3c.Tensor = volume.extract_tsdf_values_and_weights()
voxel_tsdf_and_weights_np_originals = voxel_tsdf_and_weights.cpu().numpy()
# the first node moves 1 cm along the negative z axis (towards the camera).
node_dual_quaternions_dq3d = [dualquat(quat.identity(), quat(1.0, 0.0, 0.0, -0.005))] + [dualquat(quat.identity())] * (len(nodes) - 1)
node_rotations_mat = np.array([dq.rotation().to_rotation_matrix().astype(np.float32) for dq in node_dual_quaternions_dq3d])
node_translations_vec = np.array([dq.translation().astype(np.float32) for dq in node_dual_quaternions_dq3d])
depth = 50 # mm
image_width = 100
image_height = 100
image_resolution = (image_width, image_height)
depth_image = generate_xy_plane_depth_image(image_resolution, depth)
color_image = np.zeros((image_height, image_width, 3), dtype=np.uint8)
# let's imagine that the central surface point is 1 cm closer to the camera as well, so we alter the depth
# to 40 mm there. Make the motion cease at the other four nodes, i.e. their depth should remain at 50.
# We can make a radial "pinch" in the center of the depth image.
# For our predefined camera at this depth, 1 px = 0.0005 m, and the nodes are around the 0.02 m radius,
# which puts our pixel radius at 0.02 / 0.0005 = 40 px
pinch_diameter = 40
pinch_radius = pinch_diameter // 2
pinch_height = 10
y_coordinates = np.linspace(-1, 1, pinch_diameter)[None, :] * pinch_height
x_coordinates = np.linspace(-1, 1, pinch_diameter)[:, None] * pinch_height
delta = -pinch_height + np.sqrt(x_coordinates ** 2 + y_coordinates ** 2)
half_image_width = image_width // 2
half_image_height = image_height // 2
# @formatter:off
depth_image[half_image_height - pinch_radius:half_image_height + pinch_radius,
half_image_width - pinch_radius:half_image_width + pinch_radius] += np.round(delta).astype(np.uint16)
# @formatter:on
# ---- compute normals ----
intrinsic_matrix = construct_intrinsic_matrix1_3x3()
fx, fy, cx, cy = intrinsic_matrix[0, 0], intrinsic_matrix[1, 1], intrinsic_matrix[0, 2], intrinsic_matrix[1, 2]
point_image = nnrt.backproject_depth_ushort(depth_image, fx, fy, cx, cy, 1000.0)
normals = compute_normals(device, point_image)
# ---- compute updates ----
node_coverage = 0.05
depth_image_o3d = o3d.t.geometry.Image.from_legacy_image(o3d.geometry.Image(depth_image), device=device)
color_image_o3d = o3d.t.geometry.Image.from_legacy_image(o3d.geometry.Image(color_image), device=device)
normals_o3d = o3c.Tensor(normals, dtype=o3c.Dtype.Float32, device=device)
intrinsic_matrix_o3d = o3c.Tensor(intrinsic_matrix, dtype=o3c.Dtype.Float32, device=device)
extrinsic_matrix_o3d = o3c.Tensor.eye(4, dtype=o3c.Dtype.Float32, device=device)
node_rotations_o3d = o3c.Tensor(node_rotations_mat, dtype=o3c.Dtype.Float32, device=device)
node_translations_o3d = o3c.Tensor(node_translations_vec, dtype=o3c.Dtype.Float32, device=device)
nodes_o3d = o3c.Tensor(nodes, dtype=o3c.Dtype.Float32, device=device)
edges_o3d = o3c.Tensor((1, 1))
cos_voxel_ray_to_normal = volume.integrate_warped_mat(
depth_image_o3d, color_image_o3d, normals_o3d, intrinsic_matrix_o3d, extrinsic_matrix_o3d,
nodes_o3d, edges_o3d, node_rotations_o3d, node_translations_o3d, node_coverage,
anchor_count=4, minimum_valid_anchor_count=3, depth_scale=1000.0, depth_max=3.0,
compute_anchors_using=nnrt_geom.AnchorComputationMethod.EUCLIDEAN, use_node_distance_thresholding=False)
cos_voxel_ray_to_normal = np.squeeze(cos_voxel_ray_to_normal.cpu().numpy())
voxel_tsdf_and_weights: o3c.Tensor = volume.extract_tsdf_values_and_weights()
voxel_tsdf_and_weights_np = voxel_tsdf_and_weights.cpu().numpy()
# voxel in the center of the plane is at 0, 0, 0.05,
# which should coincide with the first node
# voxel global position is (0, 0, 5) (in voxels)
# voxel is, presumably, in block 3
# voxel's index in block 0 is 5 * (8*8) = 320
# each block holds 512 voxels
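# i.e. assuming block 3, skip the three preceding 8^3 = 512-voxel blocks and add the in-block offset 320: 3 * 512 + 320 = 1856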
center_plane_voxel_index = 512 + 512 + 512 + 320
indices_to_test = [center_plane_voxel_index,
center_plane_voxel_index + 1, # x + 1
center_plane_voxel_index + 8, # y + 1
center_plane_voxel_index + 16, # y + 2
center_plane_voxel_index + 64] # z + 1
# generated using the above function.
# Note: if anything about the reference implementation changes, these residuals need to be re-computed.
# each array row contains:
# v, u, cosine, tsdf, weight
ground_truth_data = np.array([
[50, 50, 0.4970065653324127, 0.0, 0.0],
[71, 50, 0.9784621335214618, 0.06499883711342021, 2.0],
[50, 71, 0.9784621335214618, 0.06499883711342021, 2.0],
[50, 92, 0.9215041958391356, 0.06362117264804237, 2.0],
[50, 50, 0.4970065653324127, 0.0, 0.0]
])
def check_voxel_at(index, ground_truth):
assert math.isclose(cos_voxel_ray_to_normal[int(ground_truth[0]), int(ground_truth[1])], ground_truth[2], abs_tol=1e-7)
if ground_truth[2] > 0.5:
assert np.allclose(voxel_tsdf_and_weights_np[index], ground_truth[3:])
for index, ground_truth in zip(indices_to_test, ground_truth_data):
check_voxel_at(index, ground_truth)
| 47.731392
| 138
| 0.699098
| 2171
| 14749
| 4.508521
| 0.135882
| 0.009808
| 0.008582
| 0.005721
| 0.821925
| 0.788823
| 0.778504
| 0.774418
| 0.76747
| 0.76747
| 0
| 0.081872
| 0.195878
| 14749
| 308
| 139
| 47.886364
| 0.743423
| 0.172418
| 0
| 0.684466
| 0
| 0
| 0.004529
| 0
| 0
| 0
| 0
| 0
| 0.019417
| 1
| 0.043689
| false
| 0
| 0.058252
| 0
| 0.126214
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
62918911ecd51087e9458258624450c1c89aab02
| 20362
|
py
|
Python
|
pybind/slxos/v16r_1_00b/brocade_vcs_rpc/get_vcs_details/output/vcs_details/__init__.py
|
shivharis/pybind
|
4e1c6d54b9fd722ccec25546ba2413d79ce337e6
|
[
"Apache-2.0"
] | null | null | null |
pybind/slxos/v16r_1_00b/brocade_vcs_rpc/get_vcs_details/output/vcs_details/__init__.py
|
shivharis/pybind
|
4e1c6d54b9fd722ccec25546ba2413d79ce337e6
|
[
"Apache-2.0"
] | null | null | null |
pybind/slxos/v16r_1_00b/brocade_vcs_rpc/get_vcs_details/output/vcs_details/__init__.py
|
shivharis/pybind
|
4e1c6d54b9fd722ccec25546ba2413d79ce337e6
|
[
"Apache-2.0"
] | 1
|
2021-11-05T22:15:42.000Z
|
2021-11-05T22:15:42.000Z
|
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
class vcs_details(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-vcs - based on the path /brocade_vcs_rpc/get-vcs-details/output/vcs-details. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__principal_switch_wwn','__co_ordinator_wwn','__local_switch_wwn','__node_vcs_mode','__node_vcs_type','__node_vcs_id',)
_yang_name = 'vcs-details'
_rest_name = 'vcs-details'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__principal_switch_wwn = YANGDynClass(base=unicode, is_leaf=True, yang_name="principal-switch-wwn", rest_name="principal-switch-wwn", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-vcs', defining_module='brocade-vcs', yang_type='string', is_config=True)
self.__local_switch_wwn = YANGDynClass(base=unicode, is_leaf=True, yang_name="local-switch-wwn", rest_name="local-switch-wwn", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-vcs', defining_module='brocade-vcs', yang_type='string', is_config=True)
self.__node_vcs_id = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="node-vcs-id", rest_name="node-vcs-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-vcs', defining_module='brocade-vcs', yang_type='uint32', is_config=True)
self.__node_vcs_type = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'vcs-fabric-cluster': {'value': 3}, u'vcs-unknown-cluster': {'value': 1}, u'vcs-stand-alone': {'value': 2}, u'vcs-management-cluster': {'value': 4}},), is_leaf=True, yang_name="node-vcs-type", rest_name="node-vcs-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-vcs', defining_module='brocade-vcs', yang_type='vcs-cluster-type', is_config=True)
self.__node_vcs_mode = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="node-vcs-mode", rest_name="node-vcs-mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-vcs', defining_module='brocade-vcs', yang_type='boolean', is_config=True)
self.__co_ordinator_wwn = YANGDynClass(base=unicode, is_leaf=True, yang_name="co-ordinator-wwn", rest_name="co-ordinator-wwn", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-vcs', defining_module='brocade-vcs', yang_type='string', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'brocade_vcs_rpc', u'get-vcs-details', u'output', u'vcs-details']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'get-vcs-details', u'output', u'vcs-details']
def _get_principal_switch_wwn(self):
"""
Getter method for principal_switch_wwn, mapped from YANG variable /brocade_vcs_rpc/get_vcs_details/output/vcs_details/principal_switch_wwn (string)
YANG Description: WWN of principal switch
"""
return self.__principal_switch_wwn
def _set_principal_switch_wwn(self, v, load=False):
"""
Setter method for principal_switch_wwn, mapped from YANG variable /brocade_vcs_rpc/get_vcs_details/output/vcs_details/principal_switch_wwn (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_principal_switch_wwn is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_principal_switch_wwn() directly.
YANG Description: WWN of principal switch
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="principal-switch-wwn", rest_name="principal-switch-wwn", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-vcs', defining_module='brocade-vcs', yang_type='string', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """principal_switch_wwn must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="principal-switch-wwn", rest_name="principal-switch-wwn", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-vcs', defining_module='brocade-vcs', yang_type='string', is_config=True)""",
})
self.__principal_switch_wwn = t
if hasattr(self, '_set'):
self._set()
def _unset_principal_switch_wwn(self):
self.__principal_switch_wwn = YANGDynClass(base=unicode, is_leaf=True, yang_name="principal-switch-wwn", rest_name="principal-switch-wwn", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-vcs', defining_module='brocade-vcs', yang_type='string', is_config=True)
def _get_co_ordinator_wwn(self):
"""
Getter method for co_ordinator_wwn, mapped from YANG variable /brocade_vcs_rpc/get_vcs_details/output/vcs_details/co_ordinator_wwn (string)
YANG Description: WWN of Co-ordinator switch
"""
return self.__co_ordinator_wwn
def _set_co_ordinator_wwn(self, v, load=False):
"""
Setter method for co_ordinator_wwn, mapped from YANG variable /brocade_vcs_rpc/get_vcs_details/output/vcs_details/co_ordinator_wwn (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_co_ordinator_wwn is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_co_ordinator_wwn() directly.
YANG Description: WWN of Co-ordinator switch
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="co-ordinator-wwn", rest_name="co-ordinator-wwn", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-vcs', defining_module='brocade-vcs', yang_type='string', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """co_ordinator_wwn must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="co-ordinator-wwn", rest_name="co-ordinator-wwn", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-vcs', defining_module='brocade-vcs', yang_type='string', is_config=True)""",
})
self.__co_ordinator_wwn = t
if hasattr(self, '_set'):
self._set()
def _unset_co_ordinator_wwn(self):
self.__co_ordinator_wwn = YANGDynClass(base=unicode, is_leaf=True, yang_name="co-ordinator-wwn", rest_name="co-ordinator-wwn", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-vcs', defining_module='brocade-vcs', yang_type='string', is_config=True)
def _get_local_switch_wwn(self):
"""
Getter method for local_switch_wwn, mapped from YANG variable /brocade_vcs_rpc/get_vcs_details/output/vcs_details/local_switch_wwn (string)
YANG Description: WWN of local switch
"""
return self.__local_switch_wwn
def _set_local_switch_wwn(self, v, load=False):
"""
Setter method for local_switch_wwn, mapped from YANG variable /brocade_vcs_rpc/get_vcs_details/output/vcs_details/local_switch_wwn (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_local_switch_wwn is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_local_switch_wwn() directly.
YANG Description: WWN of local switch
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="local-switch-wwn", rest_name="local-switch-wwn", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-vcs', defining_module='brocade-vcs', yang_type='string', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """local_switch_wwn must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="local-switch-wwn", rest_name="local-switch-wwn", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-vcs', defining_module='brocade-vcs', yang_type='string', is_config=True)""",
})
self.__local_switch_wwn = t
if hasattr(self, '_set'):
self._set()
def _unset_local_switch_wwn(self):
self.__local_switch_wwn = YANGDynClass(base=unicode, is_leaf=True, yang_name="local-switch-wwn", rest_name="local-switch-wwn", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-vcs', defining_module='brocade-vcs', yang_type='string', is_config=True)
def _get_node_vcs_mode(self):
"""
Getter method for node_vcs_mode, mapped from YANG variable /brocade_vcs_rpc/get_vcs_details/output/vcs_details/node_vcs_mode (boolean)
YANG Description: Node's VCS mode
"""
return self.__node_vcs_mode
def _set_node_vcs_mode(self, v, load=False):
"""
Setter method for node_vcs_mode, mapped from YANG variable /brocade_vcs_rpc/get_vcs_details/output/vcs_details/node_vcs_mode (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_node_vcs_mode is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_node_vcs_mode() directly.
YANG Description: Node's VCS mode
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="node-vcs-mode", rest_name="node-vcs-mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-vcs', defining_module='brocade-vcs', yang_type='boolean', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """node_vcs_mode must be of a type compatible with boolean""",
'defined-type': "boolean",
'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="node-vcs-mode", rest_name="node-vcs-mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-vcs', defining_module='brocade-vcs', yang_type='boolean', is_config=True)""",
})
self.__node_vcs_mode = t
if hasattr(self, '_set'):
self._set()
def _unset_node_vcs_mode(self):
self.__node_vcs_mode = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="node-vcs-mode", rest_name="node-vcs-mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-vcs', defining_module='brocade-vcs', yang_type='boolean', is_config=True)
def _get_node_vcs_type(self):
"""
Getter method for node_vcs_type, mapped from YANG variable /brocade_vcs_rpc/get_vcs_details/output/vcs_details/node_vcs_type (vcs-cluster-type)
YANG Description: Vcs Type
"""
return self.__node_vcs_type
def _set_node_vcs_type(self, v, load=False):
"""
Setter method for node_vcs_type, mapped from YANG variable /brocade_vcs_rpc/get_vcs_details/output/vcs_details/node_vcs_type (vcs-cluster-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_node_vcs_type is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_node_vcs_type() directly.
YANG Description: Vcs Type
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'vcs-fabric-cluster': {'value': 3}, u'vcs-unknown-cluster': {'value': 1}, u'vcs-stand-alone': {'value': 2}, u'vcs-management-cluster': {'value': 4}},), is_leaf=True, yang_name="node-vcs-type", rest_name="node-vcs-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-vcs', defining_module='brocade-vcs', yang_type='vcs-cluster-type', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """node_vcs_type must be of a type compatible with vcs-cluster-type""",
'defined-type': "brocade-vcs:vcs-cluster-type",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'vcs-fabric-cluster': {'value': 3}, u'vcs-unknown-cluster': {'value': 1}, u'vcs-stand-alone': {'value': 2}, u'vcs-management-cluster': {'value': 4}},), is_leaf=True, yang_name="node-vcs-type", rest_name="node-vcs-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-vcs', defining_module='brocade-vcs', yang_type='vcs-cluster-type', is_config=True)""",
})
self.__node_vcs_type = t
if hasattr(self, '_set'):
self._set()
def _unset_node_vcs_type(self):
self.__node_vcs_type = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'vcs-fabric-cluster': {'value': 3}, u'vcs-unknown-cluster': {'value': 1}, u'vcs-stand-alone': {'value': 2}, u'vcs-management-cluster': {'value': 4}},), is_leaf=True, yang_name="node-vcs-type", rest_name="node-vcs-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-vcs', defining_module='brocade-vcs', yang_type='vcs-cluster-type', is_config=True)
def _get_node_vcs_id(self):
"""
Getter method for node_vcs_id, mapped from YANG variable /brocade_vcs_rpc/get_vcs_details/output/vcs_details/node_vcs_id (uint32)
YANG Description: Vcs Id
"""
return self.__node_vcs_id
def _set_node_vcs_id(self, v, load=False):
"""
Setter method for node_vcs_id, mapped from YANG variable /brocade_vcs_rpc/get_vcs_details/output/vcs_details/node_vcs_id (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_node_vcs_id is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_node_vcs_id() directly.
YANG Description: Vcs Id
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="node-vcs-id", rest_name="node-vcs-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-vcs', defining_module='brocade-vcs', yang_type='uint32', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """node_vcs_id must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="node-vcs-id", rest_name="node-vcs-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-vcs', defining_module='brocade-vcs', yang_type='uint32', is_config=True)""",
})
self.__node_vcs_id = t
if hasattr(self, '_set'):
self._set()
def _unset_node_vcs_id(self):
self.__node_vcs_id = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="node-vcs-id", rest_name="node-vcs-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-vcs', defining_module='brocade-vcs', yang_type='uint32', is_config=True)
principal_switch_wwn = __builtin__.property(_get_principal_switch_wwn, _set_principal_switch_wwn)
co_ordinator_wwn = __builtin__.property(_get_co_ordinator_wwn, _set_co_ordinator_wwn)
local_switch_wwn = __builtin__.property(_get_local_switch_wwn, _set_local_switch_wwn)
node_vcs_mode = __builtin__.property(_get_node_vcs_mode, _set_node_vcs_mode)
node_vcs_type = __builtin__.property(_get_node_vcs_type, _set_node_vcs_type)
node_vcs_id = __builtin__.property(_get_node_vcs_id, _set_node_vcs_id)
_pyangbind_elements = {'principal_switch_wwn': principal_switch_wwn, 'co_ordinator_wwn': co_ordinator_wwn, 'local_switch_wwn': local_switch_wwn, 'node_vcs_mode': node_vcs_mode, 'node_vcs_type': node_vcs_type, 'node_vcs_id': node_vcs_id, }
| 63.236025
| 648
| 0.730036
| 2,871
| 20,362
| 4.87008
| 0.065134
| 0.042054
| 0.052067
| 0.024031
| 0.836146
| 0.80904
| 0.77564
| 0.74789
| 0.743885
| 0.727435
| 0
| 0.00518
| 0.146695
| 20,362
| 321
| 649
| 63.433022
| 0.79954
| 0.187604
| 0
| 0.441489
| 0
| 0.031915
| 0.326048
| 0.144163
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111702
| false
| 0
| 0.042553
| 0
| 0.276596
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
65823e068524739fbd367e3a3cce3e8c59b03559
| 54
|
py
|
Python
|
anthill/tools/services/dlc/__init__.py
|
0x55AAh/anthill_gaming
|
475af798bd08d85fc0fbfce9d2ba710f73252c15
|
[
"MIT"
] | 1
|
2018-11-30T21:56:14.000Z
|
2018-11-30T21:56:14.000Z
|
anthill/tools/services/dlc/__init__.py
|
0x55AAh/anthill_gaming
|
475af798bd08d85fc0fbfce9d2ba710f73252c15
|
[
"MIT"
] | null | null | null |
anthill/tools/services/dlc/__init__.py
|
0x55AAh/anthill_gaming
|
475af798bd08d85fc0fbfce9d2ba710f73252c15
|
[
"MIT"
] | null | null | null |
from .. import Service
class DLC(Service):
pass
| 9
| 22
| 0.666667
| 7
| 54
| 5.142857
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.240741
| 54
| 5
| 23
| 10.8
| 0.878049
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
65915cfb5f0d60ed61928969fa329c67ac6249be
| 189
|
py
|
Python
|
pyJMSHTML2_config.py
|
msghens/pyCanvasStomp
|
3e18fdab3d0ce6d2c67038b13876c845debab765
|
[
"MIT"
] | null | null | null |
pyJMSHTML2_config.py
|
msghens/pyCanvasStomp
|
3e18fdab3d0ce6d2c67038b13876c845debab765
|
[
"MIT"
] | 1
|
2018-02-20T19:46:55.000Z
|
2018-02-20T19:46:55.000Z
|
pyJMSHTML2_config.py
|
msghens/pyCanvasStomp
|
3e18fdab3d0ce6d2c67038b13876c845debab765
|
[
"MIT"
] | null | null | null |
# Not necessary. Just wanted to separate program from credentials
def apikey():
return 'apikey'
def stomp_username():
return 'stomp user'
def stomp_password():
return 'stomp pass'
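# Usage sketch (illustrative, not part of the original file): the helpers above
# exist so that the main program imports credentials instead of hard-coding
# them. The module name below follows the file name "pyJMSHTML2_config.py";
# the STOMP connection itself is only hinted at in a comment because its API
# is not shown in this file.
import pyJMSHTML2_config as config

api_key = config.apikey()
stomp_user = config.stomp_username()
stomp_pass = config.stomp_password()
# These values would then be handed to the STOMP client, e.g.
# conn.connect(stomp_user, stomp_pass, wait=True)  # hypothetical call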
| 17.181818
| 65
| 0.73545
| 25
| 189
| 5.48
| 0.68
| 0.116788
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.174603
| 189
| 10
| 66
| 18.9
| 0.878205
| 0.333333
| 0
| 0
| 0
| 0
| 0.209677
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0.333333
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 1
| 0
|
0
| 6
|
659ccee1d3446b6f6c389374def9924b99deb934
| 39
|
py
|
Python
|
Lab 1.py
|
Pranav847/MB215Lab1
|
19d0c8768f94996cbbd80f185bfa0bd4e4ccbbb5
|
[
"MIT"
] | null | null | null |
Lab 1.py
|
Pranav847/MB215Lab1
|
19d0c8768f94996cbbd80f185bfa0bd4e4ccbbb5
|
[
"MIT"
] | null | null | null |
Lab 1.py
|
Pranav847/MB215Lab1
|
19d0c8768f94996cbbd80f185bfa0bd4e4ccbbb5
|
[
"MIT"
] | null | null | null |
print("Hello world from Pranav Pandey")
| 39
| 39
| 0.794872
| 6
| 39
| 5.166667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.102564
| 39
| 1
| 39
| 39
| 0.885714
| 0
| 0
| 0
| 0
| 0
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
02db946a4881dc1c5bc1dd8209c5ca7ab4bf0cc5
| 33
|
py
|
Python
|
pydmfet/libgen/__init__.py
|
fishjojo/pydmfet
|
93cfc655314933d3531b5733521a1f95a044f6cb
|
[
"MIT"
] | 3
|
2021-02-26T06:26:00.000Z
|
2022-02-20T08:58:20.000Z
|
pydmfet/libgen/__init__.py
|
fishjojo/pydmfet
|
93cfc655314933d3531b5733521a1f95a044f6cb
|
[
"MIT"
] | null | null | null |
pydmfet/libgen/__init__.py
|
fishjojo/pydmfet
|
93cfc655314933d3531b5733521a1f95a044f6cb
|
[
"MIT"
] | null | null | null |
from pydmfet.libgen.ops import *
| 16.5
| 32
| 0.787879
| 5
| 33
| 5.2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.121212
| 33
| 1
| 33
| 33
| 0.896552
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
02e660bed4f590bed854c253bd541d2dd1b187eb
| 2,340
|
py
|
Python
|
pypeln/task/api/concat_task_test.py
|
quarckster/pypeln
|
f4160d0f4d4718b67f79a0707d7261d249459a4b
|
[
"MIT"
] | 1,281
|
2018-09-20T05:35:27.000Z
|
2022-03-30T01:29:48.000Z
|
pypeln/task/api/concat_task_test.py
|
webclinic017/pypeln
|
5231806f2cac9d2019dacbbcf913484fd268b8c1
|
[
"MIT"
] | 78
|
2018-09-18T20:38:12.000Z
|
2022-03-30T20:16:02.000Z
|
pypeln/task/api/concat_task_test.py
|
webclinic017/pypeln
|
5231806f2cac9d2019dacbbcf913484fd268b8c1
|
[
"MIT"
] | 88
|
2018-09-24T10:46:14.000Z
|
2022-03-28T09:34:50.000Z
|
import sys
import time
import typing as tp
from unittest import TestCase
import hypothesis as hp
from hypothesis import strategies as st
import pypeln as pl
MAX_EXAMPLES = 10
T = tp.TypeVar("T")
@hp.given(nums=st.lists(st.integers()))
@hp.settings(max_examples=MAX_EXAMPLES)
def test_concat_basic(nums: tp.List[int]):
nums_py = list(map(lambda x: x + 1, nums))
nums_py1 = list(map(lambda x: x ** 2, nums_py))
nums_py2 = list(map(lambda x: -x, nums_py))
nums_py = nums_py1 + nums_py2
nums_pl = pl.task.map(lambda x: x + 1, nums)
nums_pl1 = pl.task.map(lambda x: x ** 2, nums_pl)
nums_pl2 = pl.task.map(lambda x: -x, nums_pl)
nums_pl = pl.task.concat([nums_pl1, nums_pl2])
assert sorted(nums_pl) == sorted(nums_py)
@hp.given(nums=st.lists(st.integers()))
@hp.settings(max_examples=MAX_EXAMPLES)
@pl.task.utils.run_test_async
async def test_concat_basic_2(nums: tp.List[int]):
nums_py = list(map(lambda x: x + 1, nums))
nums_py1 = list(map(lambda x: x ** 2, nums_py))
nums_py2 = list(map(lambda x: -x, nums_py))
nums_py = nums_py1 + nums_py2
nums_pl = pl.task.map(lambda x: x + 1, nums)
nums_pl1 = pl.task.map(lambda x: x ** 2, nums_pl)
nums_pl2 = pl.task.map(lambda x: -x, nums_pl)
nums_pl = await pl.task.concat([nums_pl1, nums_pl2])
assert sorted(nums_pl) == sorted(nums_py)
# @hp.given(nums=st.lists(st.integers()))
# @hp.settings(max_examples=MAX_EXAMPLES)
def test_concat_multiple(nums: tp.List[int] = [1, 2, 3]):
nums_py = [x + 1 for x in nums]
nums_py1 = nums_py + nums_py
nums_py2 = nums_py1 + nums_py
nums_pl = pl.task.map(lambda x: x + 1, nums)
nums_pl1 = pl.task.concat([nums_pl, nums_pl])
nums_pl2 = pl.task.concat([nums_pl1, nums_pl])
# assert sorted(nums_py1) == sorted(list(nums_pl1))
assert sorted(nums_py2) == sorted(list(nums_pl2))
@pl.task.utils.run_test_async
async def test_concat_multiple_2(nums: tp.List[int] = [1, 2, 3]):
nums_py = [x + 1 for x in nums]
nums_py1 = nums_py + nums_py
nums_py2 = nums_py1 + nums_py
nums_pl = pl.task.map(lambda x: x + 1, nums)
nums_pl1 = pl.task.concat([nums_pl, nums_pl])
nums_pl2 = await pl.task.concat([nums_pl1, nums_pl])
# assert sorted(nums_py1) == sorted(list(nums_pl1))
assert sorted(nums_py2) == sorted(list(nums_pl2))
| 29.620253
| 65
| 0.675214
| 418
| 2,340
| 3.557416
| 0.124402
| 0.072629
| 0.094149
| 0.103564
| 0.878278
| 0.876933
| 0.876933
| 0.870208
| 0.870208
| 0.870208
| 0
| 0.030319
| 0.182479
| 2,340
| 78
| 66
| 30
| 0.746994
| 0.076496
| 0
| 0.666667
| 0
| 0
| 0.000464
| 0
| 0
| 0
| 0
| 0
| 0.078431
| 1
| 0.039216
| false
| 0
| 0.137255
| 0
| 0.176471
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
02eb2cd902368eda3d7a8cb691327e4d137cf22e
| 9,825
|
py
|
Python
|
timeflow/tests/tests.py
|
trimailov/timeflow
|
c77d448456eb55cb45ff2a594280d793abb4c2fc
|
[
"MIT"
] | 18
|
2015-11-21T07:56:20.000Z
|
2020-07-02T19:31:25.000Z
|
timeflow/tests/tests.py
|
trimailov/timeflow
|
c77d448456eb55cb45ff2a594280d793abb4c2fc
|
[
"MIT"
] | 2
|
2016-09-27T07:53:16.000Z
|
2018-09-03T06:11:47.000Z
|
timeflow/tests/tests.py
|
trimailov/timeflow
|
c77d448456eb55cb45ff2a594280d793abb4c2fc
|
[
"MIT"
] | 4
|
2015-12-08T10:06:08.000Z
|
2019-12-11T11:10:49.000Z
|
import datetime
import os
import pytest
import timeflow.utils
from timeflow import cli
FAKE_TIME = datetime.datetime(2015, 1, 1, 23, 59, 59)
@pytest.fixture
def patch_datetime_now(monkeypatch):
class mydatetime(datetime.datetime):
@classmethod
def now(cls):
return FAKE_TIME
monkeypatch.setattr(datetime, 'datetime', mydatetime)
def test_patch_datetime(patch_datetime_now):
assert datetime.datetime.now() == FAKE_TIME
def test_log(patch_datetime_now, tmpdir, capsys):
tmp_path = tmpdir.join("test_log.txt").strpath
timeflow.utils.LOG_FILE = tmp_path
# run log command
parser = cli.create_parser()
args = parser.parse_args(['log', 'message'])
args.func(args)
with open(tmp_path, 'r') as f:
lines = f.readlines()
assert len(lines) == 1
assert lines[0] == '2015-01-01 23:59: message\n'
def test_edit(patch_datetime_now, tmpdir, capsys):
test_dir = os.path.dirname(os.path.realpath(__file__))
# overwrite log file setting, to define file to be used in tests
timeflow.utils.LOG_FILE = test_dir + '/fake_log.txt'
# run edit command
parser = cli.create_parser()
args = parser.parse_args(['edit'])
args.func(args)
def test_stats_now(patch_datetime_now, capsys):
test_dir = os.path.dirname(os.path.realpath(__file__))
# overwrite log file setting, to define file to be used in tests
timeflow.utils.LOG_FILE = test_dir + '/fake_log.txt'
# run stats command
parser = cli.create_parser()
args = parser.parse_args(['stats'])
args.func(args)
# extract STDOUT, as stats command prints to it
out, err = capsys.readouterr()
result = ("Work: 2 hours 50 min\n"
"Slack: 1 hour 10 min\n"
"\n"
"Today working for: 15 hours 59 min\n")
assert out == result
def test_stats_yesterday(patch_datetime_now, capsys):
test_dir = os.path.dirname(os.path.realpath(__file__))
# overwrite log file setting, to define file to be used in tests
timeflow.utils.LOG_FILE = test_dir + '/fake_log.txt'
# run stats command
parser = cli.create_parser()
args = parser.parse_args(['stats', '--yesterday'])
args.func(args)
# extract STDOUT, as stats command prints to it
out, err = capsys.readouterr()
result = ("Work: 2 hours 50 min\n"
"Slack: 1 hour 10 min\n")
assert out == result
def test_stats_day(patch_datetime_now, capsys):
test_dir = os.path.dirname(os.path.realpath(__file__))
# overwrite log file setting, to define file to be used in tests
timeflow.utils.LOG_FILE = test_dir + '/fake_log.txt'
# run stats command
parser = cli.create_parser()
args = parser.parse_args(['stats', '--day', '2015-01-01'])
args.func(args)
# extract STDOUT, as stats command prints to it
out, err = capsys.readouterr()
result = ("Work: 2 hours 50 min\n"
"Slack: 1 hour 10 min\n")
assert out == result
def test_stats_this_week(patch_datetime_now, capsys):
test_dir = os.path.dirname(os.path.realpath(__file__))
# overwrite log file setting, to define file to be used in tests
timeflow.utils.LOG_FILE = test_dir + '/fake_log.txt'
# run stats command
parser = cli.create_parser()
args = parser.parse_args(['stats', '--this-week'])
args.func(args)
# extract STDOUT, as stats command prints to it
out, err = capsys.readouterr()
result = ("Work: 8 hours 50 min\n"
"Slack: 3 hours 50 min\n")
assert out == result
def test_stats_last_week(patch_datetime_now, capsys):
test_dir = os.path.dirname(os.path.realpath(__file__))
# overwrite log file setting, to define file to be used in tests
timeflow.utils.LOG_FILE = test_dir + '/fake_log.txt'
# run stats command
parser = cli.create_parser()
args = parser.parse_args(['stats', '--last-week'])
args.func(args)
# extract STDOUT, as stats command prints to it
out, err = capsys.readouterr()
result = ("Work: 2 hours 50 min\n"
"Slack: 1 hour 10 min\n")
assert out == result
def test_stats_week(patch_datetime_now, capsys):
test_dir = os.path.dirname(os.path.realpath(__file__))
# overwrite log file setting, to define file to be used in tests
timeflow.utils.LOG_FILE = test_dir + '/fake_log.txt'
# run stats command
parser = cli.create_parser()
args = parser.parse_args(['stats', '--week', '2015-01-01'])
args.func(args)
# extract STDOUT, as stats command prints to it
out, err = capsys.readouterr()
result = ("Work: 8 hours 50 min\n"
"Slack: 3 hours 50 min\n")
assert out == result
def test_stats_last_month(patch_datetime_now, capsys):
test_dir = os.path.dirname(os.path.realpath(__file__))
# overwrite log file setting, to define file to be used in tests
timeflow.utils.LOG_FILE = test_dir + '/fake_log.txt'
# run stats command
parser = cli.create_parser()
args = parser.parse_args(['stats', '--last-month'])
args.func(args)
# extract STDOUT, as stats command prints to it
out, err = capsys.readouterr()
result = ("Work: 5 hours 40 min\n"
"Slack: 2 hours 20 min\n")
assert out == result
def test_stats_this_month(patch_datetime_now, capsys):
test_dir = os.path.dirname(os.path.realpath(__file__))
# overwrite log file setting, to define file to be used in tests
timeflow.utils.LOG_FILE = test_dir + '/fake_log.txt'
# run stats command
parser = cli.create_parser()
args = parser.parse_args(['stats', '--this-month'])
args.func(args)
# extract STDOUT, as stats command prints to it
out, err = capsys.readouterr()
result = ("Work: 2 hours 50 min\n"
"Slack: 1 hour 10 min\n")
assert out == result
def test_stats_month(patch_datetime_now, capsys):
test_dir = os.path.dirname(os.path.realpath(__file__))
# overwrite log file setting, to define file to be used in tests
timeflow.utils.LOG_FILE = test_dir + '/fake_log.txt'
# run stats command
parser = cli.create_parser()
args = parser.parse_args(['stats', '--month', '1'])
args.func(args)
# extract STDOUT, as stats command prints to it
out, err = capsys.readouterr()
result = ("Work: 6 hours\n"
"Slack: 2 hours 40 min\n")
assert out == result
def test_stats_from(patch_datetime_now, capsys):
test_dir = os.path.dirname(os.path.realpath(__file__))
# overwrite log file setting, to define file to be used in tests
timeflow.utils.LOG_FILE = test_dir + '/fake_log.txt'
# run stats command
parser = cli.create_parser()
args = parser.parse_args(['stats', '--from', '2014-12-28'])
args.func(args)
# extract STDOUT, as stats command prints to it
out, err = capsys.readouterr()
result = ("Work: 5 hours 40 min\n"
"Slack: 2 hours 20 min\n")
assert out == result
def test_stats_from_to(patch_datetime_now, capsys):
test_dir = os.path.dirname(os.path.realpath(__file__))
# overwrite log file setting, to define file to be used in tests
timeflow.utils.LOG_FILE = test_dir + '/fake_log.txt'
# run stats command
parser = cli.create_parser()
args = parser.parse_args(['stats', '--from', '2014-12-24',
'--to', '2015-01-01'])
args.func(args)
# extract STDOUT, as stats command prints to it
out, err = capsys.readouterr()
result = ("Work: 8 hours 30 min\n"
"Slack: 3 hours 30 min\n")
assert out == result
def test_stats_now_report(patch_datetime_now, capsys):
test_dir = os.path.dirname(os.path.realpath(__file__))
# overwrite log file setting, to define file to be used in tests
timeflow.utils.LOG_FILE = test_dir + '/fake_log.txt'
# run stats command
parser = cli.create_parser()
args = parser.parse_args(['stats', '--report'])
args.func(args)
# extract STDOUT, as stats command prints to it
out, err = capsys.readouterr()
result = (
"------------------------------ WORK -------------------------------\n"
"Django:\n"
" 1 hour 35 min: read documentation\n"
" Total: 1 hour 35 min\n"
"\n"
"Timeflow:\n"
" 1 hour 15 min: start project\n"
" Total: 1 hour 15 min\n"
"------------------------------ SLACK ------------------------------\n"
"Breakfast:\n"
" 0 hours 45 min: Breakfast\n"
" Total: 0 hours 45 min\n"
"\n"
"Slack:\n"
" 0 hours 25 min: watch YouTube\n"
" Total: 0 hours 25 min\n"
)
assert out == result
def test_stats_now_report_as_gtimelog(patch_datetime_now, capsys):
test_dir = os.path.dirname(os.path.realpath(__file__))
# overwrite log file setting, to define file to be used in tests
timeflow.utils.LOG_FILE = test_dir + '/fake_log.txt'
# run stats command
parser = cli.create_parser()
args = parser.parse_args(['stats', '--report-as-gtimelog'])
args.func(args)
# extract STDOUT, as stats command prints to it
out, err = capsys.readouterr()
result = (
" time\n"
"Django: read documentation 1 hour 35 min\n"
"Timeflow: start project 1 hour 15 min"
"\n"
"\n"
"Total work done: 2 hours 50 min"
"\n"
"\n"
"By category:"
"\n"
"\n"
"Django 1 hour 35 min\n"
"Timeflow 1 hour 15 min\n\n"
)
assert out == result
| 30.896226
| 91
| 0.615267
| 1,364
| 9,825
| 4.268328
| 0.089443
| 0.021299
| 0.046719
| 0.051529
| 0.839918
| 0.822913
| 0.812264
| 0.812264
| 0.806939
| 0.782721
| 0
| 0.023957
| 0.260763
| 9,825
| 317
| 92
| 30.993691
| 0.77764
| 0.17771
| 0
| 0.580311
| 0
| 0
| 0.232454
| 0.015555
| 0
| 0
| 0
| 0
| 0.082902
| 1
| 0.093264
| false
| 0
| 0.025907
| 0.005181
| 0.129534
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b88d955b8299342952ef21034b073db14c62568d
| 36
|
py
|
Python
|
feacher/__init__.py
|
qpochlabs/feacher
|
de4b8ef1ced03aaf694a8c7e593f2f5e4d3e9859
|
[
"MIT"
] | null | null | null |
feacher/__init__.py
|
qpochlabs/feacher
|
de4b8ef1ced03aaf694a8c7e593f2f5e4d3e9859
|
[
"MIT"
] | null | null | null |
feacher/__init__.py
|
qpochlabs/feacher
|
de4b8ef1ced03aaf694a8c7e593f2f5e4d3e9859
|
[
"MIT"
] | 1
|
2021-09-06T09:01:53.000Z
|
2021-09-06T09:01:53.000Z
|
from feacher.feacher import extract
| 18
| 35
| 0.861111
| 5
| 36
| 6.2
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 36
| 1
| 36
| 36
| 0.96875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b8c495503895e3241a41e4f988487978df1f34a5
| 490
|
py
|
Python
|
pythonFiles/tests/testing_tools/adapter/.data/complex/mod.py
|
AlbertDeFusco/vscode-python
|
20349f2e8d5fa49df6b73b8094a4292d35ab767f
|
[
"MIT"
] | 2,461
|
2016-01-21T16:40:43.000Z
|
2022-03-31T12:01:55.000Z
|
pythonFiles/tests/testing_tools/adapter/.data/complex/mod.py
|
AlbertDeFusco/vscode-python
|
20349f2e8d5fa49df6b73b8094a4292d35ab767f
|
[
"MIT"
] | 12,536
|
2019-05-06T21:26:14.000Z
|
2022-03-31T23:06:48.000Z
|
pythonFiles/tests/testing_tools/adapter/.data/complex/mod.py
|
vasili8m/vscode-python
|
846eee870e8b7bab38172600836faedb5fb80166
|
[
"MIT"
] | 871
|
2019-05-15T13:43:55.000Z
|
2022-03-31T03:04:35.000Z
|
"""
Examples:
>>> square(1)
1
>>> square(2)
4
>>> square(3)
9
>>> spam = Spam()
>>> spam.eggs()
42
"""
def square(x):
"""
Examples:
>>> square(1)
1
>>> square(2)
4
>>> square(3)
9
"""
return x * x
class Spam(object):
"""
Examples:
>>> spam = Spam()
>>> spam.eggs()
42
"""
def eggs(self):
"""
Examples:
>>> spam = Spam()
>>> spam.eggs()
42
"""
return 42
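# How these examples would typically be exercised (illustrative addition, not
# part of the original test-data file): the docstrings above are standard
# doctests, so they can be checked with Python's built-in doctest module.
if __name__ == "__main__":
    import doctest
    # Verbose mode prints each example as it is checked; the process exits
    # normally only if all examples pass. Equivalent CLI: python -m doctest mod.py -v
    doctest.testmod(verbose=True)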
| 9.423077
| 25
| 0.379592
| 51
| 490
| 3.647059
| 0.313725
| 0.258065
| 0.193548
| 0.258065
| 0.752688
| 0.752688
| 0.344086
| 0.344086
| 0.344086
| 0.344086
| 0
| 0.068966
| 0.408163
| 490
| 51
| 26
| 9.607843
| 0.572414
| 0.510204
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
b245d6d5ffa1ece3576dc720134240b2469fa631
| 266
|
py
|
Python
|
fixture/main.py
|
Cpt-Meow/PAT
|
bd1ff189e4cea17b2ae1609d6c2d690cccc2c206
|
[
"Apache-2.0"
] | null | null | null |
fixture/main.py
|
Cpt-Meow/PAT
|
bd1ff189e4cea17b2ae1609d6c2d690cccc2c206
|
[
"Apache-2.0"
] | null | null | null |
fixture/main.py
|
Cpt-Meow/PAT
|
bd1ff189e4cea17b2ae1609d6c2d690cccc2c206
|
[
"Apache-2.0"
] | null | null | null |
class MainClass:
class_number = 20
class_string = 'Hello, world'
def get_local_number(self):
return 14
def get_lass_number(self):
return MainClass.class_number
def get_class_string(self):
return MainClass.class_string
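# Usage sketch (illustrative addition, not part of the original fixture): the
# class mixes an instance-level value with class-level attributes, so a caller
# would exercise it roughly like this, using the method names exactly as they
# appear above.
if __name__ == "__main__":
    mc = MainClass()
    assert mc.get_local_number() == 14               # plain instance method
    assert mc.get_lass_number() == 20                # reads MainClass.class_number
    assert mc.get_class_string() == 'Hello, world'   # reads MainClass.class_string
    print("MainClass fixture behaves as expected")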
| 19
| 37
| 0.676692
| 34
| 266
| 5
| 0.411765
| 0.247059
| 0.235294
| 0.282353
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.020305
| 0.259399
| 266
| 13
| 38
| 20.461538
| 0.84264
| 0
| 0
| 0
| 0
| 0
| 0.045113
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0.333333
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
b26506e8e5882e289d2cd7f99122eda7ce66fb01
| 130
|
py
|
Python
|
camomilla/models/__init__.py
|
lotrekagency/camomilla
|
549892f95a97df5f8bacd3a54b074e16020bda65
|
[
"MIT"
] | 4
|
2021-05-11T20:17:58.000Z
|
2022-02-03T11:51:19.000Z
|
camomilla/models/__init__.py
|
lotrekagency/camomilla
|
549892f95a97df5f8bacd3a54b074e16020bda65
|
[
"MIT"
] | 3
|
2022-01-04T10:31:42.000Z
|
2022-01-21T12:51:16.000Z
|
camomilla/models/__init__.py
|
lotrekagency/camomilla
|
549892f95a97df5f8bacd3a54b074e16020bda65
|
[
"MIT"
] | 2
|
2022-02-03T11:51:23.000Z
|
2022-03-23T16:52:29.000Z
|
from .article import *
from .category import *
from .content import *
from .media import *
from .page import *
from .tag import *
| 18.571429
| 23
| 0.723077
| 18
| 130
| 5.222222
| 0.444444
| 0.531915
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.184615
| 130
| 6
| 24
| 21.666667
| 0.886792
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b27d74f6ecafe6aa43047773d623fc72c3f2a748
| 3,661
|
py
|
Python
|
tests/mutate/test_remove_repeated_time_signatures.py
|
gilbertohasnofb/auxjad
|
553b7fe97221b6f378a93ade6262f024e3cbc678
|
[
"MIT"
] | 6
|
2020-05-18T09:28:29.000Z
|
2021-12-22T00:40:54.000Z
|
tests/mutate/test_remove_repeated_time_signatures.py
|
gilbertohasnofb/auxjad
|
553b7fe97221b6f378a93ade6262f024e3cbc678
|
[
"MIT"
] | 1
|
2021-04-21T20:29:38.000Z
|
2021-04-22T19:44:54.000Z
|
tests/mutate/test_remove_repeated_time_signatures.py
|
gilbertohasnofb/auxjad
|
553b7fe97221b6f378a93ade6262f024e3cbc678
|
[
"MIT"
] | 1
|
2021-04-21T18:54:46.000Z
|
2021-04-21T18:54:46.000Z
|
import abjad
import auxjad
def test_remove_repeated_time_signatures_01():
staff = abjad.Staff(r"c'4 d'8 | c'4 d'8")
abjad.attach(abjad.TimeSignature((3, 8)), staff[0])
abjad.attach(abjad.TimeSignature((3, 8)), staff[2])
assert abjad.lilypond(staff) == abjad.String.normalize(
r"""
\new Staff
{
\time 3/8
c'4
d'8
\time 3/8
c'4
d'8
}
"""
)
auxjad.mutate.remove_repeated_time_signatures(staff[:])
assert abjad.lilypond(staff) == abjad.String.normalize(
r"""
\new Staff
{
\time 3/8
c'4
d'8
c'4
d'8
}
"""
)
def test_remove_repeated_time_signatures_02():
staff = abjad.Staff(r"c'4 d'8 | e'4. | c'4 d'8")
abjad.attach(abjad.TimeSignature((3, 8)), staff[0])
abjad.attach(abjad.TimeSignature((3, 8)), staff[3])
assert abjad.lilypond(staff) == abjad.String.normalize(
r"""
\new Staff
{
\time 3/8
c'4
d'8
e'4.
\time 3/8
c'4
d'8
}
"""
)
auxjad.mutate.remove_repeated_time_signatures(staff[:])
assert abjad.lilypond(staff) == abjad.String.normalize(
r"""
\new Staff
{
\time 3/8
c'4
d'8
e'4.
c'4
d'8
}
"""
)
def test_remove_repeated_time_signatures_03():
staff = abjad.Staff([abjad.Note("c'2"),
abjad.Chord("<d' f'>2"),
abjad.Tuplet((2, 3), "g2 a2 b2"),
])
abjad.attach(abjad.TimeSignature((2, 2)), staff[0])
abjad.attach(abjad.TimeSignature((2, 2)), staff[2][0])
assert abjad.lilypond(staff) == abjad.String.normalize(
r"""
\new Staff
{
\time 2/2
c'2
<d' f'>2
\times 2/3
{
\time 2/2
g2
a2
b2
}
}
"""
)
auxjad.mutate.remove_repeated_time_signatures(staff[:])
assert abjad.lilypond(staff) == abjad.String.normalize(
r"""
\new Staff
{
\time 2/2
c'2
<d' f'>2
\times 2/3
{
g2
a2
b2
}
}
"""
)
def test_remove_repeated_time_signatures_04():
staff = abjad.Staff(r"c'2 d'2 | e'2 d'2")
abjad.attach(abjad.TimeSignature((4, 4)), staff[2])
auxjad.mutate.remove_repeated_time_signatures(staff[:])
assert abjad.lilypond(staff) == abjad.String.normalize(
r"""
\new Staff
{
c'2
d'2
e'2
d'2
}
"""
)
def test_remove_repeated_time_signatures_05():
staff = abjad.Staff(r"c'4 d'8 | c'4 d'8")
abjad.attach(abjad.TimeSignature((3, 8)), staff[0])
abjad.attach(abjad.TimeSignature((3, 8)), staff[2])
assert abjad.lilypond(staff) == abjad.String.normalize(
r"""
\new Staff
{
\time 3/8
c'4
d'8
\time 3/8
c'4
d'8
}
"""
)
abjad.mutate.remove_repeated_time_signatures(staff[:])
assert abjad.lilypond(staff) == abjad.String.normalize(
r"""
\new Staff
{
\time 3/8
c'4
d'8
c'4
d'8
}
"""
)
| 22.598765
| 59
| 0.438951
| 424
| 3,661
| 3.695755
| 0.09434
| 0.022974
| 0.034461
| 0.045948
| 0.913848
| 0.894703
| 0.823867
| 0.77792
| 0.757498
| 0.757498
| 0
| 0.06241
| 0.43103
| 3,661
| 161
| 60
| 22.73913
| 0.68987
| 0
| 0
| 0.357143
| 0
| 0
| 0.044028
| 0
| 0
| 0
| 0
| 0
| 0.160714
| 1
| 0.089286
| false
| 0
| 0.035714
| 0
| 0.125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b29e77b8aec769af7b004e180561c70617064ad3
| 44
|
py
|
Python
|
kmri/__init__.py
|
robianmcd/keras-mri
|
dd8619ca848cb64555fbd7aca5b7aa1941cdc08b
|
[
"MIT"
] | 12
|
2019-04-18T13:32:48.000Z
|
2020-06-19T13:45:34.000Z
|
kmri/__init__.py
|
robianmcd/keras-mri
|
dd8619ca848cb64555fbd7aca5b7aa1941cdc08b
|
[
"MIT"
] | 1
|
2019-06-20T03:44:07.000Z
|
2019-06-21T14:35:44.000Z
|
kmri/__init__.py
|
robianmcd/keras-mri
|
dd8619ca848cb64555fbd7aca5b7aa1941cdc08b
|
[
"MIT"
] | 3
|
2019-04-18T19:36:52.000Z
|
2020-01-30T22:51:02.000Z
|
from kmri.visualizer import visualize_model
| 22
| 43
| 0.886364
| 6
| 44
| 6.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 44
| 1
| 44
| 44
| 0.95
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a255e3e56be6ae89b898c9ba2333ca11a0bcf2d8
| 96
|
py
|
Python
|
backend/util/request/models/order_item/__init__.py
|
willrp/willorders-ws
|
de0757d8888dab41095c93500a6a88c813755530
|
[
"MIT"
] | null | null | null |
backend/util/request/models/order_item/__init__.py
|
willrp/willorders-ws
|
de0757d8888dab41095c93500a6a88c813755530
|
[
"MIT"
] | null | null | null |
backend/util/request/models/order_item/__init__.py
|
willrp/willorders-ws
|
de0757d8888dab41095c93500a6a88c813755530
|
[
"MIT"
] | null | null | null |
from .order_item_request import OrderItemRequest
from .order_item_schema import OrderItemSchema
| 32
| 48
| 0.895833
| 12
| 96
| 6.833333
| 0.666667
| 0.219512
| 0.317073
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 96
| 2
| 49
| 48
| 0.931818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
a2d132ca5a5de5910fa31e673615af2eb6bc9e88
| 28,419
|
py
|
Python
|
test/functional/tl_vesting.py
|
sinetek/BlockPo-to-Tradelayer
|
c7039526aada601790b167f2cc205a21b1248992
|
[
"MIT"
] | null | null | null |
test/functional/tl_vesting.py
|
sinetek/BlockPo-to-Tradelayer
|
c7039526aada601790b167f2cc205a21b1248992
|
[
"MIT"
] | 5
|
2021-06-21T21:21:53.000Z
|
2021-06-22T20:10:16.000Z
|
test/functional/tl_vesting.py
|
sinetek/BlockPo-to-Tradelayer
|
c7039526aada601790b167f2cc205a21b1248992
|
[
"MIT"
] | 1
|
2021-06-21T21:14:45.000Z
|
2021-06-21T21:14:45.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2015-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test Vesting tokens."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import os
import json
import math
import http.client
import urllib.parse
class VestingBasicsTest (BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
self.extra_args = [["-txindex=1"]]
def setup_chain(self):
super().setup_chain()
#Append rpcauth to bitcoin.conf before initialization
rpcauth = "rpcauth=rt:93648e835a54c573682c2eb19f882535$7681e9c5b74bdd85e78166031d2058e1069b3ed7ed967c93fc63abba06f31144"
rpcuser = "rpcuser=rpcuser💻"
rpcpassword = "rpcpassword=rpcpassword🔑"
with open(os.path.join(self.options.tmpdir+"/node0", "litecoin.conf"), 'a', encoding='utf8') as f:
f.write(rpcauth+"\n")
def run_test(self):
self.log.info("Preparing the workspace...")
# mining 1000 blocks, total budget: 14949.77187643 LTC
for i in range(0,2):
self.nodes[0].generate(500)
blocks = 500*(i+1)
self.log.info(str(blocks)+" blocks mined...")
################################################################################
# Checking RPC tl_sendvesting and related (starting in block 1000) #
################################################################################
url = urllib.parse.urlparse(self.nodes[0].url)
#Old authpair
authpair = url.username + ':' + url.password
headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
addresses = []
accounts = ["john", "doe", "another", "mark", "tango"]
# for graphs for addresses[1]
vested = []
unvested = []
volume_ltc = []
#vested ALL for addresses[4]
bvested = []
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
self.log.info("watching LTC general balance")
params = str([""]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, False, "getbalance",params)
self.log.info(out)
assert_equal(out['error'], None)
adminAddress = 'QgKxFUBgR8y4xFy3s9ybpbDvYNKr4HTKPb'
privkey = 'cTkpBcU7YzbJBi7U59whwahAMcYwKT78yjZ2zZCbLsCZ32Qp5Wta'
self.log.info("importing admin address")
params = str([privkey]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, True, "importprivkey",params)
# self.log.info(out)
assert_equal(out['error'], None)
self.log.info("watching private key of admin address")
params = str([adminAddress]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, False, "dumpprivkey",params)
# self.log.info(out)
assert_equal(out['error'], None)
self.log.info("Creating addresses")
addresses = tradelayer_createAddresses(accounts, conn, headers)
addresses.append(adminAddress)
# self.log.info(addresses)
self.log.info("Funding addresses with LTC")
amount = 5
tradelayer_fundingAddresses(addresses, amount, conn, headers)
self.nodes[0].generate(1)
self.log.info("Checking the LTC balance in every account")
tradelayer_checkingBalance(accounts, amount, conn, headers)
self.log.info("Funding addresses[3] with 12000 LTC")
amount = 2000
params = str([addresses[3], amount]).replace("'",'"')
for i in range(0,6):
out = tradelayer_HTTP(conn, headers, False, "sendtoaddress",params)
# self.log.info(out)
assert_equal(out['error'], None)
self.nodes[0].generate(1)
self.log.info("Creating new tokens (sendissuancefixed)")
array = [0]
params = str([addresses[2],2,0,"lihki","","","90000000",array]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, True, "tl_sendissuancefixed",params)
# self.log.info(out)
self.log.info("Self Attestation for addresses")
tradelayer_selfAttestation(addresses,conn, headers)
self.log.info("Checking attestations")
out = tradelayer_HTTP(conn, headers, False, "tl_list_attestation")
# self.log.info(out)
result = []
registers = out['result']
for addr in addresses:
for i in registers:
if i['att sender'] == addr and i['att receiver'] == addr and i['kyc_id'] == 0:
result.append(True)
assert_equal(result, [True, True, True, True, True, True])
self.log.info("Checking vesting tokens property")
params = str([3])
out = tradelayer_HTTP(conn, headers, True, "tl_getproperty",params)
# self.log.info(out)
assert_equal(out['result']['propertyid'],3)
assert_equal(out['result']['name'],'Vesting Tokens')
assert_equal(out['result']['data'],'Divisible Tokens')
assert_equal(out['result']['url'],'www.tradelayer.org')
assert_equal(out['result']['divisible'],True)
assert_equal(out['result']['totaltokens'],'1500000.00000000')
self.log.info("Checking the property")
params = str([4])
out = tradelayer_HTTP(conn, headers, True, "tl_getproperty",params)
assert_equal(out['error'], None)
# self.log.info(out)
assert_equal(out['result']['propertyid'],4)
assert_equal(out['result']['name'],'lihki')
assert_equal(out['result']['data'],'')
assert_equal(out['result']['url'],'')
assert_equal(out['result']['divisible'],True)
assert_equal(out['result']['totaltokens'],'90000000.00000000')
self.log.info("sendvesting from adminAddress to first address")
params = str([adminAddress, addresses[0], "2000"]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, False, "tl_sendvesting",params)
# self.log.info(out)
assert_equal(out['error'], None)
self.nodes[0].generate(1)
self.log.info("Checking tokens in receiver address")
params = str([addresses[0], 3]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, True, "tl_getbalance",params)
# self.log.info(out)
assert_equal(out['error'], None)
assert_equal(out['result']['balance'],'2000.00000000')
assert_equal(out['result']['reserve'],'0.00000000')
self.log.info("Checking unvested ALLs ")
params = str([addresses[0]]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, False, "tl_getunvested",params)
# self.log.info(out)
assert_equal(out['result']['unvested'],'2000.00000000')
self.log.info("Checking the time lock of one year")
self.log.info("sendvesting from first to second address")
params = str([addresses[0], addresses[1], "1000"]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, False, "tl_sendvesting",params)
# self.log.info(out)
assert_equal(out['error'], None)
self.nodes[0].generate(1)
self.log.info("Checking tokens in receiver address")
params = str([addresses[1], 3]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, False, "tl_getbalance",params)
# self.log.info(out)
assert_equal(out['error'], None)
assert_equal(out['result']['balance'],'0.00000000')
assert_equal(out['result']['reserve'],'0.00000000')
self.log.info("Checking unvested ALLs ")
params = str([addresses[1]]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, False, "tl_getunvested",params)
# self.log.info(out)
assert_equal(out['result']['unvested'],'0.00000000')
out = tradelayer_HTTP(conn, headers, True, "tl_getinfo")
block = out['result']['block']
self.log.info("block height :"+str(block))
self.log.info("Waiting for one year")
for i in range(20):
self.nodes[0].generate(1)
self.log.info("sendvesting from first to second address, again")
params = str([addresses[0], addresses[1], "1000"]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, False, "tl_sendvesting",params)
# self.log.info(out)
assert_equal(out['error'], None)
self.nodes[0].generate(1)
self.log.info("sendvesting from first to 5th addresses")
params = str([addresses[0], addresses[4], "500"]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, False, "tl_sendvesting",params)
# self.log.info(out)
assert_equal(out['error'], None)
self.nodes[0].generate(1)
self.log.info("Restarting for the node, in order to test persistence")
self.restart_node(0) #stop and start
url = urllib.parse.urlparse(self.nodes[0].url)
#New authpair
authpair = url.username + ':' + url.password
headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
self.log.info("Checking tokens in receiver addresses")
params = str([addresses[1], 3]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, False, "tl_getbalance",params)
# self.log.info(out)
assert_equal(out['error'], None)
assert_equal(out['result']['balance'],'1000.00000000')
assert_equal(out['result']['reserve'],'0.00000000')
params = str([addresses[4], 3]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, False, "tl_getbalance",params)
# self.log.info(out)
assert_equal(out['error'], None)
assert_equal(out['result']['balance'],'500.00000000')
assert_equal(out['result']['reserve'],'0.00000000')
self.log.info("Checking unvested ALLs ")
params = str([addresses[1]]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, False, "tl_getunvested",params)
# self.log.info(out)
assert_equal(out['result']['unvested'],'1000.00000000')
params = str([addresses[4]]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, False, "tl_getunvested",params)
# self.log.info(out)
assert_equal(out['result']['unvested'],'500.00000000')
# 200 LTC implies release 7.5% of ALLs from unvested to balance
# NOTE: In regtest 200 LTC volume is equivalent to 20000 (x100) LTCs in testnet or mainnet
self.log.info("Creating LTC volume in DEx")
self.log.info("Sending a DEx sell tokens offer")
params = str([addresses[2], 4, "1000", "200", 250, "0.00001", "2", 1]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, True, "tl_senddexoffer",params)
assert_equal(out['error'], None)
# self.log.info(out)
self.nodes[0].generate(1)
self.log.info("Checking the offer in DEx")
params = str([addresses[2]]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, True, "tl_getactivedexsells",params)
# self.log.info(out)
assert_equal(out['error'], None)
assert_equal(out['result'][0]['propertyid'], 4)
assert_equal(out['result'][0]['action'], 2)
assert_equal(out['result'][0]['seller'], addresses[2])
assert_equal(out['result'][0]['ltcsdesired'], '200.00000000')
assert_equal(out['result'][0]['amountavailable'], '1000.00000000')
assert_equal(out['result'][0]['unitprice'], '0.20000000')
assert_equal(out['result'][0]['minimumfee'], '0.00001000')
self.log.info("Accepting the full offer")
params = str([addresses[3], addresses[2], 4, "1000"]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, True, "tl_senddexaccept",params)
assert_equal(out['error'], None)
# self.log.info(out)
self.nodes[0].generate(1)
self.log.info("Checking the offer status")
params = str([addresses[2]]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, True, "tl_getactivedexsells",params)
# self.log.info(out)
assert_equal(out['error'], None)
assert_equal(out['result'][0]['propertyid'], 4)
assert_equal(out['result'][0]['action'], 2)
assert_equal(out['result'][0]['seller'], addresses[2])
assert_equal(out['result'][0]['ltcsdesired'], '0.00000000')
assert_equal(out['result'][0]['amountavailable'], '0.00000000')
assert_equal(out['result'][0]['unitprice'], '0.20000000')
assert_equal(out['result'][0]['minimumfee'], '0.00001000')
assert_equal(out['result'][0]['accepts'][0]['buyer'], addresses[3])
assert_equal(out['result'][0]['accepts'][0]['amountdesired'], '1000.00000000')
assert_equal(out['result'][0]['accepts'][0]['ltcstopay'], '200.00000000')
self.log.info("Paying the tokens")
params = str([addresses[3], addresses[2], "200"]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, True, "tl_send_dex_payment",params)
# self.log.info(out)
self.nodes[0].generate(1)
self.log.info("Checking token balance in buyer address")
params = str([addresses[3], 4]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, True, "tl_getbalance",params)
# self.log.info(out)
assert_equal(out['error'], None)
assert_equal(out['result']['balance'], '1000.00000000')
assert_equal(out['result']['reserve'],'0.00000000')
self.log.info("Checking LTC Volume")
params = str([4, 1, 3000]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, True, "tl_get_ltcvolume",params)
# self.log.info(out)
assert_equal(out['error'], None)
assert_equal(out['result']['volume'], '200.00000000')
volume0 = float(out['result']['volume'])
self.nodes[0].generate(1)
self.log.info("Checking vesting in related address")
params = str([addresses[1], 1]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, False, "tl_getbalance",params)
# self.log.info(out)
assert_equal(out['error'], None)
assert_equal(out['result']['balance'],'75.25749000') # 7.5% of vesting (NOTE: check the round up)
assert_equal(out['result']['reserve'],'0.00000000')
vested0 = float(out['result']['balance'])
params = str([addresses[4], 1]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, False, "tl_getbalance",params)
# self.log.info(out)
assert_equal(out['error'], None)
# assert_equal(out['result']['balance'],'75.25749000') # 7.5% of vesting (NOTE: check the round up)
assert_equal(out['result']['reserve'],'0.00000000')
vested1 = float(out['result']['balance'])
bvested.append(vested1)
self.log.info("Checking unvested ALLs ")
params = str([addresses[1]]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, False, "tl_getunvested",params)
# self.log.info(out)
assert_equal(out['result']['unvested'],'924.74251000')
unvested0 = float(out['result']['unvested'])
volume_ltc.append(volume0)
vested.append(vested0)
unvested.append(unvested0)
self.log.info("Checking vesting info")
out = tradelayer_HTTP(conn, headers, False, "tl_getvesting_info",params)
# self.log.info(out)
assert_equal(out['error'], None)
assert_equal(out['result']['propertyid'], 3)
assert_equal(out['result']['name'], 'Vesting Tokens')
assert_equal(out['result']['data'], 'Divisible Tokens')
assert_equal(out['result']['url'], 'www.tradelayer.org')
assert_equal(out['result']['divisible'], True)
assert_equal(out['result']['issuer'], 'QgKxFUBgR8y4xFy3s9ybpbDvYNKr4HTKPb')
assert_equal(out['result']['activation block'], 100)
assert_equal(out['result']['litecoin volume'], '200.00000000')
assert_equal(out['result']['vested percentage'], '7.52574900')
assert_equal(out['result']['last vesting block'], 1037)
assert_equal(out['result']['total vested'], '150.51498000')
assert_equal(out['result']['owners'], 3)
assert_equal(out['result']['total tokens'], '1500000.00000000')
assert_equal(out['result']['kyc_ids allowed'], '[]')
# 400 LTC implies release 15.05% of ALLs from unvested to balance
# Remember: 400 LTCs in regtest is 40000 (x100) LTCs in testnet/mainnet
self.log.info("Sending a DEx sell tokens offer")
params = str([addresses[2], 4, "1000000", "200000", 250, "0.00001", "2", 1]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, True, "tl_senddexoffer",params)
assert_equal(out['error'], None)
# self.log.info(out)
self.nodes[0].generate(1)
self.log.info("Checking the offer in DEx")
params = str([addresses[2]]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, True, "tl_getactivedexsells",params)
# self.log.info(out)
assert_equal(out['error'], None)
assert_equal(out['result'][0]['propertyid'], 4)
assert_equal(out['result'][0]['action'], 2)
assert_equal(out['result'][0]['seller'], addresses[2])
assert_equal(out['result'][0]['ltcsdesired'], '200000.00000000')
assert_equal(out['result'][0]['amountavailable'], '1000000.00000000')
assert_equal(out['result'][0]['unitprice'], '0.20000000')
assert_equal(out['result'][0]['minimumfee'], '0.00001000')
self.log.info("Accepting the part of the offer")
params = str([addresses[3], addresses[2], 4, "1000"]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, True, "tl_senddexaccept",params)
assert_equal(out['error'], None)
# self.log.info(out)
self.nodes[0].generate(1)
self.log.info("Checking the offer status")
params = str([addresses[2]]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, True, "tl_getactivedexsells",params)
# self.log.info(out)
assert_equal(out['error'], None)
assert_equal(out['result'][0]['accepts'][0]['buyer'], addresses[3])
assert_equal(out['result'][0]['accepts'][0]['amountdesired'], '1000.00000000')
assert_equal(out['result'][0]['accepts'][0]['ltcstopay'], '200.00000000')
self.log.info("Paying the tokens")
params = str([addresses[3], addresses[2], "200"]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, True, "tl_send_dex_payment",params)
# self.log.info(out)
self.nodes[0].generate(1)
self.log.info("Checking token balance in buyer address")
params = str([addresses[3], 4]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, True, "tl_getbalance",params)
# self.log.info(out)
assert_equal(out['error'], None)
assert_equal(out['result']['balance'], '2000.00000000')
assert_equal(out['result']['reserve'],'0.00000000')
self.log.info("Checking LTC Volume")
params = str([4, 1, 99999]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, True, "tl_get_ltcvolume",params)
# self.log.info(out)
assert_equal(out['error'], None)
assert_equal(out['result']['volume'], '400.00000000')
volume1 = float(out['result']['volume'])
self.nodes[0].generate(2)
self.log.info("Checking vesting in related addresses")
params = str([addresses[1], 1]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, False, "tl_getbalance",params)
# self.log.info(out)
assert_equal(out['error'], None)
assert_equal(out['result']['balance'],'150.51498000') # 15.05% of vesting (NOTE: check the round up)
assert_equal(out['result']['reserve'],'0.00000000')
vested1 = float(out['result']['balance'])
params = str([addresses[4], 1]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, False, "tl_getbalance",params)
# self.log.info(out)
assert_equal(out['error'], None)
# assert_equal(out['result']['balance'],'150.51498000') # 15.05% of vesting (NOTE: check the round up)
assert_equal(out['result']['reserve'],'0.00000000')
vested2 = float(out['result']['balance'])
bvested.append(vested2)
self.log.info("Checking unvested ALLs ")
params = str([addresses[1]]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, False, "tl_getunvested",params)
# self.log.info(out)
assert_equal(out['result']['unvested'],'849.48502000')
unvested1 = float(out['result']['unvested'])
volume_ltc.append(volume1)
vested.append(vested1)
unvested.append(unvested1)
self.log.info("Checking vesting info")
out = tradelayer_HTTP(conn, headers, False, "tl_getvesting_info",params)
# self.log.info(out)
assert_equal(out['error'], None)
assert_equal(out['result']['propertyid'], 3)
assert_equal(out['result']['name'], 'Vesting Tokens')
assert_equal(out['result']['data'], 'Divisible Tokens')
assert_equal(out['result']['url'], 'www.tradelayer.org')
assert_equal(out['result']['divisible'], True)
assert_equal(out['result']['issuer'], 'QgKxFUBgR8y4xFy3s9ybpbDvYNKr4HTKPb')
assert_equal(out['result']['activation block'], 100)
assert_equal(out['result']['litecoin volume'], '400.00000000')
assert_equal(out['result']['vested percentage'], '15.05149900')
assert_equal(out['result']['last vesting block'], 1041)
assert_equal(out['result']['total vested'], '301.02996000')
assert_equal(out['result']['owners'], 3)
assert_equal(out['result']['total tokens'], '1500000.00000000')
assert_equal(out['result']['kyc_ids allowed'], '[]')
# Adding 200 LTCs in each step
for i in range(0,20):
self.log.info("Loop number:"+str(i))
# self.log.info("Checking the offer in DEx")
# params = str([addresses[2]]).replace("'",'"')
# out = tradelayer_HTTP(conn, headers, True, "tl_getactivedexsells",params)
# self.log.info(out)
self.log.info("Accepting the part of the offer")
params = str([addresses[3], addresses[2], 4, "1000"]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, True, "tl_senddexaccept",params)
assert_equal(out['error'], None)
# self.log.info(out)
self.nodes[0].generate(1)
self.log.info("Checking the offer status")
params = str([addresses[2]]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, True, "tl_getactivedexsells",params)
# self.log.info(out)
assert_equal(out['error'], None)
assert_equal(out['result'][0]['accepts'][0]['buyer'], addresses[3])
assert_equal(out['result'][0]['accepts'][0]['amountdesired'], '1000.00000000')
assert_equal(out['result'][0]['accepts'][0]['ltcstopay'], '200.00000000')
self.log.info("Paying the tokens")
params = str([addresses[3], addresses[2], "200"]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, True, "tl_send_dex_payment",params)
# self.log.info(out)
self.nodes[0].generate(1)
time.sleep(0.35)
self.log.info("Checking token balance in buyer address")
params = str([addresses[3], 4]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, True, "tl_getbalance",params)
# self.log.info(out)
assert_equal(out['error'], None)
nresult = 2000 + 1000 * (i + 1)
sresult = str(nresult)+'.00000000'
assert_equal(out['result']['balance'], sresult)
assert_equal(out['result']['reserve'],'0.00000000')
self.log.info("Checking LTC Volume")
params = str([4, 1, 99999]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, True, "tl_get_ltcvolume",params)
# self.log.info(out)
assert_equal(out['error'], None)
nvolume = 400 + 200 * (i + 1)
svolume = str(nvolume)+'.00000000'
assert_equal(out['result']['volume'], svolume)
volume1 = float(out['result']['volume'])
self.nodes[0].generate(1)
self.log.info("Checking vesting in in addresses[1]")
params = str([addresses[1], 1]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, False, "tl_getbalance",params)
# self.log.info(out)
assert_equal(out['error'], None)
assert_equal(out['result']['reserve'],'0.00000000')
vested1 = float(out['result']['balance'])
self.log.info("Checking unvested ALLs in addresses[1]")
params = str([addresses[1]]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, False, "tl_getunvested",params)
# self.log.info(out)
unvested1 = float(out['result']['unvested'])
assert_equal(unvested1 + vested1, 1000)
volume_ltc.append(volume1)
vested.append(vested1)
unvested.append(unvested1)
self.log.info("Checking vesting in addresses[4]")
params = str([addresses[4], 1]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, False, "tl_getbalance",params)
# self.log.info(out)
assert_equal(out['error'], None)
assert_equal(out['result']['reserve'],'0.00000000')
vested2 = float(out['result']['balance'])
bvested.append(vested2)
self.log.info("Checking unvested ALLs in addresses[4]")
params = str([addresses[4]]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, False, "tl_getunvested",params)
# self.log.info(out)
unvested2 = float(out['result']['unvested'])
assert_equal(unvested2 + vested2, 500)
time.sleep(0.2)
self.log.info("Checking LTC Volume")
params = str([4, 1, 99999]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, True, "tl_get_ltcvolume",params)
# self.log.info(out)
assert_equal(out['error'], None)
assert_equal(out['result']['volume'], '4400.00000000')
# At this volume the vesting must be 41.08 %
self.log.info("Checking final vesting in addresses[1]")
params = str([addresses[1], 1]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, False, "tl_getbalance",params)
# self.log.info(out)
assert_equal(out['error'], None)
assert_equal(out['result']['balance'],'410.86307000')
self.log.info("Checking final unvested ALLs in addresses[1]")
params = str([addresses[1]]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, False, "tl_getunvested",params)
# self.log.info(out)
assert_equal(out['error'], None)
assert_equal(out['result']['unvested'], '589.13693000')
self.log.info("Checking final vesting in addresses[4]")
params = str([addresses[4], 1]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, False, "tl_getbalance",params)
# self.log.info(out)
assert_equal(out['error'], None)
assert_equal(out['result']['balance'],'205.43153500')
self.log.info("Checking final unvested ALLs in addresses[4]")
params = str([addresses[4]]).replace("'",'"')
out = tradelayer_HTTP(conn, headers, False, "tl_getunvested",params)
# self.log.info(out)
assert_equal(out['error'], None)
assert_equal(out['result']['unvested'], '294.56846500')
# pl.plot(volume_ltc, vested,'-b', label='vested amount for addresses[1]')
# pl.plot(volume_ltc, bvested,'-r', label='vested amount for addresses[3]')
# pl.legend(loc='upper left')
# pl.show()
conn.close()
self.stop_nodes()
if __name__ == '__main__':
VestingBasicsTest ().main ()
| 41.854197
| 128
| 0.597699
| 3,324
| 28,419
| 5.015042
| 0.108604
| 0.10096
| 0.125975
| 0.129574
| 0.806539
| 0.777864
| 0.758908
| 0.722196
| 0.709238
| 0.691302
| 0
| 0.058471
| 0.224885
| 28,419
| 678
| 129
| 41.915929
| 0.698202
| 0.090925
| 0
| 0.615034
| 0
| 0
| 0.245065
| 0.01118
| 0
| 0
| 0
| 0
| 0.343964
| 1
| 0.006834
| false
| 0.006834
| 0.020501
| 0
| 0.029613
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a2d6865a73d54f43449b382e2b06520fa79b40fe
| 6,309
|
py
|
Python
|
tools/gen_dispatcher_collect_macros.py
|
RaviGaddipati/libsimdpp
|
d09298cef781bfd090f61fbf8d9c1c8bf5d4374a
|
[
"BSL-1.0"
] | null | null | null |
tools/gen_dispatcher_collect_macros.py
|
RaviGaddipati/libsimdpp
|
d09298cef781bfd090f61fbf8d9c1c8bf5d4374a
|
[
"BSL-1.0"
] | null | null | null |
tools/gen_dispatcher_collect_macros.py
|
RaviGaddipati/libsimdpp
|
d09298cef781bfd090f61fbf8d9c1c8bf5d4374a
|
[
"BSL-1.0"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (C) 2015 Povilas Kanapickas <povilas@radix.lt>
#
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
# Generates the simdpp/dispatch/collect_macros_generated.h file
# Use as $ ./tools/gen_dispatcher_collect_macros.py > simdpp/dispatch/collect_macros_generated.h
from gen_common import output_template
num_archs = 15
single_arch_template = '''
#ifdef SIMDPP_DISPATCH_ARCH$num$
#define SIMDPP_ARCH_PP_LIST SIMDPP_DISPATCH_ARCH$num$
#include <simdpp/detail/preprocess_single_arch.h>
// Use the results of preprocess_single_arch.h to define
// SIMDPP_DISPATCH_$num$_NAMESPACE
#if SIMDPP_ARCH_PP_NS_USE_NULL
#define SIMDPP_DISPATCH_$num$_NS_ID_NULL SIMDPP_INSN_ID_NULL
#else
#define SIMDPP_DISPATCH_$num$_NS_ID_NULL
#endif
#if SIMDPP_ARCH_PP_NS_USE_SSE2
#define SIMDPP_DISPATCH_$num$_NS_ID_SSE2 SIMDPP_INSN_ID_SSE2
#else
#define SIMDPP_DISPATCH_$num$_NS_ID_SSE2
#endif
#if SIMDPP_ARCH_PP_NS_USE_SSE3
#define SIMDPP_DISPATCH_$num$_NS_ID_SSE3 SIMDPP_INSN_ID_SSE3
#else
#define SIMDPP_DISPATCH_$num$_NS_ID_SSE3
#endif
#if SIMDPP_ARCH_PP_NS_USE_SSSE3
#define SIMDPP_DISPATCH_$num$_NS_ID_SSSE3 SIMDPP_INSN_ID_SSSE3
#else
#define SIMDPP_DISPATCH_$num$_NS_ID_SSSE3
#endif
#if SIMDPP_ARCH_PP_NS_USE_SSE4_1
#define SIMDPP_DISPATCH_$num$_NS_ID_SSE4_1 SIMDPP_INSN_ID_SSE4_1
#else
#define SIMDPP_DISPATCH_$num$_NS_ID_SSE4_1
#endif
#if SIMDPP_ARCH_PP_NS_USE_AVX
#define SIMDPP_DISPATCH_$num$_NS_ID_AVX SIMDPP_INSN_ID_AVX
#else
#define SIMDPP_DISPATCH_$num$_NS_ID_AVX
#endif
#if SIMDPP_ARCH_PP_NS_USE_AVX2
#define SIMDPP_DISPATCH_$num$_NS_ID_AVX2 SIMDPP_INSN_ID_AVX2
#else
#define SIMDPP_DISPATCH_$num$_NS_ID_AVX2
#endif
#if SIMDPP_ARCH_PP_NS_USE_FMA3
#define SIMDPP_DISPATCH_$num$_NS_ID_FMA3 SIMDPP_INSN_ID_FMA3
#else
#define SIMDPP_DISPATCH_$num$_NS_ID_FMA3
#endif
#if SIMDPP_ARCH_PP_NS_USE_FMA4
#define SIMDPP_DISPATCH_$num$_NS_ID_FMA4 SIMDPP_INSN_ID_FMA4
#else
#define SIMDPP_DISPATCH_$num$_NS_ID_FMA4
#endif
#if SIMDPP_ARCH_PP_NS_USE_XOP
#define SIMDPP_DISPATCH_$num$_NS_ID_XOP SIMDPP_INSN_ID_XOP
#else
#define SIMDPP_DISPATCH_$num$_NS_ID_XOP
#endif
#if SIMDPP_ARCH_PP_NS_USE_AVX512F
#define SIMDPP_DISPATCH_$num$_NS_ID_AVX512F SIMDPP_INSN_ID_AVX512F
#else
#define SIMDPP_DISPATCH_$num$_NS_ID_AVX512F
#endif
#if SIMDPP_ARCH_PP_NS_USE_NEON
#define SIMDPP_DISPATCH_$num$_NS_ID_NEON SIMDPP_INSN_ID_NEON
#else
#define SIMDPP_DISPATCH_$num$_NS_ID_NEON
#endif
#if SIMDPP_ARCH_PP_NS_USE_NEON_FLT_SP
#define SIMDPP_DISPATCH_$num$_NS_ID_NEON_FLT_SP SIMDPP_INSN_ID_NEON_FLT_SP
#else
#define SIMDPP_DISPATCH_$num$_NS_ID_NEON_FLT_SP
#endif
#if SIMDPP_ARCH_PP_NS_USE_ALTIVEC
#define SIMDPP_DISPATCH_$num$_NS_ID_ALTIVEC SIMDPP_INSN_ID_ALTIVEC
#else
#define SIMDPP_DISPATCH_$num$_NS_ID_ALTIVEC
#endif
#define SIMDPP_DISPATCH_$num$_NAMESPACE SIMDPP_PP_PASTE15(arch, $n$
SIMDPP_DISPATCH_$num$_NS_ID_NULL, $n$
SIMDPP_DISPATCH_$num$_NS_ID_SSE2, $n$
SIMDPP_DISPATCH_$num$_NS_ID_SSE3, $n$
SIMDPP_DISPATCH_$num$_NS_ID_SSSE3, $n$
SIMDPP_DISPATCH_$num$_NS_ID_SSE4_1, $n$
SIMDPP_DISPATCH_$num$_NS_ID_AVX, $n$
SIMDPP_DISPATCH_$num$_NS_ID_AVX2, $n$
SIMDPP_DISPATCH_$num$_NS_ID_AVX512F, $n$
SIMDPP_DISPATCH_$num$_NS_ID_FMA3, $n$
SIMDPP_DISPATCH_$num$_NS_ID_FMA4, $n$
SIMDPP_DISPATCH_$num$_NS_ID_XOP, $n$
SIMDPP_DISPATCH_$num$_NS_ID_NEON, $n$
SIMDPP_DISPATCH_$num$_NS_ID_NEON_FLT_SP, $n$
SIMDPP_DISPATCH_$num$_NS_ID_ALTIVEC)
#define SIMDPP_DISPATCH_$num$_FN_REGISTER(ARRAY,NAME,FUN_TYPE) $n$
ARRAY[$num$-1] = SIMDPP_DISPATCH_$num$_NAMESPACE::register_fn_##NAME((FUN_TYPE)(NULL));
#define SIMDPP_DISPATCH_$num$_FN_DECLARE(NAME,FUN_TYPE) $n$
namespace SIMDPP_DISPATCH_$num$_NAMESPACE { $n$
::simdpp::detail::FnVersion register_fn_##NAME(FUN_TYPE); }
#undef SIMDPP_ARCH_PP_LIST
#else
#define SIMDPP_DISPATCH_$num$_FN_REGISTER(ARRAY,NAME,FUN_TYPE)
#define SIMDPP_DISPATCH_$num$_FN_DECLARE(NAME,FUN_TYPE)
#endif'''
single_fn_register_template = ' SIMDPP_DISPATCH_$num$_FN_REGISTER(ARRAY,NAME,FUN_TYPE) $n$'
single_fn_declare_template = ' SIMDPP_DISPATCH_$num$_FN_DECLARE(NAME,FUN_TYPE) $n$'
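# For a given arch slot N, the block above pastes SIMDPP_DISPATCH_N_NAMESPACE
# together from 'arch' plus one SIMDPP_INSN_ID_* token per instruction set
# enabled for that slot, so every compiled architecture gets its own namespace
# and its own register_fn_* declare/register macro pair (the exact name is
# assembled by the C preprocessor at build time).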
# print the actual file
print('''/* Copyright (C) 2015 Povilas Kanapickas <povilas@radix.lt>
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE_1_0.txt or copy at
http://www.boost.org/LICENSE_1_0.txt)
*/
// This file is generated automatically. See tools/gen_dispatcher_collect_macros.py
#ifndef LIBSIMDPP_DISPATCH_COLLECT_MACROS_GENERATED_H
#define LIBSIMDPP_DISPATCH_COLLECT_MACROS_GENERATED_H
#ifndef LIBSIMDPP_SIMD_H
#error "This file must be included through simd.h"
#endif
#if SIMDPP_EMIT_DISPATCHER
''')
print('#define SIMDPP_DISPATCH_MAX_ARCHS ' + str(num_archs) + '\n')
for i in range(1, num_archs+1):
    vars = { 'num' : str(i) }
    output_template(single_arch_template, vars)
print('''
#define SIMDPP_DISPATCH_DECLARE_FUNCTIONS(NAME,FUN_TYPE) \\''')
for i in range(1, num_archs+1):
    vars = { 'num' : str(i) }
    output_template(single_fn_declare_template, vars)
print('''
#define SIMDPP_DISPATCH_COLLECT_FUNCTIONS(ARRAY,NAME,FUN_TYPE) \\''')
for i in range(1, num_archs+1):
    vars = { 'num' : str(i) }
    output_template(single_fn_register_template, vars)
print('''
#endif // SIMDPP_EMIT_DISPATCHER
#endif
''')
| 36.680233 | 96 | 0.687906 | 885 | 6,309 | 4.370621 | 0.133333 | 0.213547 | 0.228542 | 0.206308 | 0.740176 | 0.703723 | 0.547311 | 0.266029 | 0.217166 | 0.166236 | 0 | 0.017946 | 0.231574 | 6,309 | 171 | 97 | 36.894737 | 0.779909 | 0.066255 | 0 | 0.308824 | 0 | 0.007353 | 0.899184 | 0.480619 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.007353 | 0 | 0.007353 | 0.036765 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
| 0 | 6 | a2ec9c71e1c843657fa87f07f58398dfc5d57038 | 117,639 | py | Python | main.py | hannu-hell/digu_card_game | 64e529d0b4f1b9414722f8438817233c2cffded5 | ["MIT"] | 5 | 2020-05-28T05:41:28.000Z | 2020-05-29T03:08:32.000Z | main.py | hannu-hell/digu_card_game | 64e529d0b4f1b9414722f8438817233c2cffded5 | ["MIT"] | null | null | null | main.py | hannu-hell/digu_card_game | 64e529d0b4f1b9414722f8438817233c2cffded5 | ["MIT"] | null | null | null |
from digu import *
from tkinter import *
import pygame
import random
from tkinter import messagebox
# tkinter preconditions
game_quit = False
check_for_quit = False
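# quit_pi() runs when the player-information window is closed from the window
# manager; it tears the dialog down and flags the pygame stage to be skipped.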
def quit_pi():
global run, game_quit, check_for_quit
initial_info.destroy()
game_quit = True
check_for_quit = True
player_information = []
Trumps = False
# Pregame information
# tkinter Functions
def get_info():
global Trumps
player_name = text_1.get()
if player_name == '':
messagebox.showerror('Error', 'Please Enter a valid Player Name')
else:
player_information.append(player_name)
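# coin_toss() comes from the digu module (not shown here); it presumably
# returns True when the player's Heads/Tails call wins the toss, which gives
# the player the right to name trumps.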
if coin_toss(call_trumps.get()):
Trumps = True
else:
Trumps = False
initial_info.quit()
initial_info.destroy()
initial_info = Tk()
initial_info.title('Digu Player Information')
frame_1 = Frame(initial_info)
frame_1.pack(side=LEFT)
label_1 = Label(frame_1, text='Player Name')
label_1.pack(side=TOP)
text_1 = Entry(frame_1)
text_1.pack(side=TOP)
label_2 = Label(frame_1, text='Call Heads or Tails for calling Trumps')
label_2.pack(side=TOP)
call_trumps = StringVar(initial_info)
call_trumps.set('Heads')
option = OptionMenu(frame_1, call_trumps, 'Heads', 'Tails')
option.pack(side=TOP)
button_1 = Button(frame_1, text='Confirm', command=get_info)
button_1.pack(side=BOTTOM)
initial_info.protocol("WM_DELETE_WINDOW", quit_pi) # if 'x' on the window is pressed then the function is performed
initial_info.mainloop()
# Py_game Preconditions
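# The pygame stage only runs if the tkinter dialog was confirmed rather than
# closed via quit_pi().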
if game_quit is False:
pygame.init()
win = pygame.display.set_mode((1000, 668))
pygame.display.set_caption('DIGU')
background = pygame.image.load('wooden_table.jpg')
dhu = pygame.image.load('dhufun.png')
icon = pygame.image.load('cards_icon.png')
pygame.display.set_icon(icon)
font = pygame.font.Font(None, 30)
i_font = pygame.font.Font(None, 20)
player_name = font.render(player_information[0].upper(), True, (0, 150, 255))
player_teammate = font.render(player_information[0].upper() + " TEAMMATE", True, (0, 150, 255))
computer = font.render('COMP', True, (255, 255, 255))
computer = pygame.transform.rotate(computer, 90)
comp_team_mate = font.render("COMP TEAMMATE", True, (255, 255, 255))
comp_team_mate = pygame.transform.rotate(comp_team_mate, 270)
main_deck = Deck()
main_deck.shuffle()
player_hand = main_deck.deal_hand(13)
player_teammate_hand = main_deck.deal_hand(13)
computer_hand = main_deck.deal_hand(13)
computer_teammate_hand = main_deck.deal_hand(13)
displayed_names = False
player_teammate_X = 350
player_trump_x = 750
player_trump_change = 1
player_trump_selected = False
comp_trump_selected = False
trump = ''
stop_initialize = False
player_round_done = False
comp_round_done = False
player_tm_round_done = False
comp_tm_round_done = False
comp_trumps = []
comp_teammate_trumps = []
player_teammate_trumps = []
player_played_hands = []
comp_played_hands = []
player_teammate_played_hands = []
comp_teammate_played_hands = []
trump_list_chosen = False
comp_won_hands = 0
player_won_hands = 0
comp_tm_won_hands = 0
player_tm_won_hands = 0
all_rounds_done = False
winner = None
game_intro = True
x1 = 90
y1 = 260
x2 = 250
y2 = 480
x3 = 820
y3 = 260
x4 = 250
y4 = 130
round_status = dict(round1=False, round2=False, round3=False, round4=False, round5=False, round6=False,
round7=False,
round8=False, round9=False, round10=False, round11=False, round12=False, round13=False)
deck_clubs = [pygame.image.load('Clubs 1.png'), pygame.image.load('Clubs 2.png'), pygame.image.load('Clubs 3.png'),
pygame.image.load('Clubs 4.png'), pygame.image.load('Clubs 5.png'), pygame.image.load('Clubs 6.png'),
pygame.image.load('Clubs 7.png'),
pygame.image.load('Clubs 8.png'), pygame.image.load('Clubs 9.png'), pygame.image.load('Clubs 10.png'),
pygame.image.load('Clubs 11.png'),
pygame.image.load('Clubs 12.png'), pygame.image.load('Clubs 13.png')]
deck_diamonds = [pygame.image.load('Diamond 1.png'), pygame.image.load('Diamond 2.png'),
pygame.image.load('Diamond 3.png'), pygame.image.load('Diamond 4.png'),
pygame.image.load('Diamond 5.png'), pygame.image.load('Diamond 6.png'),
pygame.image.load('Diamond 7.png'), pygame.image.load('Diamond 8.png'),
pygame.image.load('Diamond 9.png'),
pygame.image.load('Diamond 10.png'), pygame.image.load('Diamond 11.png'),
pygame.image.load('Diamond 12.png'), pygame.image.load('Diamond 13.png')]
deck_hearts = [pygame.image.load('Hearts 1.png'), pygame.image.load('Hearts 2.png'), pygame.image.load('Hearts 3.png'),
pygame.image.load('Hearts 4.png'), pygame.image.load('Hearts 5.png'), pygame.image.load('Hearts 6.png'),
pygame.image.load('Hearts 7.png'),
pygame.image.load('Hearts 8.png'), pygame.image.load('Hearts 9.png'), pygame.image.load('Hearts 10.png'),
pygame.image.load('Hearts 11.png'), pygame.image.load('Hearts 12.png'),
pygame.image.load('Hearts 13.png')]
deck_spades = [pygame.image.load('Spades 1.png'), pygame.image.load('Spades 2.png'), pygame.image.load('Spades 3.png'),
pygame.image.load('Spades 4.png'), pygame.image.load('Spades 5.png'), pygame.image.load('Spades 6.png'),
pygame.image.load('Spades 7.png'),
pygame.image.load('Spades 8.png'), pygame.image.load('Spades 9.png'), pygame.image.load('Spades 10.png'),
pygame.image.load('Spades 11.png'),
pygame.image.load('Spades 12.png'), pygame.image.load('Spades 13.png')]
d_intro_suit = pygame.image.load('diamonds.png')
i_intro_suit = pygame.image.load('clubs.png')
g_intro_suit = pygame.image.load('hearts.png')
u_intro_suit = pygame.image.load('spades.png')
card_back = pygame.image.load('Back Red 1.png')
player_turn = pygame.image.load('player_turn.png')
player_trump = pygame.image.load('player_trump.png')
comp_turn = pygame.image.load('comp_turn.png')
comp_trump = pygame.image.load('comp_trump.png')
first = [num for num in range(20, 920 + 75, 75)]
second = [num for num in range(67, 967 + 75, 75)]
card_images = [deck_hearts, deck_clubs, deck_spades, deck_diamonds]
player_deck_rectangle = pygame.Rect(20, 540, 900, 100)
player_deck_bg = pygame.image.load('ply_deck_bg.jpg')
comp_deck_rectangle = pygame.Rect(40, 150, 50, 405)
comp_deck = pygame.image.load('comp_deck.jpg')
comp_tm_deck_rectangle = pygame.Rect(900, 150, 50, 405)
comp_tm_deck = pygame.image.load('comp_tm_deck.jpg')
player_tm_deck_rectangle = pygame.Rect(350, 60, 610, 65)
player_tm_deck = pygame.image.load('ply_tm_deck.jpg')
play_area_rectangle = pygame.Rect(430, 267, 62, 72)
play_area = pygame.image.load('play_area.jpg')
# py_game Functions
# Initialize_game functions
def display_player_names():
win = pygame.display.set_mode((1000, 668))
win.blit(background, (0, 0))
win.blit(player_name, (450, 630))
win.blit(player_teammate, (400, 20))
win.blit(computer, (10, 300))
win.blit(comp_team_mate, (970, 220))
def player_deck_clear():
win.blit(player_deck_bg, player_deck_rectangle)
win.blit(player_name, (450, 630))
def comp_deck_clear():
win.blit(comp_deck, comp_deck_rectangle)
def comp_tm_deck_clear():
win.blit(comp_tm_deck, comp_tm_deck_rectangle)
def player_tm_deck_clear():
win.blit(player_tm_deck, player_tm_deck_rectangle)
def play_area_clear():
win.blit(play_area, play_area_rectangle)
def clear_all_decks():
player_deck_clear()
comp_deck_clear()
comp_tm_deck_clear()
player_tm_deck_clear()
play_area_clear()
def start_new_round():
global player_round_done, player_tm_round_done, comp_round_done, comp_tm_round_done
if player_round_done and player_tm_round_done and comp_round_done and comp_tm_round_done:
player_round_done = False
player_tm_round_done = False
comp_round_done = False
comp_tm_round_done = False
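# match_cards() looks up the face image for a card by walking the suits and
# values sequences (expected to come from the digu star import) and blits the
# matching sprite at (x, y).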
def match_cards(i, x, y):
k = 0
j = 0
for s in suits:
if i.suit == s:
for v in values:
if i.value == v:
win.blit(card_images[j][k], (x, y))
if k <= 12:
k += 1
if j <= 3:
j += 1
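# card_assignment_for_players() redraws every hand: the player's cards face up
# along the bottom row, and face-down card backs for the computer, the
# computer's teammate and the player's teammate, wrapping at the given limits.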
def card_assignment_for_players(x_limit, no_comp, no_comptm, no_plytm, comp_limit, comptm_limit, playertm_limit):
global player_hand
x = 20
y = 550
computer_y = 150
computer_teammate_y = 150
player_teammate_x = 350
for i in player_hand:
match_cards(i, x, y)
x += 75
if x > x_limit:
x = 20
for card in range(no_comp):
win.blit(card_back, (40, computer_y))
computer_y += 20
if computer_y > comp_limit:
computer_y = 150
for card in range(no_comptm):
win.blit(card_back, (900, computer_teammate_y))
computer_teammate_y += 20
if computer_teammate_y > comptm_limit:
computer_teammate_y = 150
for card in range(no_plytm):
win.blit(card_back, (player_teammate_x, 60))
player_teammate_x += 20
if player_teammate_x > playertm_limit:
player_teammate_x = 350
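# player_select_trump() waits for a left click on one of the player's cards;
# the suit of the clicked card becomes trump.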
def player_select_trump():
global player_trump_selected, trump, run
for event in pygame.event.get():
if event.type == pygame.QUIT:
run = False
if event.type == pygame.MOUSEBUTTONDOWN:
if event.button == 1:
if 550 <= event.pos[1] <= 612:
for i in range(len(first)):
if first[i] <= event.pos[0] <= second[i]:
trump = player_hand[i].suit
player_trump_selected = True
show_trump = font.render('TRUMP IS ' + trump.upper(), True, (0, 150, 255))
win.blit(show_trump, (20, 60))
win.blit(player_turn, (700, 480))
def comp_select_trump():
global comp_trump_selected, trump
hearts = 0
spades = 0
diamonds = 0
clubs = 0
for i in computer_hand:
if i.suit == 'hearts':
hearts += 1
if i.suit == 'spades':
spades += 1
if i.suit == 'clubs':
clubs += 1
if i.suit == 'diamonds':
diamonds += 1
if hearts >= spades and hearts >= clubs and hearts >= diamonds:
trump = 'hearts'
if spades >= hearts and spades >= clubs and spades >= diamonds:
trump = 'spades'
if clubs >= hearts and clubs >= spades and clubs >= diamonds:
trump = 'clubs'
if diamonds >= hearts and diamonds >= spades and diamonds >= clubs:
trump = 'diamonds'
show_trump = font.render('TRUMP IS ' + trump.upper(), True, (255, 255, 255))
win.blit(show_trump, (20, 60))
win.blit(comp_turn, (90, 180))
comp_trump_selected = True
def trump_movement():
global player_trump_x, player_trump_change
win.blit(player_trump, (player_trump_x, 480))
player_trump_x += player_trump_change
if player_trump_x > 800:
player_trump_change = -1
elif player_trump_x < 750:
player_trump_change = 1
def determine_trumps():
if Trumps:
win.blit(player_turn, (700, 480))
trump_movement()
player_select_trump()
elif Trumps is False:
comp_select_trump()
card_assignment_for_players(920, 13, 13, 13, 390, 390, 590)
def initialize_game():
if player_trump_selected is False and comp_trump_selected is False:
display_player_names()
card_assignment_for_players(920, 13, 13, 13, 390, 390, 590)
determine_trumps()
# Round Functions
def initial_layout():
display_player_names()
show_trump = font.render('TRUMP IS ' + trump.upper(), True, (255, 255, 255))
win.blit(show_trump, (20, 60))
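# trump_list() pulls every trump card out of the three AI hands into separate
# trump lists once trumps have been decided, so the play logic below can treat
# trumps specially.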
def trump_list():
global trump_list_chosen
if player_trump_selected or comp_trump_selected:
for i in computer_hand:
if i.suit == trump:
comp_trumps.append(i)
computer_hand.remove(i)
for j in computer_teammate_hand:
if j.suit == trump:
comp_teammate_trumps.append(j)
computer_teammate_hand.remove(j)
for k in player_teammate_hand:
if k.suit == trump:
player_teammate_trumps.append(k)
player_teammate_hand.remove(k)
trump_list_chosen = True
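# check_cheat() enforces the follow-suit rule for the human player: if the
# player still holds a card of the suit that was led this round but tries to
# play a different suit, the attempted play is rejected.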
def check_cheat(rnd, card):
if comp_round_done:
for j in player_hand:
if winner is None or winner == 'comp':
if j.suit == comp_played_hands[rnd].suit:
if card.suit != comp_played_hands[rnd].suit:
return True
elif winner == 'comp_tm':
if j.suit == comp_teammate_played_hands[rnd].suit:
if card.suit != comp_teammate_played_hands[rnd].suit:
return True
elif winner == 'player_tm':
if j.suit == player_teammate_played_hands[rnd].suit:
if card.suit != player_teammate_played_hands[rnd].suit:
return True
return False
if not comp_round_done:
return False
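# player_play() handles the human turn: it waits for a left click on one of
# the card slots, re-prompts if check_cheat() says the choice breaks
# follow-suit, and otherwise blits the card to the play area and ends the turn.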
def player_play(cl, rnd, turn, a, b, c, d, e, f, g):
global player_round_done, run
rnd -= 1
# event = pygame.event.wait()
for event in pygame.event.get():
if event.type == pygame.QUIT:
run = False
if event.type == pygame.MOUSEBUTTONDOWN:
if event.button == 1:
if 550 <= event.pos[1] <= 612:
for i in range(turn):
if first[i] <= event.pos[0] <= second[i]:
if cl:
clear_all_decks()
win.blit(player_turn, (700, 480))
if check_cheat(rnd, player_hand[i]):
player_play(cl, rnd, turn, a, b, c, d, e, f, g)
elif check_cheat(rnd, player_hand[i]) is False:
match_cards(player_hand[i], 450, 330)
player_played_hands.append(player_hand.pop(i))
player_deck_clear()
card_assignment_for_players(a, b, c, d, e, f, g)
player_round_done = True
def have_suits(hand, s):
for i in hand:
if i.suit == s:
return True
return False
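# The helpers below pick cards for the AI players. check_least_high_card()
# appears to return the cheapest card in `hand` of the same suit that still
# outranks card c (aces ranking highest), or None when no such card exists.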
def check_least_high_card(c, hand):
n = 0
for s in suits:
for m in range(13):
if c.suit == s and c.value == values[n]:
for k in range(13):
for i in hand:
if n == 0:
return None
elif n == 12:
if i.suit == s and i.value == 'A':
return i
elif 0 < n < 12:
if i.suit == s and i.value == values[n + 1]:
return i
if n == 12:
break
n += 1
if n > 12:
n = 0
break
if n == 12:
break
n += 1
n = 0
def check_lowest_value_card_of_suit(hand, s):
n = 0
for k in range(13):
for i in hand:
if i.suit == s:
if i.value == values[n + 1]:
return i
n += 1
if n == 12:
for j in hand:
if j.suit == s:
if j.value == 'A':
return j
if n > 12:
return None
def check_lowest_value_card(hand):
n = 0
for k in range(13):
for i in hand:
if i.value == values[n + 1]:
return i
n += 1
if n == 12:
for j in hand:
if j.value == 'A':
return j
if n > 12:
return None
def check_highest_value_card_of_suit(hand, s):
n = 13
for j in hand:
if j.suit == s:
if j.value == "A":
return j
for k in range(13):
for i in hand:
if i.suit == s:
if i.value == values[n - 1]:
return i
n -= 1
if n < 2:
return None
def check_highest_value_card(hand):
n = 13
for j in hand:
if j.value == "A":
return j
for k in range(13):
for i in hand:
if i.value == values[n - 1]:
return i
n -= 1
if n < 2:
return None
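# The *_set_play() helpers commit a chosen card: draw it in the play area,
# move it from the hand (or the trump list) to the played pile, clear that
# seat's deck area and redraw the remaining cards.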
def comp_tm_set_play(played_trumps, card, a, b, c, d, e, f, g):
global comp_tm_round_done
if played_trumps is False:
match_cards(card, 470, 300)
comp_teammate_played_hands.append(computer_teammate_hand.pop(computer_teammate_hand.index(card)))
comp_tm_deck_clear()
card_assignment_for_players(a, b, c, d, e, f, g)
comp_tm_round_done = True
elif played_trumps:
match_cards(card, 470, 300)
comp_teammate_played_hands.append(comp_teammate_trumps.pop(comp_teammate_trumps.index(card)))
comp_tm_deck_clear()
card_assignment_for_players(a, b, c, d, e, f, g)
comp_tm_round_done = True
def player_tm_set_play(played_trumps, card, a, b, c, d, e, f, g):
global player_tm_round_done
if played_trumps is False:
match_cards(card, 450, 270)
player_teammate_played_hands.append(player_teammate_hand.pop(player_teammate_hand.index(card)))
player_tm_deck_clear()
card_assignment_for_players(a, b, c, d, e, f, g)
player_tm_round_done = True
elif played_trumps:
match_cards(card, 450, 270)
player_teammate_played_hands.append(player_teammate_trumps.pop(player_teammate_trumps.index(card)))
player_tm_deck_clear()
card_assignment_for_players(a, b, c, d, e, f, g)
player_tm_round_done = True
def comp_set_play(played_trumps, card, a, b, c, d, e, f, g):
global comp_round_done
if played_trumps is False:
match_cards(card, 430, 300)
comp_played_hands.append(computer_hand.pop(computer_hand.index(card)))
comp_deck_clear()
card_assignment_for_players(a, b, c, d, e, f, g)
comp_round_done = True
elif played_trumps:
match_cards(card, 430, 300)
comp_played_hands.append(comp_trumps.pop(comp_trumps.index(card)))
comp_deck_clear()
card_assignment_for_players(a, b, c, d, e, f, g)
comp_round_done = True
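# comp_tm_play() chooses the computer teammate's card for round `rnd`. The
# branches cover every combination of who has already played and which of the
# table cards are trumps; the general policy is: follow suit with the cheapest
# winning card, otherwise trump low, otherwise discard the lowest card held.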
def comp_tm_play(rnd, ply_tm_card_pos):
x_limit = 920 - (75 * rnd)
cards = 13 - rnd
card_pos = 390 - (20 * rnd)
rnd -= 1
c_pos = len(comp_played_hands)
p_pos = len(player_played_hands)
ct_pos = len(comp_teammate_played_hands)
pt_pos = len(player_teammate_played_hands)
if ct_pos == rnd and pt_pos == rnd + 1 and c_pos == rnd + 1 and p_pos == rnd + 1:
a = player_teammate_played_hands[rnd]
b = comp_played_hands[rnd]
c = player_played_hands[rnd]
if a.suit != trump and b.suit != trump and c.suit != trump:
if a.suit == b.suit == c.suit:
if have_suits(computer_teammate_hand, a.suit):
d = compare_cards(compare_cards(a, b), c)
d0 = check_least_high_card(d, computer_teammate_hand)
if d0 is not None:
comp_tm_set_play(False, d0, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
elif d0 is None:
d1 = check_lowest_value_card_of_suit(computer_teammate_hand, a.suit)
comp_tm_set_play(False, d1, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
elif have_suits(computer_teammate_hand, a.suit) is False:
if len(comp_teammate_trumps) > 0:
d2 = check_lowest_value_card(comp_teammate_trumps)
comp_tm_set_play(True, d2, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
elif len(comp_teammate_trumps) == 0:
d3 = check_lowest_value_card(computer_teammate_hand)
comp_tm_set_play(False, d3, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
elif a.suit == b.suit and b.suit != c.suit:
if have_suits(computer_teammate_hand, a.suit):
d4 = check_least_high_card(compare_cards(a, b), computer_teammate_hand)
if d4 is not None:
comp_tm_set_play(False, d4, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
elif d4 is None:
d5 = check_lowest_value_card_of_suit(computer_teammate_hand, a.suit)
comp_tm_set_play(False, d5, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
elif have_suits(computer_teammate_hand, a.suit) is False:
if len(comp_teammate_trumps) > 0:
d6 = check_lowest_value_card(comp_teammate_trumps)
comp_tm_set_play(True, d6, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
elif len(comp_teammate_trumps) == 0:
d7 = check_lowest_value_card(computer_teammate_hand)
comp_tm_set_play(False, d7, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
elif a.suit == c.suit and c.suit != b.suit:
if have_suits(computer_teammate_hand, a.suit):
d8 = check_least_high_card(compare_cards(a, c), computer_teammate_hand)
if d8 is not None:
comp_tm_set_play(False, d8, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
elif d8 is None:
d9 = check_lowest_value_card_of_suit(computer_teammate_hand, a.suit)
comp_tm_set_play(False, d9, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
elif have_suits(computer_teammate_hand, a.suit) is False:
if len(comp_teammate_trumps) > 0:
d10 = check_lowest_value_card(comp_teammate_trumps)
comp_tm_set_play(True, d10, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
elif len(comp_teammate_trumps) == 0:
d11 = check_lowest_value_card(computer_teammate_hand)
comp_tm_set_play(False, d11, x_limit, cards, cards, cards, card_pos, card_pos,
ply_tm_card_pos)
elif a.suit != b.suit and b.suit == c.suit:
if have_suits(computer_teammate_hand, a.suit):
w = check_least_high_card(a, computer_teammate_hand)
if w is not None:
comp_tm_set_play(False, w, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
elif w is None:
w1 = check_lowest_value_card_of_suit(computer_teammate_hand, a.suit)
comp_tm_set_play(False, w1, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
elif have_suits(computer_teammate_hand, a.suit) is False:
if len(comp_teammate_trumps) > 0:
w2 = check_lowest_value_card(comp_teammate_trumps)
comp_tm_set_play(True, w2, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
elif len(comp_teammate_trumps) == 0:
w3 = check_lowest_value_card(computer_teammate_hand)
comp_tm_set_play(False, w3, x_limit, cards, cards, cards, card_pos, card_pos,
ply_tm_card_pos)
elif a.suit != b.suit != c.suit:
if have_suits(computer_teammate_hand, a.suit):
d12 = check_least_high_card(a, computer_teammate_hand)
if d12 is not None:
comp_tm_set_play(False, d12, x_limit, cards, cards, cards, card_pos, card_pos,
ply_tm_card_pos)
elif d12 is None:
d13 = check_lowest_value_card_of_suit(computer_teammate_hand, a.suit)
comp_tm_set_play(False, d13, x_limit, cards, cards, cards, card_pos, card_pos,
ply_tm_card_pos)
elif have_suits(computer_teammate_hand, a.suit) is False:
if len(comp_teammate_trumps) > 0:
d14 = check_lowest_value_card(comp_teammate_trumps)
comp_tm_set_play(True, d14, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
elif len(comp_teammate_trumps) == 0:
d15 = check_lowest_value_card(computer_teammate_hand)
comp_tm_set_play(False, d15, x_limit, cards, cards, cards, card_pos, card_pos,
ply_tm_card_pos)
if a.suit != trump and b.suit != trump and c.suit == trump:
if have_suits(computer_teammate_hand, a.suit):
e = check_lowest_value_card_of_suit(computer_teammate_hand, a.suit)
comp_tm_set_play(False, e, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
elif have_suits(computer_teammate_hand, a.suit) is False:
if len(comp_teammate_trumps) > 0:
e1 = check_least_high_card(c, comp_teammate_trumps)
if e1 is not None:
comp_tm_set_play(True, e1, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
elif e1 is None:
e2 = check_lowest_value_card(computer_teammate_hand)
comp_tm_set_play(False, e2, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
elif len(comp_teammate_trumps) == 0:
e3 = check_lowest_value_card(computer_teammate_hand)
comp_tm_set_play(False, e3, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
if a.suit != trump and b.suit == trump and c.suit != trump:
if have_suits(computer_teammate_hand, a.suit):
f = check_lowest_value_card_of_suit(computer_teammate_hand, a.suit)
comp_tm_set_play(False, f, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
elif have_suits(computer_teammate_hand, a.suit) is False:
if len(comp_teammate_trumps) > 0:
f1 = check_least_high_card(b, comp_teammate_trumps)
if f1 is not None:
comp_tm_set_play(True, f1, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
elif f1 is None:
f2 = check_lowest_value_card(comp_teammate_trumps)
comp_tm_set_play(True, f2, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
elif len(comp_teammate_trumps) == 0:
f3 = check_lowest_value_card(computer_teammate_hand)
comp_tm_set_play(False, f3, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
if a.suit == trump and b.suit != trump and c.suit != trump:
if len(comp_teammate_trumps) > 0:
g = check_least_high_card(a, comp_teammate_trumps)
if g is not None:
comp_tm_set_play(True, g, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
elif g is None:
g1 = check_lowest_value_card(comp_teammate_trumps)
comp_tm_set_play(True, g1, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
elif len(comp_teammate_trumps) == 0:
g2 = check_lowest_value_card(computer_teammate_hand)
comp_tm_set_play(False, g2, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
if a.suit == trump and b.suit == trump and c.suit != trump:
if len(comp_teammate_trumps) > 0:
h = check_least_high_card(compare_cards(a, b), comp_teammate_trumps)
if h is not None:
comp_tm_set_play(True, h, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
if h is None:
h1 = check_lowest_value_card(comp_teammate_trumps)
comp_tm_set_play(True, h1, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
elif len(comp_teammate_trumps) == 0:
h2 = check_lowest_value_card(computer_teammate_hand)
comp_tm_set_play(False, h2, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
if a.suit == trump and b.suit != trump and c.suit == trump:
if len(comp_teammate_trumps) > 0:
i = check_least_high_card(compare_cards(a, c), comp_teammate_trumps)
if i is not None:
comp_tm_set_play(True, i, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
if i is None:
i1 = check_lowest_value_card(comp_teammate_trumps)
comp_tm_set_play(True, i1, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
elif len(comp_teammate_trumps) == 0:
i2 = check_lowest_value_card(computer_teammate_hand)
comp_tm_set_play(False, i2, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
if a.suit != trump and b.suit == trump and c.suit == trump:
if len(comp_teammate_trumps) > 0:
j = check_least_high_card(compare_cards(b, c), comp_teammate_trumps)
if j is not None:
comp_tm_set_play(True, j, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
if j is None:
j1 = check_lowest_value_card(comp_teammate_trumps)
comp_tm_set_play(True, j1, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
elif len(comp_teammate_trumps) == 0:
j2 = check_lowest_value_card(computer_teammate_hand)
comp_tm_set_play(False, j2, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
if a.suit == trump and b.suit == trump and c.suit == trump:
if len(comp_teammate_trumps) > 0:
k = check_least_high_card(compare_cards(compare_cards(a, b), c), comp_teammate_trumps)
if k is not None:
comp_tm_set_play(True, k, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
if k is None:
k1 = check_lowest_value_card(comp_teammate_trumps)
comp_tm_set_play(True, k1, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
elif len(comp_teammate_trumps) == 0:
k2 = check_lowest_value_card(computer_teammate_hand)
comp_tm_set_play(False, k2, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
elif ct_pos == rnd and pt_pos == rnd and c_pos == rnd + 1 and p_pos == rnd + 1:
a = comp_played_hands[rnd]
b = player_played_hands[rnd]
if a.suit != trump and b.suit != trump:
if a.suit == b.suit:
if have_suits(computer_teammate_hand, a.suit):
c = compare_cards(a, b)
d = check_least_high_card(c, computer_teammate_hand)
if d is not None:
comp_tm_set_play(False, d, x_limit, cards, cards, cards + 1, card_pos, card_pos,
ply_tm_card_pos)
elif d is None:
e = check_lowest_value_card_of_suit(computer_teammate_hand, a.suit)
comp_tm_set_play(False, e, x_limit, cards, cards, cards + 1, card_pos, card_pos,
ply_tm_card_pos)
elif have_suits(computer_teammate_hand, a.suit) is False:
if len(comp_teammate_trumps) > 0:
a1 = check_lowest_value_card(comp_teammate_trumps)
comp_tm_set_play(True, a1, x_limit, cards, cards, cards + 1, card_pos, card_pos,
ply_tm_card_pos)
elif len(comp_teammate_trumps) == 0:
z1 = check_lowest_value_card(computer_teammate_hand)
comp_tm_set_play(False, z1, x_limit, cards, cards, cards + 1, card_pos, card_pos,
ply_tm_card_pos)
elif a.suit != b.suit:
if have_suits(computer_teammate_hand, a.suit):
b1 = check_least_high_card(a, computer_teammate_hand)
if b1 is not None:
comp_tm_set_play(False, b1, x_limit, cards, cards, cards + 1, card_pos, card_pos,
ply_tm_card_pos)
elif b1 is None:
b2 = check_lowest_value_card_of_suit(computer_teammate_hand, a.suit)
comp_tm_set_play(False, b2, x_limit, cards, cards, cards + 1, card_pos, card_pos,
ply_tm_card_pos)
elif have_suits(computer_teammate_hand, a.suit) is False:
if len(comp_teammate_trumps) > 0:
b3 = check_lowest_value_card(comp_teammate_trumps)
comp_tm_set_play(True, b3, x_limit, cards, cards, cards + 1, card_pos, card_pos,
ply_tm_card_pos)
elif len(comp_teammate_trumps) == 0:
b4 = check_lowest_value_card(computer_teammate_hand)
comp_tm_set_play(False, b4, x_limit, cards, cards, cards + 1, card_pos, card_pos,
ply_tm_card_pos)
elif a.suit != trump and b.suit == trump:
if have_suits(computer_teammate_hand, a.suit):
g = check_lowest_value_card_of_suit(computer_teammate_hand, a.suit)
comp_tm_set_play(False, g, x_limit, cards, cards, cards + 1, card_pos, card_pos, ply_tm_card_pos)
elif have_suits(computer_teammate_hand, a.suit) is False:
if len(comp_teammate_trumps) > 0:
h = check_least_high_card(b, comp_teammate_trumps)
if h is not None:
comp_tm_set_play(True, h, x_limit, cards, cards, cards + 1, card_pos, card_pos, ply_tm_card_pos)
elif h is None:
i = check_lowest_value_card(comp_teammate_trumps)
comp_tm_set_play(True, i, x_limit, cards, cards, cards + 1, card_pos, card_pos,
ply_tm_card_pos)
elif len(comp_teammate_trumps) == 0:
j = check_lowest_value_card(computer_teammate_hand)
comp_tm_set_play(False, j, x_limit, cards, cards, cards + 1, card_pos, card_pos, ply_tm_card_pos)
elif a.suit == trump and b.suit != trump:
if len(comp_teammate_trumps) > 0:
k = check_least_high_card(a, comp_teammate_trumps)
if k is not None:
comp_tm_set_play(True, k, x_limit, cards, cards, cards + 1, card_pos, card_pos, ply_tm_card_pos)
elif k is None:
m = check_lowest_value_card(comp_teammate_trumps)
comp_tm_set_play(True, m, x_limit, cards, cards, cards + 1, card_pos, card_pos, ply_tm_card_pos)
elif len(comp_teammate_trumps) == 0:
n = check_lowest_value_card(computer_teammate_hand)
comp_tm_set_play(False, n, x_limit, cards, cards, cards + 1, card_pos, card_pos, ply_tm_card_pos)
elif a.suit == trump and b.suit == trump:
if len(comp_teammate_trumps) > 0:
p = check_least_high_card(compare_cards(a, b), comp_teammate_trumps)
if p is not None:
comp_tm_set_play(True, p, x_limit, cards, cards, cards + 1, card_pos, card_pos, ply_tm_card_pos)
elif p is None:
q = check_lowest_value_card(comp_teammate_trumps)
comp_tm_set_play(True, q, x_limit, cards, cards, cards + 1, card_pos, card_pos, ply_tm_card_pos)
elif len(comp_teammate_trumps) == 0:
r = check_lowest_value_card(computer_teammate_hand)
comp_tm_set_play(False, r, x_limit, cards, cards, cards + 1, card_pos, card_pos, ply_tm_card_pos)
elif ct_pos == rnd and pt_pos == rnd and c_pos == rnd and p_pos == rnd + 1:
s = player_played_hands[rnd]
if s.suit != trump:
if have_suits(computer_teammate_hand, s.suit):
t = check_least_high_card(s, computer_teammate_hand)
if t is not None:
comp_tm_set_play(False, t, x_limit, cards + 1, cards, cards + 1, card_pos + 20, card_pos,
ply_tm_card_pos)
elif t is None:
t1 = check_lowest_value_card_of_suit(computer_teammate_hand, s.suit)
comp_tm_set_play(False, t1, x_limit, cards + 1, cards, cards + 1, card_pos + 20, card_pos,
ply_tm_card_pos)
elif have_suits(computer_teammate_hand, s.suit) is False:
if len(comp_teammate_trumps) > 0:
u = check_lowest_value_card(comp_teammate_trumps)
comp_tm_set_play(True, u, x_limit, cards + 1, cards, cards + 1, card_pos + 20, card_pos,
ply_tm_card_pos)
elif len(comp_teammate_trumps) == 0:
v = check_lowest_value_card(computer_teammate_hand)
comp_tm_set_play(False, v, x_limit, cards + 1, cards, cards + 1, card_pos + 20, card_pos,
ply_tm_card_pos)
elif s.suit == trump:
if len(comp_teammate_trumps) > 0:
w = check_least_high_card(s, comp_teammate_trumps)
if w is not None:
comp_tm_set_play(True, w, x_limit, cards + 1, cards, cards + 1, card_pos + 20, card_pos,
ply_tm_card_pos)
if w is None:
x = check_lowest_value_card(comp_teammate_trumps)
comp_tm_set_play(True, x, x_limit, cards + 1, cards, cards + 1, card_pos + 20, card_pos,
ply_tm_card_pos)
elif len(comp_teammate_trumps) == 0:
y = check_lowest_value_card(computer_teammate_hand)
comp_tm_set_play(False, y, x_limit, cards + 1, cards, cards + 1, card_pos + 20, card_pos,
ply_tm_card_pos)
elif ct_pos == rnd and pt_pos == rnd and c_pos == rnd and p_pos == rnd:
if len(computer_teammate_hand) == 0:
kk_1 = random.choice(comp_teammate_trumps)
comp_tm_set_play(True, kk_1, x_limit + 75, cards + 1, cards, cards + 1, card_pos + 20, card_pos,
ply_tm_card_pos)
if len(computer_teammate_hand) > 0:
k = random.choice(computer_teammate_hand)
comp_tm_set_play(False, k, x_limit + 75, cards + 1, cards, cards + 1, card_pos + 20, card_pos,
ply_tm_card_pos)
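# player_tm_play() mirrors comp_tm_play() but picks a card for the player's
# teammate, using player_teammate_hand and player_teammate_trumps.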
def player_tm_play(rnd, ply_tm_card_pos):
x_limit = 920 - (75 * rnd)
cards = 13 - rnd
card_pos = 390 - (20 * rnd)
rnd -= 1
c_pos = len(comp_played_hands)
p_pos = len(player_played_hands)
ct_pos = len(comp_teammate_played_hands)
pt_pos = len(player_teammate_played_hands)
if pt_pos == rnd and c_pos == rnd + 1 and p_pos == rnd + 1 and ct_pos == rnd + 1:
a = comp_played_hands[rnd]
b = player_played_hands[rnd]
c = comp_teammate_played_hands[rnd]
if a.suit != trump and b.suit != trump and c.suit != trump:
if a.suit == b.suit == c.suit:
if have_suits(player_teammate_hand, a.suit):
d = compare_cards(compare_cards(a, b), c)
d0 = check_least_high_card(d, player_teammate_hand)
if d0 is not None:
player_tm_set_play(False, d0, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
elif d0 is None:
d1 = check_lowest_value_card_of_suit(player_teammate_hand, a.suit)
player_tm_set_play(False, d1, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
elif have_suits(player_teammate_hand, a.suit) is False:
if len(player_teammate_trumps) > 0:
d2 = check_lowest_value_card(player_teammate_trumps)
player_tm_set_play(True, d2, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
elif len(player_teammate_trumps) == 0:
d3 = check_lowest_value_card(player_teammate_hand)
player_tm_set_play(False, d3, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
elif a.suit == b.suit and b.suit != c.suit:
if have_suits(player_teammate_hand, a.suit):
d4 = check_least_high_card(compare_cards(a, b), player_teammate_hand)
if d4 is not None:
player_tm_set_play(False, d4, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
elif d4 is None:
d5 = check_lowest_value_card_of_suit(player_teammate_hand, a.suit)
player_tm_set_play(False, d5, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
elif have_suits(player_teammate_hand, a.suit) is False:
if len(player_teammate_trumps) > 0:
d6 = check_lowest_value_card(player_teammate_trumps)
player_tm_set_play(True, d6, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
elif len(player_teammate_trumps) == 0:
d7 = check_lowest_value_card(player_teammate_hand)
player_tm_set_play(False, d7, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
elif a.suit == c.suit and c.suit != b.suit:
if have_suits(player_teammate_hand, a.suit):
d8 = check_least_high_card(compare_cards(a, c), player_teammate_hand)
if d8 is not None:
player_tm_set_play(False, d8, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
elif d8 is None:
d9 = check_lowest_value_card_of_suit(player_teammate_hand, a.suit)
player_tm_set_play(False, d9, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
elif have_suits(player_teammate_hand, a.suit) is False:
if len(player_teammate_trumps) > 0:
d10 = check_lowest_value_card(player_teammate_trumps)
player_tm_set_play(True, d10, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
elif len(player_teammate_trumps) == 0:
d11 = check_lowest_value_card(player_teammate_hand)
player_tm_set_play(False, d11, x_limit, cards, cards, cards, card_pos, card_pos,
ply_tm_card_pos)
elif a.suit != b.suit and b.suit == c.suit:
if have_suits(player_teammate_hand, a.suit):
w = check_least_high_card(a, player_teammate_hand)
if w is not None:
player_tm_set_play(False, w, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
elif w is None:
w1 = check_lowest_value_card_of_suit(player_teammate_hand, a.suit)
player_tm_set_play(False, w1, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
elif have_suits(player_teammate_hand, a.suit) is False:
if len(player_teammate_trumps) > 0:
w2 = check_lowest_value_card(player_teammate_trumps)
player_tm_set_play(True, w2, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
elif len(player_teammate_trumps) == 0:
w3 = check_lowest_value_card(player_teammate_hand)
player_tm_set_play(False, w3, x_limit, cards, cards, cards, card_pos, card_pos,
ply_tm_card_pos)
elif a.suit != b.suit != c.suit:
if have_suits(player_teammate_hand, a.suit):
d12 = check_least_high_card(a, player_teammate_hand)
if d12 is not None:
player_tm_set_play(False, d12, x_limit, cards, cards, cards, card_pos, card_pos,
ply_tm_card_pos)
elif d12 is None:
d13 = check_lowest_value_card_of_suit(player_teammate_hand, a.suit)
player_tm_set_play(False, d13, x_limit, cards, cards, cards, card_pos, card_pos,
ply_tm_card_pos)
elif have_suits(player_teammate_hand, a.suit) is False:
if len(player_teammate_trumps) > 0:
d14 = check_lowest_value_card(player_teammate_trumps)
player_tm_set_play(True, d14, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
elif len(player_teammate_trumps) == 0:
d15 = check_lowest_value_card(player_teammate_hand)
player_tm_set_play(False, d15, x_limit, cards, cards, cards, card_pos, card_pos,
ply_tm_card_pos)
if a.suit != trump and b.suit != trump and c.suit == trump:
if have_suits(player_teammate_hand, a.suit):
e = check_lowest_value_card_of_suit(player_teammate_hand, a.suit)
player_tm_set_play(False, e, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
elif have_suits(player_teammate_hand, a.suit) is False:
if len(player_teammate_trumps) > 0:
e1 = check_least_high_card(c, player_teammate_trumps)
if e1 is not None:
player_tm_set_play(True, e1, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
elif e1 is None:
e2 = check_lowest_value_card(player_teammate_hand)
player_tm_set_play(False, e2, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
elif len(player_teammate_trumps) == 0:
e3 = check_lowest_value_card(player_teammate_hand)
player_tm_set_play(False, e3, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
if a.suit != trump and b.suit == trump and c.suit != trump:
if have_suits(player_teammate_hand, a.suit):
f = check_lowest_value_card_of_suit(player_teammate_hand, a.suit)
player_tm_set_play(False, f, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
elif have_suits(player_teammate_hand, a.suit) is False:
if len(player_teammate_trumps) > 0:
f1 = check_least_high_card(b, player_teammate_trumps)
if f1 is not None:
player_tm_set_play(True, f1, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
elif f1 is None:
f2 = check_lowest_value_card(player_teammate_trumps)
player_tm_set_play(True, f2, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
elif len(player_teammate_trumps) == 0:
f3 = check_lowest_value_card(player_teammate_hand)
player_tm_set_play(False, f3, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
if a.suit == trump and b.suit != trump and c.suit != trump:
if len(player_teammate_trumps) > 0:
g = check_least_high_card(a, player_teammate_trumps)
if g is not None:
player_tm_set_play(True, g, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
elif g is None:
g1 = check_lowest_value_card(player_teammate_trumps)
player_tm_set_play(True, g1, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
elif len(player_teammate_trumps) == 0:
g2 = check_lowest_value_card(player_teammate_hand)
player_tm_set_play(False, g2, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
if a.suit == trump and b.suit == trump and c.suit != trump:
if len(player_teammate_trumps) > 0:
h = check_least_high_card(compare_cards(a, b), player_teammate_trumps)
if h is not None:
player_tm_set_play(True, h, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
if h is None:
h1 = check_lowest_value_card(player_teammate_trumps)
player_tm_set_play(True, h1, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
elif len(player_teammate_trumps) == 0:
h2 = check_lowest_value_card(player_teammate_hand)
player_tm_set_play(False, h2, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
if a.suit == trump and b.suit != trump and c.suit == trump:
if len(player_teammate_trumps) > 0:
i = check_least_high_card(compare_cards(a, c), player_teammate_trumps)
if i is not None:
player_tm_set_play(True, i, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
if i is None:
i1 = check_lowest_value_card(player_teammate_trumps)
player_tm_set_play(True, i1, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
elif len(player_teammate_trumps) == 0:
i2 = check_lowest_value_card(player_teammate_hand)
player_tm_set_play(False, i2, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
if a.suit != trump and b.suit == trump and c.suit == trump:
if len(player_teammate_trumps) > 0:
j = check_least_high_card(compare_cards(b, c), player_teammate_trumps)
if j is not None:
player_tm_set_play(True, j, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
if j is None:
j1 = check_lowest_value_card(player_teammate_trumps)
player_tm_set_play(True, j1, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
elif len(player_teammate_trumps) == 0:
j2 = check_lowest_value_card(player_teammate_hand)
player_tm_set_play(False, j2, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
if a.suit == trump and b.suit == trump and c.suit == trump:
if len(player_teammate_trumps) > 0:
k = check_least_high_card(compare_cards(compare_cards(a, b), c), player_teammate_trumps)
if k is not None:
player_tm_set_play(True, k, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
if k is None:
k1 = check_lowest_value_card(player_teammate_trumps)
player_tm_set_play(True, k1, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
elif len(player_teammate_trumps) == 0:
k2 = check_lowest_value_card(player_teammate_hand)
player_tm_set_play(False, k2, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
elif pt_pos == rnd and c_pos == rnd and p_pos == rnd + 1 and ct_pos == rnd + 1:
s = player_played_hands[rnd]
h = comp_teammate_played_hands[rnd]
if s.suit != trump and h.suit != trump:
if s.suit == h.suit:
if have_suits(player_teammate_hand, s.suit):
m = check_least_high_card(compare_cards(s, h), player_teammate_hand)
if m is not None:
player_tm_set_play(False, m, x_limit, cards + 1, cards, cards, card_pos + 20, card_pos,
ply_tm_card_pos)
elif m is None:
m1 = check_lowest_value_card_of_suit(player_teammate_hand, s.suit)
player_tm_set_play(False, m1, x_limit, cards + 1, cards, cards, card_pos + 20, card_pos,
ply_tm_card_pos)
elif have_suits(player_teammate_hand, s.suit) is False:
if len(player_teammate_trumps) > 0:
m2 = check_lowest_value_card(player_teammate_trumps)
player_tm_set_play(True, m2, x_limit, cards + 1, cards, cards, card_pos + 20, card_pos,
ply_tm_card_pos)
elif len(player_teammate_trumps) == 0:
m3 = check_lowest_value_card(player_teammate_hand)
player_tm_set_play(False, m3, x_limit, cards + 1, cards, cards, card_pos + 20, card_pos,
ply_tm_card_pos)
elif s.suit != h.suit:
if have_suits(player_teammate_hand, s.suit):
m4 = check_least_high_card(s, player_teammate_hand)
if m4 is not None:
player_tm_set_play(False, m4, x_limit, cards + 1, cards, cards, card_pos + 20, card_pos,
ply_tm_card_pos)
if m4 is None:
m5 = check_lowest_value_card_of_suit(player_teammate_hand, s.suit)
player_tm_set_play(False, m5, x_limit, cards + 1, cards, cards, card_pos + 20, card_pos,
ply_tm_card_pos)
elif have_suits(player_teammate_hand, s.suit) is False:
if len(player_teammate_trumps) > 0:
m6 = check_lowest_value_card(player_teammate_trumps)
player_tm_set_play(True, m6, x_limit, cards + 1, cards, cards, card_pos + 20, card_pos,
ply_tm_card_pos)
elif len(player_teammate_trumps) == 0:
m7 = check_lowest_value_card(player_teammate_hand)
player_tm_set_play(False, m7, x_limit, cards + 1, cards, cards, card_pos + 20, card_pos,
ply_tm_card_pos)
if s.suit != trump and h.suit == trump:
if have_suits(player_teammate_hand, s.suit):
n = check_lowest_value_card_of_suit(player_teammate_hand, s.suit)
player_tm_set_play(False, n, x_limit, cards + 1, cards, cards, card_pos + 20, card_pos, ply_tm_card_pos)
elif have_suits(player_teammate_hand, s.suit) is False:
if len(player_teammate_trumps) > 0:
n1 = check_least_high_card(h, player_teammate_trumps)
if n1 is not None:
player_tm_set_play(True, n1, x_limit, cards + 1, cards, cards, card_pos + 20, card_pos,
ply_tm_card_pos)
if n1 is None:
n2 = check_lowest_value_card(player_teammate_trumps)
player_tm_set_play(True, n2, x_limit, cards + 1, cards, cards, card_pos + 20, card_pos,
ply_tm_card_pos)
elif len(player_teammate_trumps) == 0:
n3 = check_lowest_value_card(player_teammate_hand)
player_tm_set_play(False, n3, x_limit, cards + 1, cards, cards, card_pos + 20, card_pos,
ply_tm_card_pos)
if s.suit == trump and h.suit != trump:
if len(player_teammate_trumps) > 0:
o = check_least_high_card(s, player_teammate_trumps)
if o is not None:
player_tm_set_play(True, o, x_limit, cards + 1, cards, cards, card_pos + 20, card_pos,
ply_tm_card_pos)
elif o is None:
o1 = check_lowest_value_card(player_teammate_trumps)
player_tm_set_play(True, o1, x_limit, cards + 1, cards, cards, card_pos + 20, card_pos,
ply_tm_card_pos)
elif len(player_teammate_trumps) == 0:
o2 = check_lowest_value_card(player_teammate_hand)
player_tm_set_play(False, o2, x_limit, cards + 1, cards, cards, card_pos + 20, card_pos,
ply_tm_card_pos)
if s.suit == trump and h.suit == trump:
if len(player_teammate_trumps) > 0:
p = check_least_high_card(compare_cards(s, h), player_teammate_trumps)
if p is not None:
player_tm_set_play(True, p, x_limit, cards + 1, cards, cards, card_pos + 20, card_pos,
ply_tm_card_pos)
elif p is None:
p1 = check_lowest_value_card(player_teammate_trumps)
player_tm_set_play(True, p1, x_limit, cards + 1, cards, cards, card_pos + 20, card_pos,
ply_tm_card_pos)
elif len(player_teammate_trumps) == 0:
p2 = check_lowest_value_card(player_teammate_hand)
player_tm_set_play(False, p2, x_limit, cards + 1, cards, cards, card_pos + 20, card_pos,
ply_tm_card_pos)
elif pt_pos == rnd and c_pos == rnd and p_pos == rnd and ct_pos == rnd + 1:
s = comp_teammate_played_hands[rnd]
if s.suit != trump:
if have_suits(player_teammate_hand, s.suit):
t = check_least_high_card(s, player_teammate_hand)
if t is not None:
player_tm_set_play(False, t, x_limit + 75, cards + 1, cards, cards, card_pos + 20, card_pos,
ply_tm_card_pos)
elif t is None:
t1 = check_lowest_value_card_of_suit(player_teammate_hand, s.suit)
player_tm_set_play(False, t1, x_limit + 75, cards + 1, cards, cards, card_pos + 20, card_pos,
ply_tm_card_pos)
elif have_suits(player_teammate_hand, s.suit) is False:
if len(player_teammate_trumps) > 0:
u = check_lowest_value_card(player_teammate_trumps)
player_tm_set_play(True, u, x_limit + 75, cards + 1, cards, cards, card_pos + 20, card_pos,
ply_tm_card_pos)
elif len(player_teammate_trumps) == 0:
v = check_lowest_value_card(player_teammate_hand)
player_tm_set_play(False, v, x_limit + 75, cards + 1, cards, cards, card_pos + 20, card_pos,
ply_tm_card_pos)
elif s.suit == trump:
if len(player_teammate_trumps) > 0:
w = check_least_high_card(s, player_teammate_trumps)
if w is not None:
player_tm_set_play(True, w, x_limit + 75, cards + 1, cards, cards, card_pos + 20, card_pos,
ply_tm_card_pos)
if w is None:
x = check_lowest_value_card(player_teammate_trumps)
player_tm_set_play(True, x, x_limit + 75, cards + 1, cards, cards, card_pos + 20, card_pos,
ply_tm_card_pos)
elif len(player_teammate_trumps) == 0:
y = check_lowest_value_card(player_teammate_hand)
player_tm_set_play(False, y, x_limit + 75, cards + 1, cards, cards, card_pos + 20, card_pos,
ply_tm_card_pos)
elif pt_pos == rnd and c_pos == rnd and p_pos == rnd and ct_pos == rnd:
if len(player_teammate_hand) == 0:
kk_1 = random.choice(player_teammate_trumps)
player_tm_set_play(True, kk_1, x_limit + 75, cards + 1, cards + 1, cards, card_pos + 20, card_pos + 20,
ply_tm_card_pos)
if len(player_teammate_hand) > 0:
k = random.choice(player_teammate_hand)
player_tm_set_play(False, k, x_limit + 75, cards + 1, cards + 1, cards, card_pos + 20, card_pos + 20,
ply_tm_card_pos)
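# comp_play() picks the computer's card. When first_play is True it leads the
# trick with a random card (or a random trump if only trumps remain);
# otherwise it responds with the same follow-suit / trump-low / discard-low
# policy as the teammate AIs.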
def comp_play(first_play, rnd, ply_tm_card_pos):
global comp_round_done
x_limit = 920 - (75 * rnd)
cards = 13 - rnd
card_pos = 390 - (20 * rnd)
rnd -= 1
c_pos = len(comp_played_hands)
p_pos = len(player_played_hands)
ct_pos = len(comp_teammate_played_hands)
pt_pos = len(player_teammate_played_hands)
if first_play:
clear_all_decks()
if len(computer_hand) == 0:
kk_1 = random.choice(comp_trumps)
comp_set_play(True, kk_1, x_limit + 75, cards, cards + 1, cards + 1, card_pos, card_pos + 20,
ply_tm_card_pos)
if len(computer_hand) > 0:
k = random.choice(computer_hand)
comp_set_play(False, k, x_limit + 75, cards, cards + 1, cards + 1, card_pos, card_pos + 20, ply_tm_card_pos)
elif not first_play:
if c_pos == rnd and p_pos == rnd + 1 and ct_pos == rnd + 1 and pt_pos == rnd + 1:
a = player_played_hands[rnd]
b = comp_teammate_played_hands[rnd]
c = player_teammate_played_hands[rnd]
if a.suit != trump and b.suit != trump and c.suit != trump:
if a.suit == b.suit == c.suit:
if have_suits(computer_hand, a.suit):
d = compare_cards(compare_cards(a, b), c)
d0 = check_least_high_card(d, computer_hand)
if d0 is not None:
comp_set_play(False, d0, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
elif d0 is None:
d1 = check_lowest_value_card_of_suit(computer_hand, a.suit)
comp_set_play(False, d1, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
elif have_suits(computer_hand, a.suit) is False:
if len(comp_trumps) > 0:
d2 = check_lowest_value_card(comp_trumps)
comp_set_play(True, d2, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
elif len(comp_trumps) == 0:
d3 = check_lowest_value_card(computer_hand)
comp_set_play(False, d3, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
elif a.suit == b.suit and b.suit != c.suit:
if have_suits(computer_hand, a.suit):
d4 = check_least_high_card(compare_cards(a, b), computer_hand)
if d4 is not None:
comp_set_play(False, d4, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
elif d4 is None:
d5 = check_lowest_value_card_of_suit(computer_hand, a.suit)
comp_set_play(False, d5, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
elif have_suits(computer_hand, a.suit) is False:
if len(comp_trumps) > 0:
d6 = check_lowest_value_card(comp_trumps)
comp_set_play(True, d6, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
elif len(comp_trumps) == 0:
d7 = check_lowest_value_card(computer_hand)
comp_set_play(False, d7, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
elif a.suit == c.suit and c.suit != b.suit:
if have_suits(computer_hand, a.suit):
d8 = check_least_high_card(compare_cards(a, c), computer_hand)
if d8 is not None:
comp_set_play(False, d8, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
elif d8 is None:
d9 = check_lowest_value_card_of_suit(computer_hand, a.suit)
comp_set_play(False, d9, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
elif have_suits(computer_hand, a.suit) is False:
if len(comp_trumps) > 0:
d10 = check_lowest_value_card(comp_trumps)
comp_set_play(True, d10, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
elif len(comp_trumps) == 0:
d11 = check_lowest_value_card(computer_hand)
comp_set_play(False, d11, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
elif a.suit != b.suit and b.suit == c.suit:
if have_suits(computer_hand, a.suit):
m = check_least_high_card(a, computer_hand)
if m is not None:
comp_set_play(False, m, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
elif m is None:
m1 = check_lowest_value_card_of_suit(computer_hand, a.suit)
comp_set_play(False, m1, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
elif have_suits(computer_hand, a.suit) is False:
if len(comp_trumps) > 0:
m2 = check_lowest_value_card(comp_trumps)
comp_set_play(True, m2, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
elif len(comp_trumps) == 0:
m3 = check_lowest_value_card(computer_hand)
comp_set_play(False, m3, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
elif a.suit != b.suit != c.suit:
if have_suits(computer_hand, a.suit):
d12 = check_least_high_card(a, computer_hand)
if d12 is not None:
comp_set_play(False, d12, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
elif d12 is None:
d13 = check_lowest_value_card_of_suit(computer_hand, a.suit)
comp_set_play(False, d13, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
elif have_suits(computer_hand, a.suit) is False:
if len(comp_trumps) > 0:
d14 = check_lowest_value_card(comp_trumps)
comp_set_play(True, d14, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
elif len(comp_trumps) == 0:
d15 = check_lowest_value_card(computer_hand)
comp_set_play(False, d15, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
if a.suit != trump and b.suit != trump and c.suit == trump:
if have_suits(computer_hand, a.suit):
e = check_lowest_value_card_of_suit(computer_hand, a.suit)
comp_set_play(False, e, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
elif have_suits(computer_hand, a.suit) is False:
if len(comp_trumps) > 0:
e1 = check_least_high_card(c, comp_trumps)
if e1 is not None:
comp_set_play(True, e1, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
elif e1 is None:
e2 = check_lowest_value_card(computer_hand)
comp_set_play(False, e2, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
elif len(comp_trumps) == 0:
e3 = check_lowest_value_card(computer_hand)
comp_set_play(False, e3, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
if a.suit != trump and b.suit == trump and c.suit != trump:
if have_suits(computer_hand, a.suit):
f = check_lowest_value_card_of_suit(computer_hand, a.suit)
comp_set_play(False, f, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
elif have_suits(computer_hand, a.suit) is False:
if len(comp_trumps) > 0:
f1 = check_least_high_card(b, comp_trumps)
if f1 is not None:
comp_set_play(True, f1, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
elif f1 is None:
f2 = check_lowest_value_card(comp_trumps)
comp_set_play(True, f2, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
elif len(comp_trumps) == 0:
f3 = check_lowest_value_card(computer_hand)
comp_set_play(False, f3, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
if a.suit == trump and b.suit != trump and c.suit != trump:
if len(comp_trumps) > 0:
g = check_least_high_card(a, comp_trumps)
if g is not None:
comp_set_play(True, g, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
elif g is None:
g1 = check_lowest_value_card(comp_trumps)
comp_set_play(True, g1, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
elif len(comp_trumps) == 0:
g2 = check_lowest_value_card(computer_hand)
comp_set_play(False, g2, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
if a.suit == trump and b.suit == trump and c.suit != trump:
if len(comp_trumps) > 0:
h = check_least_high_card(compare_cards(a, b), comp_trumps)
if h is not None:
comp_set_play(True, h, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
if h is None:
h1 = check_lowest_value_card(comp_trumps)
comp_set_play(True, h1, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
elif len(comp_trumps) == 0:
h2 = check_lowest_value_card(computer_hand)
comp_set_play(False, h2, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
if a.suit == trump and b.suit != trump and c.suit == trump:
if len(comp_trumps) > 0:
i = check_least_high_card(compare_cards(a, c), comp_trumps)
if i is not None:
comp_set_play(True, i, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
if i is None:
i1 = check_lowest_value_card(comp_trumps)
comp_set_play(True, i1, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
elif len(comp_trumps) == 0:
i2 = check_lowest_value_card(computer_hand)
comp_set_play(False, i2, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
if a.suit != trump and b.suit == trump and c.suit == trump:
if len(comp_trumps) > 0:
j = check_least_high_card(compare_cards(b, c), comp_trumps)
if j is not None:
comp_set_play(True, j, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
if j is None:
j1 = check_lowest_value_card(comp_trumps)
comp_set_play(True, j1, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
elif len(comp_trumps) == 0:
j2 = check_lowest_value_card(computer_hand)
comp_set_play(False, j2, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
if a.suit == trump and b.suit == trump and c.suit == trump:
if len(comp_trumps) > 0:
k = check_least_high_card(compare_cards(compare_cards(a, b), c), comp_trumps)
if k is not None:
comp_set_play(True, k, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
if k is None:
k1 = check_lowest_value_card(comp_trumps)
comp_set_play(True, k1, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
elif len(comp_trumps) == 0:
k2 = check_lowest_value_card(computer_hand)
comp_set_play(False, k2, x_limit, cards, cards, cards, card_pos, card_pos, ply_tm_card_pos)
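# Case: both teammates have already played this trick but neither the computer nor
# the player has; the computer responds to the two teammates' cards.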
elif c_pos == rnd and p_pos == rnd and ct_pos == rnd + 1 and pt_pos == rnd + 1:
s = comp_teammate_played_hands[rnd]
h = player_teammate_played_hands[rnd]
if s.suit != trump and h.suit != trump:
if s.suit == h.suit:
if have_suits(computer_hand, s.suit):
m = check_least_high_card(compare_cards(s, h), computer_hand)
if m is not None:
comp_set_play(False, m, x_limit + 75, cards, cards, cards, card_pos, card_pos,
ply_tm_card_pos)
elif m is None:
m1 = check_lowest_value_card_of_suit(computer_hand, s.suit)
comp_set_play(False, m1, x_limit + 75, cards, cards, cards, card_pos, card_pos,
ply_tm_card_pos)
elif have_suits(computer_hand, s.suit) is False:
if len(comp_trumps) > 0:
m2 = check_lowest_value_card(comp_trumps)
comp_set_play(True, m2, x_limit + 75, cards, cards, cards, card_pos, card_pos,
ply_tm_card_pos)
elif len(comp_trumps) == 0:

m3 = check_lowest_value_card(computer_hand)
comp_set_play(False, m3, x_limit + 75, cards, cards, cards, card_pos, card_pos,
ply_tm_card_pos)
elif s.suit != h.suit:
if have_suits(computer_hand, s.suit):
m4 = check_least_high_card(s, computer_hand)
if m4 is not None:
comp_set_play(False, m4, x_limit + 75, cards, cards, cards, card_pos, card_pos,
ply_tm_card_pos)
if m4 is None:
m5 = check_lowest_value_card_of_suit(computer_hand, s.suit)
comp_set_play(False, m5, x_limit + 75, cards, cards, cards, card_pos, card_pos,
ply_tm_card_pos)
elif have_suits(computer_hand, s.suit) is False:
if len(comp_trumps) > 0:
m6 = check_lowest_value_card(comp_trumps)
comp_set_play(True, m6, x_limit + 75, cards, cards, cards, card_pos, card_pos,
ply_tm_card_pos)
elif len(comp_trumps) == 0:
m7 = check_lowest_value_card(computer_hand)
comp_set_play(False, m7, x_limit + 75, cards, cards, cards, card_pos, card_pos,
ply_tm_card_pos)
if s.suit != trump and h.suit == trump:
if have_suits(computer_hand, s.suit):
n = check_lowest_value_card_of_suit(computer_hand, s.suit)
comp_set_play(False, n, x_limit + 75, cards, cards, cards, card_pos, card_pos,
ply_tm_card_pos)
elif have_suits(computer_hand, s.suit) is False:
if len(comp_trumps) > 0:
n1 = check_least_high_card(h, comp_trumps)
if n1 is not None:
comp_set_play(True, n1, x_limit + 75, cards, cards, cards, card_pos, card_pos,
ply_tm_card_pos)
if n1 is None:
n2 = check_lowest_value_card(comp_trumps)
comp_set_play(True, n2, x_limit + 75, cards, cards, cards, card_pos, card_pos,
ply_tm_card_pos)
elif len(comp_trumps) == 0:
n3 = check_lowest_value_card(computer_hand)
comp_set_play(False, n3, x_limit + 75, cards, cards, cards, card_pos, card_pos,
ply_tm_card_pos)
if s.suit == trump and h.suit != trump:
if len(comp_trumps) > 0:
o = check_least_high_card(s, comp_trumps)
if o is not None:
comp_set_play(True, o, x_limit + 75, cards, cards, cards, card_pos, card_pos,
ply_tm_card_pos)
elif o is None:
o1 = check_lowest_value_card(comp_trumps)
comp_set_play(True, o1, x_limit + 75, cards, cards, cards, card_pos, card_pos,
ply_tm_card_pos)
elif len(comp_trumps) == 0:
o2 = check_lowest_value_card(computer_hand)
comp_set_play(False, o2, x_limit + 75, cards, cards, cards, card_pos, card_pos,
ply_tm_card_pos)
if s.suit == trump and h.suit == trump:
if len(comp_trumps) > 0:
p = check_least_high_card(compare_cards(s, h), comp_trumps)
if p is not None:
comp_set_play(True, p, x_limit + 75, cards, cards, cards, card_pos, card_pos,
ply_tm_card_pos)
elif p is None:
p1 = check_lowest_value_card(comp_trumps)
comp_set_play(True, p1, x_limit + 75, cards, cards, cards, card_pos, card_pos,
ply_tm_card_pos)
elif len(comp_trumps) == 0:
p2 = check_lowest_value_card(computer_hand)
comp_set_play(False, p2, x_limit + 75, cards, cards, cards, card_pos, card_pos,
ply_tm_card_pos)
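# Case: only the player's teammate has played so far; the computer follows that card.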
elif c_pos == rnd and p_pos == rnd and ct_pos == rnd and pt_pos == rnd + 1:
s = player_teammate_played_hands[rnd]
if s.suit != trump:
if have_suits(computer_hand, s.suit):
t = check_least_high_card(s, computer_hand)
if t is not None:
comp_set_play(False, t, x_limit + 75, cards, cards + 1, cards, card_pos, card_pos + 20,
ply_tm_card_pos)
elif t is None:
t1 = check_lowest_value_card_of_suit(computer_hand, s.suit)
comp_set_play(False, t1, x_limit + 75, cards, cards + 1, cards, card_pos, card_pos + 20,
ply_tm_card_pos)
elif have_suits(computer_hand, s.suit) is False:
if len(comp_trumps) > 0:
u = check_lowest_value_card(comp_trumps)
comp_set_play(True, u, x_limit + 75, cards, cards + 1, cards, card_pos, card_pos + 20,
ply_tm_card_pos)
elif len(comp_trumps) == 0:
v = check_lowest_value_card(computer_hand)
comp_set_play(False, v, x_limit + 75, cards, cards + 1, cards, card_pos, card_pos + 20,
ply_tm_card_pos)
elif s.suit == trump:
if len(comp_trumps) > 0:
w = check_least_high_card(s, comp_trumps)
if w is not None:
comp_set_play(True, w, x_limit + 75, cards, cards + 1, cards, card_pos, card_pos + 20,
ply_tm_card_pos)
if w is None:
x = check_lowest_value_card(comp_trumps)
comp_set_play(True, x, x_limit + 75, cards, cards + 1, cards, card_pos, card_pos + 20,
ply_tm_card_pos)
elif len(comp_trumps) == 0:
y = check_lowest_value_card(computer_hand)
comp_set_play(False, y, x_limit + 75, cards, cards + 1, cards, card_pos, card_pos + 20,
ply_tm_card_pos)
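# Case: nobody has played yet, so the computer leads: a random card from its
# plain-suit hand, or a random trump if only trumps remain.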
elif c_pos == rnd and p_pos == rnd and ct_pos == rnd and pt_pos == rnd:
if len(computer_hand) == 0:
kk_1 = random.choice(comp_trumps)
comp_set_play(True, kk_1, x_limit + 75, cards, cards + 1, cards + 1, card_pos, card_pos + 20,
ply_tm_card_pos)
if len(computer_hand) > 0:
k = random.choice(computer_hand)
comp_set_play(False, k, x_limit + 75, cards, cards + 1, cards + 1, card_pos, card_pos + 20,
ply_tm_card_pos)
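# check_won_hand: compares the four cards played in trick `rnd`, honouring the led
# suit and the trump suit, bumps the winning seat's trick counter and returns
# 'comp', 'player', 'comp_tm' or 'player_tm'. The outer branching picks which seat
# led the trick (the trump selector on the first trick, the previous winner after that).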
def check_won_hand(rnd):
global comp_won_hands, player_won_hands, comp_tm_won_hands, player_tm_won_hands
rnd -= 1
compare_list = []
a = comp_played_hands[rnd]
compare_list.append(a)
b = player_played_hands[rnd]
compare_list.append(b)
c = comp_teammate_played_hands[rnd]
compare_list.append(c)
d = player_teammate_played_hands[rnd]
compare_list.append(d)
if (comp_trump_selected and winner is None) or winner == 'comp':
if a.suit != trump and b.suit != trump and c.suit != trump and d.suit != trump:
e = check_highest_value_card_of_suit(compare_list, a.suit)
if e == a:
comp_won_hands += 1
return 'comp'
elif e == b:
player_won_hands += 1
return 'player'
elif e == c:
comp_tm_won_hands += 1
return 'comp_tm'
elif e == d:
player_tm_won_hands += 1
return 'player_tm'
elif a.suit != trump and b.suit != trump and c.suit != trump and d.suit == trump:
player_tm_won_hands += 1
return 'player_tm'
elif a.suit != trump and b.suit != trump and c.suit == trump and d.suit != trump:
comp_tm_won_hands += 1
return 'comp_tm'
elif a.suit != trump and b.suit == trump and c.suit != trump and d.suit != trump:
player_won_hands += 1
return 'player'
elif a.suit == trump and b.suit != trump and c.suit != trump and d.suit != trump:
comp_won_hands += 1
return 'comp'
elif a.suit == trump and b.suit == trump and c.suit != trump and d.suit != trump:
e1 = compare_cards(a, b)
if e1 == a:
comp_won_hands += 1
return 'comp'
elif e1 == b:
player_won_hands += 1
return 'player'
elif a.suit == trump and b.suit != trump and c.suit == trump and d.suit != trump:
e2 = compare_cards(a, c)
if e2 == a:
comp_won_hands += 1
return 'comp'
elif e2 == c:
comp_tm_won_hands += 1
return 'comp_tm'
elif a.suit == trump and b.suit != trump and c.suit != trump and d.suit == trump:
e3 = compare_cards(a, d)
if e3 == a:
comp_won_hands += 1
return 'comp'
elif e3 == d:
player_tm_won_hands += 1
return 'player_tm'
elif a.suit != trump and b.suit == trump and c.suit == trump and d.suit != trump:
e4 = compare_cards(b, c)
if e4 == b:
player_won_hands += 1
return 'player'
elif e4 == c:
comp_tm_won_hands += 1
return 'comp_tm'
elif a.suit != trump and b.suit == trump and c.suit != trump and d.suit == trump:
e5 = compare_cards(b, d)
if e5 == b:
player_won_hands += 1
return 'player'
elif e5 == d:
player_tm_won_hands += 1
return 'player_tm'
elif a.suit != trump and b.suit != trump and c.suit == trump and d.suit == trump:
e6 = compare_cards(c, d)
if e6 == c:
comp_tm_won_hands += 1
return 'comp_tm'
elif e6 == d:
player_tm_won_hands += 1
return 'player_tm'
elif a.suit == trump and b.suit == trump and c.suit == trump and d.suit != trump:
f = compare_cards(compare_cards(a, b), c)
if f == a:
comp_won_hands += 1
return 'comp'
elif f == b:
player_won_hands += 1
return 'player'
elif f == c:
comp_tm_won_hands += 1
return 'comp_tm'
elif a.suit == trump and b.suit == trump and c.suit != trump and d.suit == trump:
f1 = compare_cards(compare_cards(a, b), d)
if f1 == a:
comp_won_hands += 1
return 'comp'
elif f1 == b:
player_won_hands += 1
return 'player'
elif f1 == d:
player_tm_won_hands += 1
return 'player_tm'
elif a.suit == trump and b.suit != trump and c.suit == trump and d.suit == trump:
f2 = compare_cards(compare_cards(a, c), d)
if f2 == a:
comp_won_hands += 1
return 'comp'
elif f2 == c:
comp_tm_won_hands += 1
return 'comp_tm'
elif f2 == d:
player_tm_won_hands += 1
return 'player_tm'
elif a.suit != trump and b.suit == trump and c.suit == trump and d.suit == trump:
f3 = compare_cards(compare_cards(b, c), d)
if f3 == b:
player_won_hands += 1
return 'player'
elif f3 == c:
comp_tm_won_hands += 1
return 'comp_tm'
elif f3 == d:
player_tm_won_hands += 1
return 'player_tm'
elif a.suit == trump and b.suit == trump and c.suit == trump and d.suit == trump:
f4 = check_highest_value_card(compare_list)
if f4 == a:
comp_won_hands += 1
return 'comp'
elif f4 == b:
player_won_hands += 1
return 'player'
elif f4 == c:
comp_tm_won_hands += 1
return 'comp_tm'
elif f4 == d:
player_tm_won_hands += 1
return 'player_tm'
elif (player_trump_selected and winner is None) or winner == 'player':
if a.suit != trump and b.suit != trump and c.suit != trump and d.suit != trump:
e = check_highest_value_card_of_suit(compare_list, b.suit)
if e == a:
comp_won_hands += 1
return 'comp'
elif e == b:
player_won_hands += 1
return 'player'
elif e == c:
comp_tm_won_hands += 1
return 'comp_tm'
elif e == d:
player_tm_won_hands += 1
return 'player_tm'
elif a.suit != trump and b.suit != trump and c.suit != trump and d.suit == trump:
player_tm_won_hands += 1
return 'player_tm'
elif a.suit != trump and b.suit != trump and c.suit == trump and d.suit != trump:
comp_tm_won_hands += 1
return 'comp_tm'
elif a.suit != trump and b.suit == trump and c.suit != trump and d.suit != trump:
player_won_hands += 1
return 'player'
elif a.suit == trump and b.suit != trump and c.suit != trump and d.suit != trump:
comp_won_hands += 1
return 'comp'
elif a.suit == trump and b.suit == trump and c.suit != trump and d.suit != trump:
e1 = compare_cards(a, b)
if e1 == a:
comp_won_hands += 1
return 'comp'
elif e1 == b:
player_won_hands += 1
return 'player'
elif a.suit == trump and b.suit != trump and c.suit == trump and d.suit != trump:
e2 = compare_cards(a, c)
if e2 == a:
comp_won_hands += 1
return 'comp'
elif e2 == c:
comp_tm_won_hands += 1
return 'comp_tm'
elif a.suit == trump and b.suit != trump and c.suit != trump and d.suit == trump:
e3 = compare_cards(a, d)
if e3 == a:
comp_won_hands += 1
return 'comp'
elif e3 == d:
player_tm_won_hands += 1
return 'player_tm'
elif a.suit != trump and b.suit == trump and c.suit == trump and d.suit != trump:
e4 = compare_cards(b, c)
if e4 == b:
player_won_hands += 1
return 'player'
elif e4 == c:
comp_tm_won_hands += 1
return 'comp_tm'
elif a.suit != trump and b.suit == trump and c.suit != trump and d.suit == trump:
e5 = compare_cards(b, d)
if e5 == b:
player_won_hands += 1
return 'player'
elif e5 == d:
player_tm_won_hands += 1
return 'player_tm'
elif a.suit != trump and b.suit != trump and c.suit == trump and d.suit == trump:
e6 = compare_cards(c, d)
if e6 == c:
comp_tm_won_hands += 1
return 'comp_tm'
elif e6 == d:
player_tm_won_hands += 1
return 'player_tm'
elif a.suit == trump and b.suit == trump and c.suit == trump and d.suit != trump:
f = compare_cards(compare_cards(a, b), c)
if f == a:
comp_won_hands += 1
return 'comp'
elif f == b:
player_won_hands += 1
return 'player'
elif f == c:
comp_tm_won_hands += 1
return 'comp_tm'
elif a.suit == trump and b.suit == trump and c.suit != trump and d.suit == trump:
f1 = compare_cards(compare_cards(a, b), d)
if f1 == a:
comp_won_hands += 1
return 'comp'
elif f1 == b:
player_won_hands += 1
return 'player'
elif f1 == d:
player_tm_won_hands += 1
return 'player_tm'
elif a.suit == trump and b.suit != trump and c.suit == trump and d.suit == trump:
f2 = compare_cards(compare_cards(a, c), d)
if f2 == a:
comp_won_hands += 1
return 'comp'
elif f2 == c:
comp_tm_won_hands += 1
return 'comp_tm'
elif f2 == d:
player_tm_won_hands += 1
return 'player_tm'
elif a.suit != trump and b.suit == trump and c.suit == trump and d.suit == trump:
f3 = compare_cards(compare_cards(b, c), d)
if f3 == b:
player_won_hands += 1
return 'player'
elif f3 == c:
comp_tm_won_hands += 1
return 'comp_tm'
elif f3 == d:
player_tm_won_hands += 1
return 'player_tm'
elif a.suit == trump and b.suit == trump and c.suit == trump and d.suit == trump:
f4 = check_highest_value_card(compare_list)
if f4 == a:
comp_won_hands += 1
return 'comp'
elif f4 == b:
player_won_hands += 1
return 'player'
elif f4 == c:
comp_tm_won_hands += 1
return 'comp_tm'
elif f4 == d:
player_tm_won_hands += 1
return 'player_tm'
elif (player_trump_selected or comp_trump_selected) and winner == 'comp_tm':
if a.suit != trump and b.suit != trump and c.suit != trump and d.suit != trump:
e = check_highest_value_card_of_suit(compare_list, c.suit)
if e == a:
comp_won_hands += 1
return 'comp'
elif e == b:
player_won_hands += 1
return 'player'
elif e == c:
comp_tm_won_hands += 1
return 'comp_tm'
elif e == d:
player_tm_won_hands += 1
return 'player_tm'
elif a.suit != trump and b.suit != trump and c.suit != trump and d.suit == trump:
player_tm_won_hands += 1
return 'player_tm'
elif a.suit != trump and b.suit != trump and c.suit == trump and d.suit != trump:
comp_tm_won_hands += 1
return 'comp_tm'
elif a.suit != trump and b.suit == trump and c.suit != trump and d.suit != trump:
player_won_hands += 1
return 'player'
elif a.suit == trump and b.suit != trump and c.suit != trump and d.suit != trump:
comp_won_hands += 1
return 'comp'
elif a.suit == trump and b.suit == trump and c.suit != trump and d.suit != trump:
e1 = compare_cards(a, b)
if e1 == a:
comp_won_hands += 1
return 'comp'
elif e1 == b:
player_won_hands += 1
return 'player'
elif a.suit == trump and b.suit != trump and c.suit == trump and d.suit != trump:
e2 = compare_cards(a, c)
if e2 == a:
comp_won_hands += 1
return 'comp'
elif e2 == c:
comp_tm_won_hands += 1
return 'comp_tm'
elif a.suit == trump and b.suit != trump and c.suit != trump and d.suit == trump:
e3 = compare_cards(a, d)
if e3 == a:
comp_won_hands += 1
return 'comp'
elif e3 == d:
player_tm_won_hands += 1
return 'player_tm'
elif a.suit != trump and b.suit == trump and c.suit == trump and d.suit != trump:
e4 = compare_cards(b, c)
if e4 == b:
player_won_hands += 1
return 'player'
elif e4 == c:
comp_tm_won_hands += 1
return 'comp_tm'
elif a.suit != trump and b.suit == trump and c.suit != trump and d.suit == trump:
e5 = compare_cards(b, d)
if e5 == b:
player_won_hands += 1
return 'player'
elif e5 == d:
player_tm_won_hands += 1
return 'player_tm'
elif a.suit != trump and b.suit != trump and c.suit == trump and d.suit == trump:
e6 = compare_cards(c, d)
if e6 == c:
comp_tm_won_hands += 1
return 'comp_tm'
elif e6 == d:
player_tm_won_hands += 1
return 'player_tm'
elif a.suit == trump and b.suit == trump and c.suit == trump and d.suit != trump:
f = compare_cards(compare_cards(a, b), c)
if f == a:
comp_won_hands += 1
return 'comp'
elif f == b:
player_won_hands += 1
return 'player'
elif f == c:
comp_tm_won_hands += 1
return 'comp_tm'
elif a.suit == trump and b.suit == trump and c.suit != trump and d.suit == trump:
f1 = compare_cards(compare_cards(a, b), d)
if f1 == a:
comp_won_hands += 1
return 'comp'
elif f1 == b:
player_won_hands += 1
return 'player'
elif f1 == d:
player_tm_won_hands += 1
return 'player_tm'
elif a.suit == trump and b.suit != trump and c.suit == trump and d.suit == trump:
f2 = compare_cards(compare_cards(a, c), d)
if f2 == a:
comp_won_hands += 1
return 'comp'
elif f2 == c:
comp_tm_won_hands += 1
return 'comp_tm'
elif f2 == d:
player_tm_won_hands += 1
return 'player_tm'
elif a.suit != trump and b.suit == trump and c.suit == trump and d.suit == trump:
f3 = compare_cards(compare_cards(b, c), d)
if f3 == b:
player_won_hands += 1
return 'player'
elif f3 == c:
comp_tm_won_hands += 1
return 'comp_tm'
elif f3 == d:
player_tm_won_hands += 1
return 'player_tm'
elif a.suit == trump and b.suit == trump and c.suit == trump and d.suit == trump:
f4 = check_highest_value_card(compare_list)
if f4 == a:
comp_won_hands += 1
return 'comp'
elif f4 == b:
player_won_hands += 1
return 'player'
elif f4 == c:
comp_tm_won_hands += 1
return 'comp_tm'
elif f4 == d:
player_tm_won_hands += 1
return 'player_tm'
elif (player_trump_selected or comp_trump_selected) and winner == 'player_tm':
if a.suit != trump and b.suit != trump and c.suit != trump and d.suit != trump:
e = check_highest_value_card_of_suit(compare_list, d.suit)
if e == a:
comp_won_hands += 1
return 'comp'
elif e == b:
player_won_hands += 1
return 'player'
elif e == c:
comp_tm_won_hands += 1
return 'comp_tm'
elif e == d:
player_tm_won_hands += 1
return 'player_tm'
elif a.suit != trump and b.suit != trump and c.suit != trump and d.suit == trump:
player_tm_won_hands += 1
return 'player_tm'
elif a.suit != trump and b.suit != trump and c.suit == trump and d.suit != trump:
comp_tm_won_hands += 1
return 'comp_tm'
elif a.suit != trump and b.suit == trump and c.suit != trump and d.suit != trump:
player_won_hands += 1
return 'player'
elif a.suit == trump and b.suit != trump and c.suit != trump and d.suit != trump:
comp_won_hands += 1
return 'comp'
elif a.suit == trump and b.suit == trump and c.suit != trump and d.suit != trump:
e1 = compare_cards(a, b)
if e1 == a:
comp_won_hands += 1
return 'comp'
elif e1 == b:
player_won_hands += 1
return 'player'
elif a.suit == trump and b.suit != trump and c.suit == trump and d.suit != trump:
e2 = compare_cards(a, c)
if e2 == a:
comp_won_hands += 1
return 'comp'
elif e2 == c:
comp_tm_won_hands += 1
return 'comp_tm'
elif a.suit == trump and b.suit != trump and c.suit != trump and d.suit == trump:
e3 = compare_cards(a, d)
if e3 == a:
comp_won_hands += 1
return 'comp'
elif e3 == d:
player_tm_won_hands += 1
return 'player_tm'
elif a.suit != trump and b.suit == trump and c.suit == trump and d.suit != trump:
e4 = compare_cards(b, c)
if e4 == b:
player_won_hands += 1
return 'player'
elif e4 == c:
comp_tm_won_hands += 1
return 'comp_tm'
elif a.suit != trump and b.suit == trump and c.suit != trump and d.suit == trump:
e5 = compare_cards(b, d)
if e5 == b:
player_won_hands += 1
return 'player'
elif e5 == d:
player_tm_won_hands += 1
return 'player_tm'
elif a.suit != trump and b.suit != trump and c.suit == trump and d.suit == trump:
e6 = compare_cards(c, d)
if e6 == c:
comp_tm_won_hands += 1
return 'comp_tm'
elif e6 == d:
player_tm_won_hands += 1
return 'player_tm'
elif a.suit == trump and b.suit == trump and c.suit == trump and d.suit != trump:
f = compare_cards(compare_cards(a, b), c)
if f == a:
comp_won_hands += 1
return 'comp'
elif f == b:
player_won_hands += 1
return 'player'
elif f == c:
comp_tm_won_hands += 1
return 'comp_tm'
elif a.suit == trump and b.suit == trump and c.suit != trump and d.suit == trump:
f1 = compare_cards(compare_cards(a, b), d)
if f1 == a:
comp_won_hands += 1
return 'comp'
elif f1 == b:
player_won_hands += 1
return 'player'
elif f1 == d:
player_tm_won_hands += 1
return 'player_tm'
elif a.suit == trump and b.suit != trump and c.suit == trump and d.suit == trump:
f2 = compare_cards(compare_cards(a, c), d)
if f2 == a:
comp_won_hands += 1
return 'comp'
elif f2 == c:
comp_tm_won_hands += 1
return 'comp_tm'
elif f2 == d:
player_tm_won_hands += 1
return 'player_tm'
elif a.suit != trump and b.suit == trump and c.suit == trump and d.suit == trump:
f3 = compare_cards(compare_cards(b, c), d)
if f3 == b:
player_won_hands += 1
return 'player'
elif f3 == c:
comp_tm_won_hands += 1
return 'comp_tm'
elif f3 == d:
player_tm_won_hands += 1
return 'player_tm'
elif a.suit == trump and b.suit == trump and c.suit == trump and d.suit == trump:
f4 = check_highest_value_card(compare_list)
if f4 == a:
comp_won_hands += 1
return 'comp'
elif f4 == b:
player_won_hands += 1
return 'player'
elif f4 == c:
comp_tm_won_hands += 1
return 'comp_tm'
elif f4 == d:
player_tm_won_hands += 1
return 'player_tm'
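# A compact, hypothetical alternative to the exhaustive branching above: the trick
# winner can be computed generically from the plays in order. This sketch assumes,
# as the code above implies, that each card exposes a `.suit` attribute and that
# compare_cards(x, y) returns the higher-ranking of two same-suit cards; it is
# illustrative only and is not called anywhere in this module.
def trick_winner_sketch(plays, trump_suit):
    """plays: list of (seat, card) tuples in play order; returns the winning seat."""
    def beats(challenger, current):
        # A trump beats any non-trump; otherwise only a card of the current
        # winner's suit can compete, decided by compare_cards.
        if challenger.suit == trump_suit and current.suit != trump_suit:
            return True
        if challenger.suit != current.suit:
            return False
        return compare_cards(challenger, current) == challenger
    winning_seat, winning_card = plays[0]
    for seat, card in plays[1:]:
        if beats(card, winning_card):
            winning_seat, winning_card = seat, card
    return winning_seat
# For example, trick_winner_sketch([('comp', a), ('player', b), ('comp_tm', c),
# ('player_tm', d)], trump) would mirror the branching above for a full trick.
# The *_win_cords / *_win_cord helpers below advance the screen coordinates at
# which each seat's won tricks are stacked.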
def comp_win_cords():
global x1, y1
x1 += 80
if x1 > 330:
x1 = 90
y1 = 325
elif x1 > 330 and y1 == 315:
x1 = 90
y1 = 395
elif x1 > 330 and y1 == 385:
x1 = 90
y1 = 465
elif x1 > 330 and y1 == 455:
x1 = 90
y1 = 535
def comp_tm_win_cords():
global x3, y3
x3 -= 80
if x3 < 580:
x3 = 820
y3 = 325
elif x3 < 580 and y3 == 315:
x3 = 820
y3 = 395
elif x3 < 580 and y3 == 385:
x3 = 820
y3 = 465
elif x3 < 580 and y3 == 455:
x3 = 820
y3 = 535
def player_win_cord():
global x2, y2
x2 += 80
if x2 > 570:
x2 = 250
y2 = 415
elif x2 > 570 and y2 == 415:
x2 = 250
y2 = 345
def player_tm_win_cord():
global x4, y4
x4 += 80
if x4 > 570:
x4 = 250
y4 = 195
elif x4 > 410 and y4 == 200:
x4 = 250
y4 = 265
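# set_won_hand: stacks the four cards of the completed trick onto the winning
# seat's pile (offset by 10px per card) and advances that pile's coordinates.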
def set_won_hand(rnd, hand_winner):
global x1, y1, x2, y2, x3, y3, x4, y4
rnd -= 1
if comp_trump_selected:
if hand_winner == 'comp':
match_cards(comp_played_hands[rnd], x1, y1)
match_cards(player_played_hands[rnd], x1 + 10, y1)
match_cards(comp_teammate_played_hands[rnd], x1 + 20, y1)
match_cards(player_teammate_played_hands[rnd], x1 + 30, y1)
comp_win_cords()
elif hand_winner == 'player':
match_cards(comp_played_hands[rnd], x2, y2)
match_cards(player_played_hands[rnd], x2 + 10, y2)
match_cards(comp_teammate_played_hands[rnd], x2 + 20, y2)
match_cards(player_teammate_played_hands[rnd], x2 + 30, y2)
player_win_cord()
elif hand_winner == 'comp_tm':
match_cards(comp_played_hands[rnd], x3, y3)
match_cards(player_played_hands[rnd], x3 + 10, y3)
match_cards(comp_teammate_played_hands[rnd], x3 + 20, y3)
match_cards(player_teammate_played_hands[rnd], x3 + 30, y3)
comp_tm_win_cords()
elif hand_winner == 'player_tm':
match_cards(comp_played_hands[rnd], x4, y4)
match_cards(player_played_hands[rnd], x4 + 10, y4)
match_cards(comp_teammate_played_hands[rnd], x4 + 20, y4)
match_cards(player_teammate_played_hands[rnd], x4 + 30, y4)
player_tm_win_cord()
elif player_trump_selected:
if hand_winner == 'comp':
match_cards(player_played_hands[rnd], x1, y1)
match_cards(comp_teammate_played_hands[rnd], x1 + 10, y1)
match_cards(player_teammate_played_hands[rnd], x1 + 20, y1)
match_cards(comp_played_hands[rnd], x1 + 30, y1)
comp_win_cords()
elif hand_winner == 'player':
match_cards(player_played_hands[rnd], x2, y2)
match_cards(comp_teammate_played_hands[rnd], x2 + 10, y2)
match_cards(player_teammate_played_hands[rnd], x2 + 20, y2)
match_cards(comp_played_hands[rnd], x2 + 30, y2)
player_win_cord()
elif hand_winner == 'comp_tm':
match_cards(player_played_hands[rnd], x3, y3)
match_cards(comp_teammate_played_hands[rnd], x3 + 10, y3)
match_cards(player_teammate_played_hands[rnd], x3 + 20, y3)
match_cards(comp_played_hands[rnd], x3 + 30, y3)
comp_tm_win_cords()
elif hand_winner == 'player_tm':
match_cards(player_played_hands[rnd], x4, y4)
match_cards(comp_teammate_played_hands[rnd], x4 + 10, y4)
match_cards(player_teammate_played_hands[rnd], x4 + 20, y4)
match_cards(comp_played_hands[rnd], x4 + 30, y4)
player_tm_win_cord()
def each_player_round_done():
global comp_round_done, player_round_done, comp_tm_round_done, player_tm_round_done
if comp_round_done and player_round_done and comp_tm_round_done and player_tm_round_done:
return True
return False
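# play_round: plays one trick. The trump selector leads the first trick; after that
# the winner of the previous trick leads and the remaining seats follow in order.
# Once all four seats have played, the trick winner is recorded, the table is
# cleared and the round is marked complete.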
def play_round(rnd):
global round_status, winner
x_limit = 920 - (75 * rnd)
turn = 14 - rnd
c1 = 14 - rnd
c2 = 14 - rnd
c3 = 14 - rnd
c1_pos = 410 - (20 * rnd)
c2_pos = 410 - (20 * rnd)
c3_pos = 610 - (20 * rnd)
if comp_trump_selected and comp_round_done is False and winner is None:
comp_play(True, rnd, c3_pos)
if player_trump_selected and player_round_done is False and winner is None:
player_play(True, rnd, turn, x_limit, c1, c2, c3, c1_pos, c2_pos, c3_pos)
if comp_round_done and player_round_done is False and winner is None:
player_play(False, rnd, turn, x_limit, c1 - 1, c2, c3, c1_pos - 20, c2_pos, c3_pos)
if player_round_done:
comp_tm_play(rnd, c3_pos)
if comp_tm_round_done:
player_tm_play(rnd, c3_pos - 20)
if player_round_done and comp_round_done is False and winner is None:
if comp_tm_round_done is False:
comp_tm_play(rnd, c3_pos)
if comp_tm_round_done:
player_tm_play(rnd, c3_pos - 20)
if player_tm_round_done:
comp_play(False, rnd, c3_pos - 20)
if winner == 'comp' and each_player_round_done() is False:
comp_play(False, rnd, c3_pos)
if comp_round_done:
player_play(False, rnd, turn, x_limit, c1 - 1, c2, c3, c1_pos - 20, c2_pos, c3_pos)
if player_round_done:
comp_tm_play(rnd, c3_pos)
if comp_tm_round_done:
player_tm_play(rnd, c3_pos - 20)
if winner == 'player' and each_player_round_done() is False:
player_play(False, rnd, turn, x_limit, c1, c2, c3, c1_pos, c2_pos, c3_pos)
if player_round_done:
comp_tm_play(rnd, c3_pos)
if comp_tm_round_done:
player_tm_play(rnd, c3_pos - 20)
if player_tm_round_done:
comp_play(False, rnd, c3_pos - 20)
if winner == 'comp_tm' and each_player_round_done() is False:
comp_tm_play(rnd, c3_pos)
if comp_tm_round_done:
player_tm_play(rnd, c3_pos - 20)
if player_tm_round_done:
comp_play(False, rnd, c3_pos - 20)
if comp_round_done:
player_play(False, rnd, turn, x_limit, c1 - 1, c2 - 1, c3 - 1, c1_pos - 20, c2_pos - 20,
c3_pos - 20)
if winner == 'player_tm' and each_player_round_done() is False:
player_tm_play(rnd, c3_pos - 20)
if player_tm_round_done:
comp_play(False, rnd, c3_pos - 20)
if comp_round_done:
player_play(False, rnd, turn, x_limit, c1 - 1, c2, c3 - 1, c1_pos - 20, c2_pos, c3_pos - 20)
if player_round_done:
comp_tm_play(rnd, c3_pos - 20)
if player_tm_round_done and player_round_done and comp_round_done and comp_tm_round_done:
winner = check_won_hand(rnd)
set_won_hand(rnd, winner)
play_area_clear()
round_status['round' + str(rnd)] = True
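# check_rounds_completed: all_rounds_done becomes True only once every one of the
# 13 tricks has been marked complete in round_status.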
def check_rounds_completed():
global all_rounds_done
for j in range(1, 14):
if round_status['round' + str(j)] is True:
all_rounds_done = True
else:
all_rounds_done = False
break
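# winner_screen: end-of-game screen showing each seat's trick count and announcing
# the winning team (or a draw); Enter quits.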
def winner_screen():
global run
win2 = pygame.display.set_mode((400, 200))
win2.fill((0, 0, 0))
win2.blit(d_intro_suit, (0, 70))
win2.blit(i_intro_suit, (100, 80))
win2.blit(g_intro_suit, (200, 70))
win2.blit(u_intro_suit, (300, 80))
leave_game = i_font.render("Press Enter to Quit", True, (161, 17, 17))
win2.blit(leave_game, (150, 180))
a = comp_won_hands
b = player_won_hands
c = comp_tm_won_hands
d = player_tm_won_hands
total_player_won = b + d
total_comp_won = a + c
comp_wins = i_font.render("Comp " + str(a), True, (255, 255, 255))
player_wins = i_font.render("Player " + str(b), True, (255, 255, 255))
comp_tm_wins = i_font.render("Comp Teammate " + str(c), True, (255, 255, 255))
player_tm_wins = i_font.render("Player Teammate " + str(d), True, (255, 255, 255))
win2.blit(comp_wins, (5, 50))
win2.blit(player_wins, (70, 50))
win2.blit(comp_tm_wins, (140, 50))
win2.blit(player_tm_wins, (270, 50))
winner_player = font.render(
player_information[0].upper() + " TEAM WINS!", True, (255, 255, 255))
winner_comp = font.render("COMP TEAM WINS!", True, (255, 255, 255))
match_draw = font.render("ITS A DRAW!", True, (255, 255, 255))
if total_player_won > total_comp_won:
win2.blit(winner_player, (100, 20))
elif total_player_won < total_comp_won:
win2.blit(winner_comp, (120, 20))
elif total_comp_won == total_player_won:
win2.blit(match_draw, (120, 20))
for event in pygame.event.get():
if event.type == pygame.QUIT:
run = False
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_RETURN:
run = False
if event.key == pygame.K_SPACE:
pass
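# start_play: drives a full game: pick trumps, play the 13 tricks in order, then
# show the winner screen once every trick is done.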
def start_play():
global all_rounds_done, run
if trump_list_chosen is False:
trump_list()
if round_status['round1'] is False:
play_round(1)
start_new_round()
for i in range(2, 13):
if round_status['round' + str(i)] is False and round_status['round' + str(i - 1)]:
play_round(i)
start_new_round()
if round_status['round13'] is False and round_status['round12']:
play_round(13)
check_rounds_completed()
if all_rounds_done:
play_area_clear()
winner_screen()
if check_for_quit:
run = False
else:
run = True
d_x = 0
d_y = -100
i_x = 100
i_y = -150
g_x = 200
g_y = -100
u_x = 300
u_y = -150
game_intro = False
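# Intro screen: animate the falling suit letters until Enter is pressed, then
# initialise the game and run the main loop.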
while run:
while game_intro is False:
win2 = pygame.display.set_mode((400, 200))
win2.fill((0, 0, 0))
user_comm = i_font.render("Press Enter to begin game", True, (161, 17, 17))
if Trumps:
intro_show_trump = i_font.render("You select trumps", True, (161, 17, 17))
win2.blit(intro_show_trump, (260, 180))
elif Trumps is False:
intro_show_trump = i_font.render("Computer selects trumps", True, (161, 17, 17))
win2.blit(intro_show_trump, (230, 180))
win2.blit(user_comm, (20, 180))
win2.blit(d_intro_suit, (d_x, d_y))
win2.blit(i_intro_suit, (i_x, i_y))
win2.blit(g_intro_suit, (g_x, g_y))
win2.blit(u_intro_suit, (u_x, u_y))
d_y += 1
i_y += 1
g_y += 1
u_y += 1
if d_y == 70:
d_y = -100
if i_y == 70:
i_y = -150
if g_y == 70:
g_y = -100
if u_y == 70:
u_y = -150
pygame.display.update()
for event in pygame.event.get():
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_RETURN:
game_intro = True
if game_intro:
initialize_game()
start_play()
for event in pygame.event.get():
if event.type == pygame.QUIT:
run = False
pygame.display.update()
a2f4a4a3326f08cfa2ab2509c2ec73cc2da70426 | 40 | py | Python | rackspace/heat_store/catalog/__init__.py | rohithkumar-rackspace/rcbops | fb690bc528499bbf9aebba3ab0cce0b4dffd9e35 | ["Apache-2.0"] | null | null | null | null | null | null | null | null | null
from .solution import Solution, Catalog
0c2b6c548215d7a1cfd22cb3107705303356bb5a | 188 | py | Python | src/assignments/main_assignment2.py | acc-cosc-1336/cosc-1336-spring-2018-vcruz350 | 0cee9fde3d4129c51626c4e0c870972aebec9b95 | ["MIT"] | null | null | null | 1 | 2018-03-08T19:46:08.000Z | 2018-03-08T20:00:47.000Z | null | null | null
from assignment2 import faculty_evaluation_result
'''Write code to call the faculty_evaluation_result function with data of your choice'''
print(faculty_evaluation_result(0,9,20,6,9,32))
0c3aa552fd8ef35e7cfb2629f35748d620154e15 | 12,462 | py | Python | monitoring/prober/scd/test_operation_references_error_cases_v0_3_17.py | Orbitalize/InterUSS-Platform | a1d60ec928dc5c63f9dcddd195bfeda7c4c1c84b | ["Apache-2.0"] | 58 | 2019-10-03T19:15:47.000Z | 2022-03-09T16:50:47.000Z | 283 | 2019-09-30T18:35:02.000Z | 2022-03-29T13:36:53.000Z | 51 | 2019-10-08T18:47:36.000Z | 2022-03-23T08:44:06.000Z
"""Operation References corner cases error tests:
"""
import datetime
import json
import uuid
import yaml
from monitoring.monitorlib.infrastructure import default_scope
from monitoring.monitorlib import scd
from monitoring.monitorlib.scd import SCOPE_SC
from monitoring.prober.infrastructure import for_api_versions, register_resource_type
OP_TYPE = register_resource_type(342, 'Primary operational intent')
OP_TYPE2 = register_resource_type(343, 'Conflicting operational intent')
@for_api_versions(scd.API_0_3_17)
def test_ensure_clean_workspace(ids, scd_api, scd_session):
for op_id in (ids(OP_TYPE), ids(OP_TYPE2)):
resp = scd_session.get('/operational_intent_references/{}'.format(op_id), scope=SCOPE_SC)
if resp.status_code == 200:
resp = scd_session.delete('/operational_intent_references/{}'.format(op_id), scope=SCOPE_SC)
assert resp.status_code == 200, resp.content
elif resp.status_code == 404:
# As expected.
pass
else:
assert False, resp.content
@for_api_versions(scd.API_0_3_17)
@default_scope(SCOPE_SC)
def test_op_ref_area_too_large(scd_api, scd_session):
with open('./scd/resources/op_ref_area_too_large_v15.json', 'r') as f:
req = json.load(f)
resp = scd_session.post('/operational_intent_references/query', json=req)
assert resp.status_code == 400, resp.content
@for_api_versions(scd.API_0_3_17)
@default_scope(SCOPE_SC)
def test_op_ref_start_end_times_past(scd_api, scd_session):
with open('./scd/resources/op_ref_start_end_times_past_v15.json', 'r') as f:
req = json.load(f)
resp = scd_session.post('/operational_intent_references/query', json=req)
# It is ok (and useful) to query for past Operations that may not yet have
# been explicitly deleted. This is unlike remote ID where ISAs are
# auto-removed from the perspective of the client immediately after their end
# time.
assert resp.status_code == 200, resp.content
@for_api_versions(scd.API_0_3_17)
@default_scope(SCOPE_SC)
def test_op_ref_incorrect_units(scd_api, scd_session):
with open('./scd/resources/op_ref_incorrect_units_v15.json', 'r') as f:
req = json.load(f)
resp = scd_session.post('/operational_intent_references/query', json=req)
assert resp.status_code == 400, resp.content
@for_api_versions(scd.API_0_3_17)
@default_scope(SCOPE_SC)
def test_op_ref_incorrect_altitude_ref(scd_api, scd_session):
with open('./scd/resources/op_ref_incorrect_altitude_ref_v15.json', 'r') as f:
req = json.load(f)
resp = scd_session.post('/operational_intent_references/query', json=req)
assert resp.status_code == 400, resp.content
@for_api_versions(scd.API_0_3_17)
@default_scope(SCOPE_SC)
def test_op_uss_base_url_non_tls(ids, scd_api, scd_session):
with open('./scd/resources/op_uss_base_url_non_tls_v15.json', 'r') as f:
req = json.load(f)
resp = scd_session.put('/operational_intent_references/{}'.format(ids(OP_TYPE)), json=req)
assert resp.status_code == 400, resp.content
@for_api_versions(scd.API_0_3_17)
@default_scope(SCOPE_SC)
def test_op_bad_subscription_id(ids, scd_api, scd_session):
with open('./scd/resources/op_bad_subscription_v15.json', 'r') as f:
req = json.load(f)
resp = scd_session.put('/operational_intent_references/{}'.format(ids(OP_TYPE)), json=req)
assert resp.status_code == 400, resp.content
@for_api_versions(scd.API_0_3_17)
@default_scope(SCOPE_SC)
def test_op_bad_subscription_id_random(ids, scd_api, scd_session):
with open('./scd/resources/op_bad_subscription_v15.json', 'r') as f:
req = json.load(f)
req['subscription_id'] = uuid.uuid4().hex
resp = scd_session.put('/operational_intent_references/{}'.format(ids(OP_TYPE)), json=req)
assert resp.status_code == 400, resp.content
@for_api_versions(scd.API_0_3_17)
@default_scope(SCOPE_SC)
def test_op_new_and_existing_subscription(ids, scd_api, scd_session):
with open('./scd/resources/op_new_and_existing_subscription_v15.json', 'r') as f:
req = json.load(f)
resp = scd_session.put('/operational_intent_references/{}'.format(ids(OP_TYPE)), json=req)
assert resp.status_code == 400, resp.content
@for_api_versions(scd.API_0_3_17)
@default_scope(SCOPE_SC)
def test_op_end_time_past(ids, scd_api, scd_session):
with open('./scd/resources/op_end_time_past_v15.json', 'r') as f:
req = json.load(f)
resp = scd_session.put('/operational_intent_references/{}'.format(ids(OP_TYPE)), json=req)
assert resp.status_code == 400, resp.content
@for_api_versions(scd.API_0_3_17)
@default_scope(SCOPE_SC)
def test_op_already_exists(ids, scd_api, scd_session):
with open('./scd/resources/op_request_1_v15.json', 'r') as f:
req = json.load(f)
resp = scd_session.put('/operational_intent_references/{}'.format(ids(OP_TYPE)), json=req)
assert resp.status_code == 200, resp.content
ovn = resp.json()['operational_intent_reference']['ovn']
resp = scd_session.put('/operational_intent_references/{}'.format(ids(OP_TYPE)), json=req)
assert resp.status_code == 409, resp.content
# Delete operation
resp = scd_session.delete('/operational_intent_references/{}/{}'.format(ids(OP_TYPE), ovn))
assert resp.status_code == 200, resp.content
# Verify deletion
resp = scd_session.get('/operational_intent_references/{}'.format(ids(OP_TYPE)))
assert resp.status_code == 404, resp.content
@for_api_versions(scd.API_0_3_17)
@default_scope(SCOPE_SC)
def test_op_400_version1(ids, scd_api, scd_session):
with open('./scd/resources/op_400_version1_v15.json', 'r') as f:
req = json.load(f)
resp = scd_session.put('/operational_intent_references/{}'.format(ids(OP_TYPE)), json=req)
assert resp.status_code == 400, resp.content
@for_api_versions(scd.API_0_3_17)
@default_scope(SCOPE_SC)
def test_op_bad_state_version0(ids, scd_api, scd_session):
with open('./scd/resources/op_bad_state_version0_v15.json', 'r') as f:
req = json.load(f)
resp = scd_session.put('/operational_intent_references/{}'.format(ids(OP_TYPE)), json=req)
assert resp.status_code == 400, resp.content
@for_api_versions(scd.API_0_3_17)
@default_scope(SCOPE_SC)
def test_op_bad_lat_lon_range(ids, scd_api, scd_session):
with open('./scd/resources/op_bad_lat_lon_range_v15.json', 'r') as f:
req = json.load(f)
resp = scd_session.put('/operational_intent_references/{}'.format(ids(OP_TYPE)), json=req)
assert resp.status_code == 400, resp.content
@for_api_versions(scd.API_0_3_17)
@default_scope(SCOPE_SC)
def test_op_area_too_large_put(ids, scd_api, scd_session):
with open('./scd/resources/op_area_too_large_put_v15.json', 'r') as f:
req = json.load(f)
resp = scd_session.put('/operational_intent_references/{}'.format(ids(OP_TYPE)), json=req)
assert resp.status_code == 400, resp.content
@for_api_versions(scd.API_0_3_17)
@default_scope(SCOPE_SC)
def test_op_bad_time_format(ids, scd_api, scd_session):
with open('./scd/resources/op_bad_time_format_v15.json', 'r') as f:
req = json.load(f)
resp = scd_session.put('/operational_intent_references/{}'.format(ids(OP_TYPE)), json=req)
assert resp.status_code == 400, resp.content
@for_api_versions(scd.API_0_3_17)
@default_scope(SCOPE_SC)
def test_op_bad_volume(ids, scd_api, scd_session):
with open('./scd/resources/op_bad_volume_v15.json', 'r') as f:
req = json.load(f)
resp = scd_session.put('/operational_intent_references/{}'.format(ids(OP_TYPE)), json=req)
assert resp.status_code == 400, resp.content
@for_api_versions(scd.API_0_3_17)
@default_scope(SCOPE_SC)
def test_op_repeated_requests(ids, scd_api, scd_session):
with open('./scd/resources/op_request_1_v15.json', 'r') as f:
req = json.load(f)
resp = scd_session.put('/operational_intent_references/{}'.format(ids(OP_TYPE)), json=req)
assert resp.status_code == 200, resp.content
ovn = resp.json()['operational_intent_reference']['ovn']
print(resp.json()['operational_intent_reference']['ovn'])
assert 'operational_intent_reference' in resp.json(), resp.content
assert 'ovn' in resp.json()['operational_intent_reference'], resp.content
ovn = resp.json()['operational_intent_reference']['ovn']
with open('./scd/resources/op_request_1.json', 'r') as f:
req = json.load(f)
resp = scd_session.put('/operational_intent_references/{}'.format(ids(OP_TYPE)), json=req)
assert resp.status_code == 409, resp.content
# Delete operation
resp = scd_session.delete('/operational_intent_references/{}/{}'.format(ids(OP_TYPE), ovn))
assert resp.status_code == 200, resp.content
@for_api_versions(scd.API_0_3_17)
@default_scope(SCOPE_SC)
def test_op_invalid_id(scd_api, scd_session):
with open('./scd/resources/op_request_1_v15.json', 'r') as f:
req = json.load(f)
resp = scd_session.put('/operational_intent_references/not_uuid_format', json=req)
assert resp.status_code == 400, resp.content
@for_api_versions(scd.API_0_3_17)
@default_scope(SCOPE_SC)
def test_missing_conflicted_operation(ids, scd_api, scd_session):
# Emplace the initial version of Operation 1
with open('./scd/resources/op_missing_initial.yaml', 'r') as f:
req = yaml.full_load(f)
dt = datetime.datetime.utcnow() - scd.start_of(req['extents'])
req['extents'] = scd.offset_time(req['extents'], dt)
resp = scd_session.put('/operational_intent_references/{}'.format(ids(OP_TYPE)), json=req)
assert resp.status_code == 200, resp.content
ovn1a = resp.json()['operational_intent_reference']['ovn']
sub_id = resp.json()['operational_intent_reference']['subscription_id']
# Emplace the pre-existing Operation that conflicted in the original observation
with open('./scd/resources/op_missing_preexisting_unknown.yaml', 'r') as f:
req = yaml.full_load(f)
req['extents'] = scd.offset_time(req['extents'], dt)
req['key'] = [ovn1a]
resp = scd_session.put('/operational_intent_references/{}'.format(ids(OP_TYPE2)), json=req)
assert resp.status_code == 200, resp.content
# Attempt to update Operation 1 without OVN for the pre-existing Operation
with open('./scd/resources/op_missing_update.json', 'r') as f:
req = json.load(f)
req['extents'] = scd.offset_time(req['extents'], dt)
req['key'] = [ovn1a]
req['subscription_id'] = sub_id
resp = scd_session.put('/operational_intent_references/{}/{}'.format(ids(OP_TYPE), ovn1a), json=req)
assert resp.status_code == 409, resp.content
# checking entity conflicts
conflicts = []
data = resp.json()
assert 'missing_operational_intents' in data
assert ids(OP_TYPE2) in [intent['id'] for intent in data['missing_operational_intents']], resp.content
# Perform an area-based query on the area occupied by Operation 1
with open('./scd/resources/op_missing_query.json', 'r') as f:
req = json.load(f)
req['area_of_interest'] = scd.offset_time([req['area_of_interest']], dt)[0]
resp = scd_session.post('/operational_intent_references/query', json=req)
assert resp.status_code == 200, resp.content
ops = [op['id'] for op in resp.json()['operational_intent_references']]
assert ids(OP_TYPE) in ops, resp.content
# ids(OP_TYPE2) not expected here because its ceiling is <575m whereas query floor is
# >591m.
assert ids(OP_TYPE2) not in ops, resp.content
@for_api_versions(scd.API_0_3_17)
@default_scope(SCOPE_SC)
def test_big_operation_search(scd_api, scd_session):
with open('./scd/resources/op_big_operation.json', 'r') as f:
req = json.load(f)
dt = datetime.datetime.utcnow() - scd.start_of([req['area_of_interest']])
req['area_of_interest'] = scd.offset_time([req['area_of_interest']], dt)[0]
resp = scd_session.post('/operational_intent_references/query', json=req)
assert resp.status_code == 400, resp.content
@for_api_versions(scd.API_0_3_17)
@default_scope(SCOPE_SC)
def test_clean_up(ids, scd_api, scd_session):
for op_id in (ids(OP_TYPE), ids(OP_TYPE2)):
resp = scd_session.get('/operational_intent_references/{}'.format(op_id), scope=SCOPE_SC)
if resp.status_code == 200:
# only the owner of the subscription can delete a operation reference.
assert 'operational_intent_reference' in resp.json(), resp.content
assert 'ovn' in resp.json()['operational_intent_reference'], resp.content
ovn = resp.json()['operational_intent_reference']['ovn']
resp = scd_session.delete('/operational_intent_references/{}/{}'.format(op_id, ovn), scope=SCOPE_SC)
assert resp.status_code == 200, resp.content
elif resp.status_code == 404:
# As expected.
pass
else:
assert False, resp.content
a773156cca17d10b631e412b78b51a015df3868d | 9,424 | py | Python | sympy/ntheory/tests/test_bbp_pi.py | iamabhishek0/sympy | c461bd1ff9d178d1012b04fd0bf37ee3abb02cdd | ["BSD-3-Clause"] | 2 | 2019-02-05T19:20:24.000Z | 2019-04-23T13:24:38.000Z | 9 | 2021-03-19T03:06:53.000Z | 2022-03-12T00:37:04.000Z | 1 | 2022-02-22T23:53:22.000Z | 2022-02-22T23:53:22.000Z
from random import randint
from sympy.ntheory.bbp_pi import pi_hex_digits
from sympy.utilities.pytest import raises
# http://www.herongyang.com/Cryptography/Blowfish-First-8366-Hex-Digits-of-PI.html
# There are actually 8336 listed there; with the prepended 3 there are 8337
# below
dig=''.join('''
3243f6a8885a308d313198a2e03707344a4093822299f31d0082efa98ec4e6c89452821e638d013
77be5466cf34e90c6cc0ac29b7c97c50dd3f84d5b5b54709179216d5d98979fb1bd1310ba698dfb5
ac2ffd72dbd01adfb7b8e1afed6a267e96ba7c9045f12c7f9924a19947b3916cf70801f2e2858efc
16636920d871574e69a458fea3f4933d7e0d95748f728eb658718bcd5882154aee7b54a41dc25a59
b59c30d5392af26013c5d1b023286085f0ca417918b8db38ef8e79dcb0603a180e6c9e0e8bb01e8a
3ed71577c1bd314b2778af2fda55605c60e65525f3aa55ab945748986263e8144055ca396a2aab10
b6b4cc5c341141e8cea15486af7c72e993b3ee1411636fbc2a2ba9c55d741831f6ce5c3e169b8793
1eafd6ba336c24cf5c7a325381289586773b8f48986b4bb9afc4bfe81b6628219361d809ccfb21a9
91487cac605dec8032ef845d5de98575b1dc262302eb651b8823893e81d396acc50f6d6ff383f442
392e0b4482a484200469c8f04a9e1f9b5e21c66842f6e96c9a670c9c61abd388f06a51a0d2d8542f
68960fa728ab5133a36eef0b6c137a3be4ba3bf0507efb2a98a1f1651d39af017666ca593e82430e
888cee8619456f9fb47d84a5c33b8b5ebee06f75d885c12073401a449f56c16aa64ed3aa62363f77
061bfedf72429b023d37d0d724d00a1248db0fead349f1c09b075372c980991b7b25d479d8f6e8de
f7e3fe501ab6794c3b976ce0bd04c006bac1a94fb6409f60c45e5c9ec2196a246368fb6faf3e6c53
b51339b2eb3b52ec6f6dfc511f9b30952ccc814544af5ebd09bee3d004de334afd660f2807192e4b
b3c0cba85745c8740fd20b5f39b9d3fbdb5579c0bd1a60320ad6a100c6402c7279679f25fefb1fa3
cc8ea5e9f8db3222f83c7516dffd616b152f501ec8ad0552ab323db5fafd23876053317b483e00df
829e5c57bbca6f8ca01a87562edf1769dbd542a8f6287effc3ac6732c68c4f5573695b27b0bbca58
c8e1ffa35db8f011a010fa3d98fd2183b84afcb56c2dd1d35b9a53e479b6f84565d28e49bc4bfb97
90e1ddf2daa4cb7e3362fb1341cee4c6e8ef20cada36774c01d07e9efe2bf11fb495dbda4dae9091
98eaad8e716b93d5a0d08ed1d0afc725e08e3c5b2f8e7594b78ff6e2fbf2122b648888b812900df0
1c4fad5ea0688fc31cd1cff191b3a8c1ad2f2f2218be0e1777ea752dfe8b021fa1e5a0cc0fb56f74
e818acf3d6ce89e299b4a84fe0fd13e0b77cc43b81d2ada8d9165fa2668095770593cc7314211a14
77e6ad206577b5fa86c75442f5fb9d35cfebcdaf0c7b3e89a0d6411bd3ae1e7e4900250e2d2071b3
5e226800bb57b8e0af2464369bf009b91e5563911d59dfa6aa78c14389d95a537f207d5ba202e5b9
c5832603766295cfa911c819684e734a41b3472dca7b14a94a1b5100529a532915d60f573fbc9bc6
e42b60a47681e6740008ba6fb5571be91ff296ec6b2a0dd915b6636521e7b9f9b6ff34052ec58556
6453b02d5da99f8fa108ba47996e85076a4b7a70e9b5b32944db75092ec4192623ad6ea6b049a7df
7d9cee60b88fedb266ecaa8c71699a17ff5664526cc2b19ee1193602a575094c29a0591340e4183a
3e3f54989a5b429d656b8fe4d699f73fd6a1d29c07efe830f54d2d38e6f0255dc14cdd20868470eb
266382e9c6021ecc5e09686b3f3ebaefc93c9718146b6a70a1687f358452a0e286b79c5305aa5007
373e07841c7fdeae5c8e7d44ec5716f2b8b03ada37f0500c0df01c1f040200b3ffae0cf51a3cb574
b225837a58dc0921bdd19113f97ca92ff69432477322f547013ae5e58137c2dadcc8b576349af3dd
a7a94461460fd0030eecc8c73ea4751e41e238cd993bea0e2f3280bba1183eb3314e548b384f6db9
086f420d03f60a04bf2cb8129024977c795679b072bcaf89afde9a771fd9930810b38bae12dccf3f
2e5512721f2e6b7124501adde69f84cd877a5847187408da17bc9f9abce94b7d8cec7aec3adb851d
fa63094366c464c3d2ef1c18473215d908dd433b3724c2ba1612a14d432a65c45150940002133ae4
dd71dff89e10314e5581ac77d65f11199b043556f1d7a3c76b3c11183b5924a509f28fe6ed97f1fb
fa9ebabf2c1e153c6e86e34570eae96fb1860e5e0a5a3e2ab3771fe71c4e3d06fa2965dcb999e71d
0f803e89d65266c8252e4cc9789c10b36ac6150eba94e2ea78a5fc3c531e0a2df4f2f74ea7361d2b
3d1939260f19c279605223a708f71312b6ebadfe6eeac31f66e3bc4595a67bc883b17f37d1018cff
28c332ddefbe6c5aa56558218568ab9802eecea50fdb2f953b2aef7dad5b6e2f841521b628290761
70ecdd4775619f151013cca830eb61bd960334fe1eaa0363cfb5735c904c70a239d59e9e0bcbaade
14eecc86bc60622ca79cab5cabb2f3846e648b1eaf19bdf0caa02369b9655abb5040685a323c2ab4
b3319ee9d5c021b8f79b540b19875fa09995f7997e623d7da8f837889a97e32d7711ed935f166812
810e358829c7e61fd696dedfa17858ba9957f584a51b2272639b83c3ff1ac24696cdb30aeb532e30
548fd948e46dbc312858ebf2ef34c6ffeafe28ed61ee7c3c735d4a14d9e864b7e342105d14203e13
e045eee2b6a3aaabeadb6c4f15facb4fd0c742f442ef6abbb5654f3b1d41cd2105d81e799e86854d
c7e44b476a3d816250cf62a1f25b8d2646fc8883a0c1c7b6a37f1524c369cb749247848a0b5692b2
85095bbf00ad19489d1462b17423820e0058428d2a0c55f5ea1dadf43e233f70613372f0928d937e
41d65fecf16c223bdb7cde3759cbee74604085f2a7ce77326ea607808419f8509ee8efd85561d997
35a969a7aac50c06c25a04abfc800bcadc9e447a2ec3453484fdd567050e1e9ec9db73dbd3105588
cd675fda79e3674340c5c43465713e38d83d28f89ef16dff20153e21e78fb03d4ae6e39f2bdb83ad
f7e93d5a68948140f7f64c261c94692934411520f77602d4f7bcf46b2ed4a20068d40824713320f4
6a43b7d4b7500061af1e39f62e9724454614214f74bf8b88404d95fc1d96b591af70f4ddd366a02f
45bfbc09ec03bd97857fac6dd031cb850496eb27b355fd3941da2547e6abca0a9a28507825530429
f40a2c86dae9b66dfb68dc1462d7486900680ec0a427a18dee4f3ffea2e887ad8cb58ce0067af4d6
b6aace1e7cd3375fecce78a399406b2a4220fe9e35d9f385b9ee39d7ab3b124e8b1dc9faf74b6d18
5626a36631eae397b23a6efa74dd5b43326841e7f7ca7820fbfb0af54ed8feb397454056acba4895
2755533a3a20838d87fe6ba9b7d096954b55a867bca1159a58cca9296399e1db33a62a4a563f3125
f95ef47e1c9029317cfdf8e80204272f7080bb155c05282ce395c11548e4c66d2248c1133fc70f86
dc07f9c9ee41041f0f404779a45d886e17325f51ebd59bc0d1f2bcc18f41113564257b7834602a9c
60dff8e8a31f636c1b0e12b4c202e1329eaf664fd1cad181156b2395e0333e92e13b240b62eebeb9
2285b2a20ee6ba0d99de720c8c2da2f728d012784595b794fd647d0862e7ccf5f05449a36f877d48
fac39dfd27f33e8d1e0a476341992eff743a6f6eabf4f8fd37a812dc60a1ebddf8991be14cdb6e6b
0dc67b55106d672c372765d43bdcd0e804f1290dc7cc00ffa3b5390f92690fed0b667b9ffbcedb7d
9ca091cf0bd9155ea3bb132f88515bad247b9479bf763bd6eb37392eb3cc1159798026e297f42e31
2d6842ada7c66a2b3b12754ccc782ef11c6a124237b79251e706a1bbe64bfb63501a6b101811caed
fa3d25bdd8e2e1c3c9444216590a121386d90cec6ed5abea2a64af674eda86a85fbebfe98864e4c3
fe9dbc8057f0f7c08660787bf86003604dd1fd8346f6381fb07745ae04d736fccc83426b33f01eab
71b08041873c005e5f77a057bebde8ae2455464299bf582e614e58f48ff2ddfda2f474ef388789bd
c25366f9c3c8b38e74b475f25546fcd9b97aeb26618b1ddf84846a0e79915f95e2466e598e20b457
708cd55591c902de4cb90bace1bb8205d011a862487574a99eb77f19b6e0a9dc09662d09a1c43246
33e85a1f0209f0be8c4a99a0251d6efe101ab93d1d0ba5a4dfa186f20f2868f169dcb7da83573906
fea1e2ce9b4fcd7f5250115e01a70683faa002b5c40de6d0279af88c27773f8641c3604c0661a806
b5f0177a28c0f586e0006058aa30dc7d6211e69ed72338ea6353c2dd94c2c21634bbcbee5690bcb6
deebfc7da1ce591d766f05e4094b7c018839720a3d7c927c2486e3725f724d9db91ac15bb4d39eb8
fced54557808fca5b5d83d7cd34dad0fc41e50ef5eb161e6f8a28514d96c51133c6fd5c7e756e14e
c4362abfceddc6c837d79a323492638212670efa8e406000e03a39ce37d3faf5cfabc277375ac52d
1b5cb0679e4fa33742d382274099bc9bbed5118e9dbf0f7315d62d1c7ec700c47bb78c1b6b21a190
45b26eb1be6a366eb45748ab2fbc946e79c6a376d26549c2c8530ff8ee468dde7dd5730a1d4cd04d
c62939bbdba9ba4650ac9526e8be5ee304a1fad5f06a2d519a63ef8ce29a86ee22c089c2b843242e
f6a51e03aa9cf2d0a483c061ba9be96a4d8fe51550ba645bd62826a2f9a73a3ae14ba99586ef5562
e9c72fefd3f752f7da3f046f6977fa0a5980e4a91587b086019b09e6ad3b3ee593e990fd5a9e34d7
972cf0b7d9022b8b5196d5ac3a017da67dd1cf3ed67c7d2d281f9f25cfadf2b89b5ad6b4725a88f5
4ce029ac71e019a5e647b0acfded93fa9be8d3c48d283b57ccf8d5662979132e28785f0191ed7560
55f7960e44e3d35e8c15056dd488f46dba03a161250564f0bdc3eb9e153c9057a297271aeca93a07
2a1b3f6d9b1e6321f5f59c66fb26dcf3197533d928b155fdf5035634828aba3cbb28517711c20ad9
f8abcc5167ccad925f4de817513830dc8e379d58629320f991ea7a90c2fb3e7bce5121ce64774fbe
32a8b6e37ec3293d4648de53696413e680a2ae0810dd6db22469852dfd09072166b39a460a6445c0
dd586cdecf1c20c8ae5bbef7dd1b588d40ccd2017f6bb4e3bbdda26a7e3a59ff453e350a44bcb4cd
d572eacea8fa6484bb8d6612aebf3c6f47d29be463542f5d9eaec2771bf64e6370740e0d8de75b13
57f8721671af537d5d4040cb084eb4e2cc34d2466a0115af84e1b0042895983a1d06b89fb4ce6ea0
486f3f3b823520ab82011a1d4b277227f8611560b1e7933fdcbb3a792b344525bda08839e151ce79
4b2f32c9b7a01fbac9e01cc87ebcc7d1f6cf0111c3a1e8aac71a908749d44fbd9ad0dadecbd50ada
380339c32ac69136678df9317ce0b12b4ff79e59b743f5bb3af2d519ff27d9459cbf97222c15e6fc
2a0f91fc719b941525fae59361ceb69cebc2a8645912baa8d1b6c1075ee3056a0c10d25065cb03a4
42e0ec6e0e1698db3b4c98a0be3278e9649f1f9532e0d392dfd3a0342b8971f21e1b0a74414ba334
8cc5be7120c37632d8df359f8d9b992f2ee60b6f470fe3f11de54cda541edad891ce6279cfcd3e7e
6f1618b166fd2c1d05848fd2c5f6fb2299f523f357a632762393a8353156cccd02acf081625a75eb
b56e16369788d273ccde96629281b949d04c50901b71c65614e6c6c7bd327a140a45e1d006c3f27b
9ac9aa53fd62a80f00bb25bfe235bdd2f671126905b2040222b6cbcf7ccd769c2b53113ec01640e3
d338abbd602547adf0ba38209cf746ce7677afa1c52075606085cbfe4e8ae88dd87aaaf9b04cf9aa
7e1948c25c02fb8a8c01c36ae4d6ebe1f990d4f869a65cdea03f09252dc208e69fb74e6132ce77e2
5b578fdfe33ac372e6'''.split())
def test_hex_pi_nth_digits():
assert pi_hex_digits(0) == '3243f6a8885a30'
assert pi_hex_digits(1) == '243f6a8885a308'
assert pi_hex_digits(10000) == '68ac8fcfb8016c'
assert pi_hex_digits(13) == '08d313198a2e03'
assert pi_hex_digits(0, 3) == '324'
assert pi_hex_digits(0, 0) == ''
raises(ValueError, lambda: pi_hex_digits(-1))
raises(ValueError, lambda: pi_hex_digits(3.14))
# this will pick a random segment to compute every time
# it is run. If it ever fails, there is an error in the
# computation.
n = randint(0, len(dig))
prec = randint(0, len(dig) - n)
assert pi_hex_digits(n, prec) == dig[n: n + prec]
| 70.328358
| 82
| 0.957767
| 257
| 9,424
| 35.023346
| 0.700389
| 0.010999
| 0.012221
| 0.013221
| 0.013332
| 0.007333
| 0
| 0
| 0
| 0
| 0
| 0.576536
| 0.029499
| 9,424
| 133
| 83
| 70.857143
| 0.40761
| 0.029817
| 0
| 0
| 0
| 0
| 0.930495
| 0.910574
| 0
| 1
| 0
| 0
| 0.057851
| 1
| 0.008264
| false
| 0
| 0.024793
| 0
| 0.033058
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ac04c94e62c77337b8f80f8c29c8a6cd0663a28d
| 48
|
py
|
Python
|
utils_pkg/utils_pkg/__init__.py
|
felipery03/disaster-response
|
00f894753ac9df234de91412d9ad4cbff4ff76ae
|
[
"MIT"
] | 1
|
2021-01-20T11:43:58.000Z
|
2021-01-20T11:43:58.000Z
|
utils_pkg/utils_pkg/__init__.py
|
felipery03/disaster-response
|
00f894753ac9df234de91412d9ad4cbff4ff76ae
|
[
"MIT"
] | null | null | null |
utils_pkg/utils_pkg/__init__.py
|
felipery03/disaster-response
|
00f894753ac9df234de91412d9ad4cbff4ff76ae
|
[
"MIT"
] | null | null | null |
from .utils import *
from .transformers import *
| 24
| 27
| 0.770833
| 6
| 48
| 6.166667
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.145833
| 48
| 2
| 27
| 24
| 0.902439
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ac1858bdc936f2d4c167bf925a1f70eb341c1a79
| 35
|
py
|
Python
|
txsocks/__init__.py
|
infin8/txsocks
|
d3edd5c2b59cc584f78dd607e5d776e3ab0ef3de
|
[
"MIT"
] | null | null | null |
txsocks/__init__.py
|
infin8/txsocks
|
d3edd5c2b59cc584f78dd607e5d776e3ab0ef3de
|
[
"MIT"
] | null | null | null |
txsocks/__init__.py
|
infin8/txsocks
|
d3edd5c2b59cc584f78dd607e5d776e3ab0ef3de
|
[
"MIT"
] | null | null | null |
#
from socks5 import ClientFactory
| 11.666667
| 32
| 0.828571
| 4
| 35
| 7.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.033333
| 0.142857
| 35
| 2
| 33
| 17.5
| 0.933333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ac3909b5a43719b1f0b9c36b5c29fb3be2a610b4
| 108
|
py
|
Python
|
jobboard/views.py
|
yaelerdr/SJMaster
|
3158fd8ca6dca036367879cd43152d4531e34960
|
[
"MIT"
] | null | null | null |
jobboard/views.py
|
yaelerdr/SJMaster
|
3158fd8ca6dca036367879cd43152d4531e34960
|
[
"MIT"
] | null | null | null |
jobboard/views.py
|
yaelerdr/SJMaster
|
3158fd8ca6dca036367879cd43152d4531e34960
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
def board(request):
return render(request, 'jobboard/board.html')
| 18
| 49
| 0.759259
| 14
| 108
| 5.857143
| 0.785714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.138889
| 108
| 5
| 50
| 21.6
| 0.88172
| 0
| 0
| 0
| 0
| 0
| 0.175926
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
ac476ed804b5d9288fad5b946dccb722800a1444
| 42
|
py
|
Python
|
controlcenter/__init__.py
|
EnriqueSoria/django-controlcenter
|
fa262bf871d43cc114c62f863c258832cd267d9a
|
[
"BSD-3-Clause"
] | 980
|
2016-03-07T18:35:34.000Z
|
2022-03-26T10:30:55.000Z
|
controlcenter/__init__.py
|
EnriqueSoria/django-controlcenter
|
fa262bf871d43cc114c62f863c258832cd267d9a
|
[
"BSD-3-Clause"
] | 58
|
2016-05-04T07:33:18.000Z
|
2022-01-22T04:01:08.000Z
|
controlcenter/__init__.py
|
EnriqueSoria/django-controlcenter
|
fa262bf871d43cc114c62f863c258832cd267d9a
|
[
"BSD-3-Clause"
] | 112
|
2016-03-09T01:15:45.000Z
|
2022-03-11T16:22:43.000Z
|
from .dashboards import Dashboard # NOQA
| 21
| 41
| 0.785714
| 5
| 42
| 6.6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 42
| 1
| 42
| 42
| 0.942857
| 0.095238
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ac5828e79de0400c2e7d401c713035584309e866
| 157
|
py
|
Python
|
Modules/Menus/Menu.py
|
Spraynard/Card-Game-Suite
|
e7dada901e36c818cbce345758a11ca19553d5e4
|
[
"MIT"
] | null | null | null |
Modules/Menus/Menu.py
|
Spraynard/Card-Game-Suite
|
e7dada901e36c818cbce345758a11ca19553d5e4
|
[
"MIT"
] | null | null | null |
Modules/Menus/Menu.py
|
Spraynard/Card-Game-Suite
|
e7dada901e36c818cbce345758a11ca19553d5e4
|
[
"MIT"
] | null | null | null |
class Menu(object):
def __init__(self):
self.gameList = None
def _populateGameList(self):
pass
def chooseGame(self):
pass
def start(self):
pass
| 12.076923
| 29
| 0.681529
| 20
| 157
| 5.1
| 0.6
| 0.235294
| 0.215686
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.210191
| 157
| 12
| 30
| 13.083333
| 0.822581
| 0
| 0
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.444444
| false
| 0.333333
| 0
| 0
| 0.555556
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 6
|
ac6fee2012e1ec34cb6ef3b75dceae9b4fc95d85
| 173
|
py
|
Python
|
stDrosophila/__init__.py
|
chen-zhan/stDrosophila-release-1
|
1b128eb81ff4d076f7271abb1298639be6d08310
|
[
"BSD-3-Clause"
] | null | null | null |
stDrosophila/__init__.py
|
chen-zhan/stDrosophila-release-1
|
1b128eb81ff4d076f7271abb1298639be6d08310
|
[
"BSD-3-Clause"
] | null | null | null |
stDrosophila/__init__.py
|
chen-zhan/stDrosophila-release-1
|
1b128eb81ff4d076f7271abb1298639be6d08310
|
[
"BSD-3-Clause"
] | 1
|
2022-03-24T07:29:01.000Z
|
2022-03-24T07:29:01.000Z
|
"""
Toolkit for analyzing Drosophila spatial transcriptome data.
"""
from . import envs
from . import io
from . import od
from . import pl
from . import pp
from . import tl
| 17.3
| 60
| 0.734104
| 25
| 173
| 5.08
| 0.6
| 0.472441
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.190751
| 173
| 9
| 61
| 19.222222
| 0.907143
| 0.346821
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
3bb3a8680a51053ccc42c2728837bb82a7b0d277
| 204
|
py
|
Python
|
newproject/mainapp/admin.py
|
Floou/new-django-project
|
b16af7b1d75ab32a5c18c406b965e13af4a465b5
|
[
"Apache-2.0"
] | null | null | null |
newproject/mainapp/admin.py
|
Floou/new-django-project
|
b16af7b1d75ab32a5c18c406b965e13af4a465b5
|
[
"Apache-2.0"
] | 1
|
2020-11-14T06:07:36.000Z
|
2020-11-14T06:07:36.000Z
|
newproject/mainapp/admin.py
|
Floou/new-django-project
|
b16af7b1d75ab32a5c18c406b965e13af4a465b5
|
[
"Apache-2.0"
] | null | null | null |
from django.contrib import admin
from mainapp.models import Team, Trainer, Player, Match
admin.site.register(Team)
admin.site.register(Trainer)
admin.site.register(Match)
admin.site.register(Player)
| 17
| 55
| 0.79902
| 29
| 204
| 5.62069
| 0.448276
| 0.220859
| 0.417178
| 0.269939
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.098039
| 204
| 11
| 56
| 18.545455
| 0.88587
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
cbec04f40e98b41a1618da8ed3242c1878cf0484
| 189
|
py
|
Python
|
smsymer/evm/exception/__init__.py
|
Troublor/smSymer
|
05ec597325a72d9338306a7aba6cd07d4b4c6011
|
[
"MIT"
] | 3
|
2019-06-02T15:30:47.000Z
|
2021-01-05T06:15:55.000Z
|
smsymer/evm/exception/__init__.py
|
Troublor/smSymer
|
05ec597325a72d9338306a7aba6cd07d4b4c6011
|
[
"MIT"
] | 1
|
2021-06-12T17:03:33.000Z
|
2021-06-12T17:03:33.000Z
|
smsymer/evm/exception/__init__.py
|
Troublor/smSymer
|
05ec597325a72d9338306a7aba6cd07d4b4c6011
|
[
"MIT"
] | 1
|
2020-12-04T01:51:29.000Z
|
2020-12-04T01:51:29.000Z
|
from .insufficientInputException import InsufficientInputException
from .invalidOperationException import InvalidOperationException
from .evmExecutionException import EvmExecutionException
| 47.25
| 66
| 0.920635
| 12
| 189
| 14.5
| 0.416667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.063492
| 189
| 3
| 67
| 63
| 0.983051
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
026caa1509f41b019e60f15cc4850387316d79b2
| 98
|
py
|
Python
|
third_party/universal-ctags/ctags/Units/parser-python.r/python-fullqualified-tags.d/input.py
|
f110/wing
|
31b259f723b57a6481252a4b8b717fcee6b01ff4
|
[
"MIT"
] | 4
|
2017-02-07T20:04:31.000Z
|
2022-01-30T14:04:45.000Z
|
third_party/universal-ctags/ctags/Units/parser-python.r/python-fullqualified-tags.d/input.py
|
f110/wing
|
31b259f723b57a6481252a4b8b717fcee6b01ff4
|
[
"MIT"
] | 1
|
2018-01-07T19:14:53.000Z
|
2018-01-07T19:14:53.000Z
|
third_party/universal-ctags/ctags/Units/parser-python.r/python-fullqualified-tags.d/input.py
|
f110/wing
|
31b259f723b57a6481252a4b8b717fcee6b01ff4
|
[
"MIT"
] | 1
|
2021-04-26T09:00:06.000Z
|
2021-04-26T09:00:06.000Z
|
class Foo():
def g(self):
pass
class Bar():
def f(self):
pass
| 14
| 20
| 0.408163
| 12
| 98
| 3.333333
| 0.666667
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.469388
| 98
| 6
| 21
| 16.333333
| 0.769231
| 0
| 0
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0.333333
| 0
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 6
|
65f62b4a7b570b9f6333b3f37ac7197e0abf7645
| 1,227
|
py
|
Python
|
home/models.py
|
R-Wolf/CFD_A_library
|
b287ed8b2932b0f3b1cf5372f8d3d3494c6439c9
|
[
"MIT"
] | 4
|
2017-03-14T05:03:03.000Z
|
2019-04-13T05:13:24.000Z
|
home/models.py
|
gpulkit96/CFD_A_library
|
b287ed8b2932b0f3b1cf5372f8d3d3494c6439c9
|
[
"MIT"
] | 1
|
2017-05-18T08:39:46.000Z
|
2017-05-18T08:39:46.000Z
|
home/models.py
|
gpulkit96/CFD_A_library
|
b287ed8b2932b0f3b1cf5372f8d3d3494c6439c9
|
[
"MIT"
] | 2
|
2017-03-13T08:54:21.000Z
|
2017-05-18T06:24:58.000Z
|
from django.db import models
class Home(models.Model):
date=models.DateTimeField(editable=False)
book_date=models.DateTimeField(editable=False, null=True)
id0 = models.IntegerField(editable=False,null=True)
id1 = models.IntegerField(editable=False,null=True)
id2 = models.IntegerField(editable=False,null=True)
id3 = models.IntegerField(editable=False,null=True)
id4 = models.IntegerField(editable=False,null=True)
id5 = models.IntegerField(editable=False,null=True)
id6 = models.IntegerField(editable=False,null=True)
id7 = models.IntegerField(editable=False,null=True)
id8 = models.IntegerField(editable=False,null=True)
id9 = models.IntegerField(editable=False,null=True)
id10 = models.IntegerField(editable=False,null=True)
id11 = models.IntegerField(editable=False,null=True)
id12 = models.IntegerField(editable=False,null=True)
id13 = models.IntegerField(editable=False,null=True)
id14 = models.IntegerField(editable=False,null=True)
id15 = models.IntegerField(editable=False,null=True)
id16 = models.IntegerField(editable=False,null=True)
id17 = models.IntegerField(editable=False,null=True)
id18 = models.IntegerField(editable=False,null=True)
id19 = models.IntegerField(editable=False,null=True)
| 49.08
| 58
| 0.797881
| 162
| 1,227
| 6.037037
| 0.222222
| 0.292434
| 0.365031
| 0.45092
| 0.871166
| 0.797546
| 0
| 0
| 0
| 0
| 0
| 0.026408
| 0.074165
| 1,227
| 25
| 59
| 49.08
| 0.834507
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.041667
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
65ff3a99a19aa889d973013f1a6906a9826dc651
| 23
|
py
|
Python
|
aletheia/mechanism_engine/__init__.py
|
Brain-in-Vat/Aletheia
|
8d3f8b53f2cf3540a108f6fddefc84497ddee915
|
[
"Apache-2.0"
] | null | null | null |
aletheia/mechanism_engine/__init__.py
|
Brain-in-Vat/Aletheia
|
8d3f8b53f2cf3540a108f6fddefc84497ddee915
|
[
"Apache-2.0"
] | null | null | null |
aletheia/mechanism_engine/__init__.py
|
Brain-in-Vat/Aletheia
|
8d3f8b53f2cf3540a108f6fddefc84497ddee915
|
[
"Apache-2.0"
] | null | null | null |
from mesa import Model
| 11.5
| 22
| 0.826087
| 4
| 23
| 4.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.173913
| 23
| 1
| 23
| 23
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5a0d759fdd2d1edfec0bb47a0f21bc5d7aa98f44
| 111,939
|
py
|
Python
|
test/integration/plugins/nuagevsp/test_nuage_vpc_internal_lb.py
|
mrehman29/cloudstack
|
971c8a74e487e1048dd85338b80baa15902858d4
|
[
"Apache-2.0"
] | 1
|
2021-04-24T08:16:40.000Z
|
2021-04-24T08:16:40.000Z
|
test/integration/plugins/nuagevsp/test_nuage_vpc_internal_lb.py
|
mrehman29/cloudstack
|
971c8a74e487e1048dd85338b80baa15902858d4
|
[
"Apache-2.0"
] | 1
|
2022-02-01T01:08:02.000Z
|
2022-02-01T01:08:02.000Z
|
test/integration/plugins/nuagevsp/test_nuage_vpc_internal_lb.py
|
mrehman29/cloudstack
|
971c8a74e487e1048dd85338b80baa15902858d4
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Component tests for VPC Internal Load Balancer functionality with Nuage VSP SDN plugin
"""
# Import Local Modules
from nuageTestCase import nuageTestCase
from marvin.lib.base import (Account,
ApplicationLoadBalancer,
Network,
Router)
from marvin.cloudstackAPI import (listInternalLoadBalancerVMs,
stopInternalLoadBalancerVM,
startInternalLoadBalancerVM)
# Import System Modules
from nose.plugins.attrib import attr
import copy
import time
class TestNuageInternalLb(nuageTestCase):
"""Test VPC Internal LB functionality with Nuage VSP SDN plugin
"""
@classmethod
def setUpClass(cls):
super(TestNuageInternalLb, cls).setUpClass()
return
def setUp(self):
# Create an account
self.account = Account.create(self.api_client,
self.test_data["account"],
admin=True,
domainid=self.domain.id
)
self.cleanup = [self.account]
return
# create_Internal_LB_Rule - Creates Internal LB rule in the given VPC network
def create_Internal_LB_Rule(self, network, vm_array=None, services=None, source_ip=None):
self.debug("Creating Internal LB rule in VPC network with ID - %s" % network.id)
if not services:
services = self.test_data["internal_lbrule"]
int_lb_rule = ApplicationLoadBalancer.create(self.api_client,
services=services,
sourcenetworkid=network.id,
networkid=network.id,
sourceipaddress=source_ip
)
self.debug("Created Internal LB rule")
# Assigning VMs to the created Internal Load Balancer rule
if vm_array:
self.debug("Assigning virtual machines - %s to the created Internal LB rule" % vm_array)
int_lb_rule.assign(self.api_client, vms=vm_array)
self.debug("Assigned VMs to the created Internal LB rule")
return int_lb_rule
# validate_Internal_LB_Rule - Validates the given Internal LB rule,
# matches the given Internal LB rule name and state against the list of Internal LB rules fetched
def validate_Internal_LB_Rule(self, int_lb_rule, state=None, vm_array=None):
"""Validates the Internal LB Rule"""
self.debug("Check if the Internal LB Rule is created successfully ?")
int_lb_rules = ApplicationLoadBalancer.list(self.api_client,
id=int_lb_rule.id
)
self.assertEqual(isinstance(int_lb_rules, list), True,
"List Internal LB Rule should return a valid list"
)
self.assertEqual(int_lb_rule.name, int_lb_rules[0].name,
"Name of the Internal LB Rule should match with the returned list data"
)
if state:
self.assertEqual(int_lb_rules[0].loadbalancerrule[0].state, state,
"Internal LB Rule state should be '%s'" % state
)
if vm_array:
instance_ids = [instance.id for instance in int_lb_rules[0].loadbalancerinstance]
for vm in vm_array:
self.assertEqual(vm.id in instance_ids, True,
"Internal LB instance list should have the VM with ID - %s" % vm.id
)
self.debug("Internal LB Rule creation successfully validated for %s" % int_lb_rule.name)
# list_InternalLbVms - Lists deployed Internal LB VM instances
def list_InternalLbVms(self, network_id=None, source_ip=None):
listInternalLoadBalancerVMsCmd = listInternalLoadBalancerVMs.listInternalLoadBalancerVMsCmd()
listInternalLoadBalancerVMsCmd.account = self.account.name
listInternalLoadBalancerVMsCmd.domainid = self.account.domainid
if network_id:
listInternalLoadBalancerVMsCmd.networkid = network_id
internal_lb_vms = self.api_client.listInternalLoadBalancerVMs(listInternalLoadBalancerVMsCmd)
if source_ip:
return [internal_lb_vm for internal_lb_vm in internal_lb_vms
if str(internal_lb_vm.guestipaddress) == source_ip]
else:
return internal_lb_vms
# get_InternalLbVm - Returns Internal LB VM instance for the given VPC network and source ip
def get_InternalLbVm(self, network, source_ip):
self.debug("Finding the InternalLbVm for network with ID - %s and source IP address - %s" %
(network.id, source_ip))
internal_lb_vms = self.list_InternalLbVms(network.id, source_ip)
self.assertEqual(isinstance(internal_lb_vms, list), True,
"List InternalLbVms should return a valid list"
)
return internal_lb_vms[0]
# stop_InternalLbVm - Stops the given Internal LB VM instance
def stop_InternalLbVm(self, int_lb_vm, force=None):
self.debug("Stopping InternalLbVm with ID - %s" % int_lb_vm.id)
cmd = stopInternalLoadBalancerVM.stopInternalLoadBalancerVMCmd()
cmd.id = int_lb_vm.id
if force:
cmd.forced = force
self.api_client.stopInternalLoadBalancerVM(cmd)
# start_InternalLbVm - Starts the given Internal LB VM instance
def start_InternalLbVm(self, int_lb_vm):
self.debug("Starting InternalLbVm with ID - %s" % int_lb_vm.id)
cmd = startInternalLoadBalancerVM.startInternalLoadBalancerVMCmd()
cmd.id = int_lb_vm.id
self.api_client.startInternalLoadBalancerVM(cmd)
# check_InternalLbVm_state - Checks if the Internal LB VM instance of the given VPC network and source ip is in the
# expected state form the list of fetched Internal LB VM instances
def check_InternalLbVm_state(self, network, source_ip, state=None):
self.debug("Check if the InternalLbVm is in state - %s" % state)
internal_lb_vms = self.list_InternalLbVms(network.id, source_ip)
self.assertEqual(isinstance(internal_lb_vms, list), True,
"List InternalLbVm should return a valid list"
)
if state:
self.assertEqual(internal_lb_vms[0].state, state,
"InternalLbVm is not in the expected state"
)
self.debug("InternalLbVm instance - %s is in the expected state - %s" % (internal_lb_vms[0].name, state))
# wget_from_vm_cmd - From within the given VM (ssh client),
# fetches index.html file of web server running with the given public IP
def wget_from_vm_cmd(self, ssh_client, ip_address, port):
cmd = "wget --no-cache -t 1 http://" + ip_address + ":" + str(port) + "/"
response = self.execute_cmd(ssh_client, cmd)
if "200 OK" not in response:
self.fail("Failed to wget from a VM with http server IP address - %s" % ip_address)
# Reading the wget file
cmd = "cat index.html"
wget_file = self.execute_cmd(ssh_client, cmd)
# Removing the wget file
cmd = "rm -r index.html"
self.execute_cmd(ssh_client, cmd)
return wget_file
# verify_lb_wget_file - Verifies that the given wget file (index.html) belongs to the given Internal LB rule
# assigned VMs (vm array)
def verify_lb_wget_file(self, wget_file, vm_array):
wget_server_ip = None
for vm in vm_array:
for nic in vm.nic:
if str(nic.ipaddress) in str(wget_file):
wget_server_ip = str(nic.ipaddress)
if wget_server_ip:
self.debug("Verified wget file from an Internal Load Balanced VM with http server IP address - %s"
% wget_server_ip)
else:
self.fail("Did not wget file from the Internal Load Balanced VMs - %s" % vm_array)
return wget_server_ip
# validate_internallb_algorithm_traffic - Validates Internal LB algorithms by performing multiple wget traffic tests
# against the given Internal LB VM instance (source port)
def validate_internallb_algorithm_traffic(self, ssh_client, source_ip, port, vm_array, algorithm):
# Internal LB (wget) traffic tests
iterations = 2 * len(vm_array)
wget_files = []
for i in range(iterations):
wget_files.append(self.wget_from_vm_cmd(ssh_client, source_ip, port))
# Verifying Internal LB (wget) traffic tests
wget_servers_ip_list = []
for i in range(iterations):
wget_servers_ip_list.append(self.verify_lb_wget_file(wget_files[i], vm_array))
# Validating Internal LB algorithm
if algorithm == "roundrobin" or algorithm == "leastconn":
for i in range(iterations):
if wget_servers_ip_list.count(wget_servers_ip_list[i]) != 2:
self.fail("Round Robin Internal LB algorithm validation failed - %s" % wget_servers_ip_list)
self.debug("Successfully validated Round Robin/Least connections Internal LB algorithm - %s" %
wget_servers_ip_list)
if algorithm == "source":
for i in range(iterations):
if wget_servers_ip_list.count(wget_servers_ip_list[i]) != iterations:
self.fail("Source Internal LB algorithm validation failed - %s" % wget_servers_ip_list)
self.debug("Successfully validated Source Internal LB algorithm - %s" % wget_servers_ip_list)
@attr(tags=["advanced", "nuagevsp"], required_hardware="false")
def test_01_nuage_internallb_vpc_Offering(self):
"""Test Nuage VSP VPC Offering with different combinations of LB service providers
"""
# 1. Verify that the network service providers supported by Nuage VSP for VPC Internal LB functionality are all
# successfully created and enabled.
# 2. Create Nuage VSP VPC offering with LB service provider as "InternalLbVm", check if it is successfully
# created and enabled. Verify that the VPC creation succeeds with this VPC offering.
# 3. Create Nuage VSP VPC offering with LB service provider as "VpcVirtualRouter", check if it is successfully
# created and enabled. Verify that the VPC creation fails with this VPC offering as Nuage VSP does not
# support provider "VpcVirtualRouter" for service LB.
# 4. Create Nuage VSP VPC offering with LB service provider as "Netscaler", check if it is successfully
# created and enabled. Verify that the VPC creation fails with this VPC offering as Nuage VSP does not
# support provider "Netscaler" for service LB.
# 5. Delete the created VPC offerings (cleanup).
self.debug("Validating network service providers supported by Nuage VSP for VPC Internal LB functionality")
providers = ["NuageVsp", "VpcVirtualRouter", "InternalLbVm"]
for provider in providers:
self.validate_NetworkServiceProvider(provider, state="Enabled")
# Creating VPC offerings
self.debug("Creating Nuage VSP VPC offering with LB service provider as InternalLbVm...")
vpc_off_1 = self.create_VpcOffering(self.test_data["nuagevsp"]["vpc_offering_lb"])
self.validate_VpcOffering(vpc_off_1, state="Enabled")
self.debug("Creating Nuage VSP VPC offering with LB service provider as VpcVirtualRouter...")
vpc_offering_lb = copy.deepcopy(self.test_data["nuagevsp"]["vpc_offering_lb"])
vpc_offering_lb["serviceProviderList"]["Lb"] = "VpcVirtualRouter"
vpc_off_2 = self.create_VpcOffering(vpc_offering_lb)
self.validate_VpcOffering(vpc_off_2, state="Enabled")
self.debug("Creating Nuage VSP VPC offering with LB service provider as Netscaler...")
vpc_offering_lb["serviceProviderList"]["Lb"] = "Netscaler"
vpc_off_3 = self.create_VpcOffering(vpc_offering_lb)
self.validate_VpcOffering(vpc_off_3, state="Enabled")
self.debug("Creating Nuage VSP VPC offering without LB service...")
vpc_off_4 = self.create_VpcOffering(self.test_data["nuagevsp"]["vpc_offering"])
self.validate_VpcOffering(vpc_off_4, state="Enabled")
# Creating VPCs
self.debug("Creating a VPC with LB service provider as InternalLbVm...")
vpc_1 = self.create_Vpc(vpc_off_1, cidr='10.1.0.0/16')
self.validate_Vpc(vpc_1, state="Enabled")
self.debug("Creating a VPC with LB service provider as VpcVirtualRouter...")
with self.assertRaises(Exception):
self.create_Vpc(vpc_off_2, cidr='10.1.0.0/16')
self.debug("Nuage VSP does not support provider VpcVirtualRouter for service LB for VPCs")
self.debug("Creating a VPC with LB service provider as Netscaler...")
with self.assertRaises(Exception):
self.create_Vpc(vpc_off_3, cidr='10.1.0.0/16')
self.debug("Nuage VSP does not support provider Netscaler for service LB for VPCs")
self.debug("Creating a VPC without LB service...")
vpc_2 = self.create_Vpc(vpc_off_4, cidr='10.1.0.0/16')
self.validate_Vpc(vpc_2, state="Enabled")
@attr(tags=["advanced", "nuagevsp"], required_hardware="false")
def test_02_nuage_internallb_vpc_network_offering(self):
"""Test Nuage VSP VPC Network Offering with and without Internal LB service
"""
# 1. Create Nuage Vsp VPC Network offering with LB Service Provider as "InternalLbVm" and LB Service Capability
# "lbSchemes" as "internal", check if it is successfully created and enabled. Verify that the VPC network
# creation succeeds with this Network offering.
# 2. Recreate above Network offering with ispersistent False, check if it is successfully created and enabled.
# Verify that the VPC network creation fails with this Network offering as Nuage VSP does not support non
# persistent VPC networks.
# 3. Recreate above Network offering with conserve mode On, check if the network offering creation failed
# as only networks with conserve mode Off can belong to VPC.
# 4. Create Nuage Vsp VPC Network offering with LB Service Provider as "InternalLbVm" and LB Service Capability
# "lbSchemes" as "public", check if the network offering creation failed as "public" lbScheme is not
# supported for LB Service Provider "InternalLbVm".
# 5. Create Nuage Vsp VPC Network offering without Internal LB Service, check if it is successfully created and
# enabled. Verify that the VPC network creation succeeds with this Network offering.
# 6. Recreate above Network offering with ispersistent False, check if it is successfully created and enabled.
# Verify that the VPC network creation fails with this Network offering as Nuage VSP does not support non
# persistent VPC networks.
# 7. Recreate the above Network offering with conserve mode On, check if the network offering creation failed
# as only networks with conserve mode Off can belong to VPC.
# 8. Delete the created Network offerings (cleanup).
# Creating VPC offering
self.debug("Creating Nuage VSP VPC offering with Internal LB service...")
vpc_off = self.create_VpcOffering(self.test_data["nuagevsp"]["vpc_offering_lb"])
self.validate_VpcOffering(vpc_off, state="Enabled")
# Creating VPC
self.debug("Creating a VPC with Internal LB service...")
vpc = self.create_Vpc(vpc_off, cidr='10.1.0.0/16')
self.validate_Vpc(vpc, state="Enabled")
# Creating network offerings
self.debug("Creating Nuage VSP VPC Network offering with LB Service Provider as InternalLbVm and LB Service "
"Capability lbSchemes as internal...")
net_off_1 = self.create_NetworkOffering(self.test_data["nuagevsp"]["vpc_network_offering_internal_lb"])
self.validate_NetworkOffering(net_off_1, state="Enabled")
self.debug("Recreating above Network offering with ispersistent False...")
vpc_net_off_lb_non_persistent = copy.deepcopy(self.test_data["nuagevsp"]["vpc_network_offering_internal_lb"])
vpc_net_off_lb_non_persistent["ispersistent"] = "False"
net_off_2 = self.create_NetworkOffering(vpc_net_off_lb_non_persistent)
self.validate_NetworkOffering(net_off_2, state="Enabled")
self.debug("Recreating above Network offering with conserve mode On...")
with self.assertRaises(Exception):
self.create_NetworkOffering(self.test_data["nuagevsp"]["vpc_network_offering_internal_lb"],
conserve_mode=True)
self.debug("Network offering creation failed as only networks with conserve mode Off can belong to VPC")
self.debug("Creating Nuage VSP VPC Network offering with LB Service Provider as InternalLbVm and LB Service "
"Capability lbSchemes as public...")
network_offering_internal_lb = copy.deepcopy(self.test_data["nuagevsp"]["vpc_network_offering_internal_lb"])
network_offering_internal_lb["serviceCapabilityList"]["Lb"]["lbSchemes"] = "public"
with self.assertRaises(Exception):
self.create_NetworkOffering(network_offering_internal_lb)
self.debug("Network offering creation failed as public lbScheme is not supported for LB Service Provider "
"InternalLbVm")
self.debug("Creating Nuage Vsp VPC Network offering without Internal LB service...")
net_off_3 = self.create_NetworkOffering(self.test_data["nuagevsp"]["vpc_network_offering"])
self.validate_NetworkOffering(net_off_3, state="Enabled")
self.debug("Recreating above Network offering with ispersistent False...")
vpc_net_off_non_persistent = copy.deepcopy(self.test_data["nuagevsp"]["vpc_network_offering"])
vpc_net_off_non_persistent["ispersistent"] = "False"
net_off_4 = self.create_NetworkOffering(vpc_net_off_non_persistent)
self.validate_NetworkOffering(net_off_4, state="Enabled")
self.debug("Recreating above Network offering with conserve mode On...")
with self.assertRaises(Exception):
self.create_NetworkOffering(self.test_data["nuagevsp"]["vpc_network_offering"], conserve_mode=True)
self.debug("Network offering creation failed as only networks with conserve mode Off can belong to VPC")
# Creating VPC networks in the VPC
self.debug("Creating a persistent VPC network with Internal LB service...")
internal_tier = self.create_Network(net_off_1, gateway='10.1.1.1', vpc=vpc)
self.validate_Network(internal_tier, state="Implemented")
vr = self.get_Router(internal_tier)
self.check_Router_state(vr, state="Running")
# VSD verification
self.verify_vsp_network(self.domain.id, internal_tier, vpc)
self.verify_vsp_router(vr)
self.debug("Creating a non persistent VPC network with Internal LB service...")
with self.assertRaises(Exception):
self.create_Network(net_off_2, gateway='10.1.2.1', vpc=vpc)
self.debug("Nuage VSP does not support non persistent VPC networks")
self.debug("Creating a persistent VPC network without Internal LB service...")
public_tier = self.create_Network(net_off_3, gateway='10.1.3.1', vpc=vpc)
self.validate_Network(public_tier, state="Implemented")
vr = self.get_Router(public_tier)
self.check_Router_state(vr, state="Running")
# VSD verification
self.verify_vsp_network(self.domain.id, public_tier, vpc)
self.verify_vsp_router(vr)
self.debug("Creating a non persistent VPC network without Internal LB service...")
with self.assertRaises(Exception):
self.create_Network(net_off_4, gateway='10.1.4.1', vpc=vpc)
self.debug("Nuage VSP does not support non persistent VPC networks")
@attr(tags=["advanced", "nuagevsp"], required_hardware="false")
def test_03_nuage_internallb_vpc_networks(self):
"""Test Nuage VSP VPC Networks with and without Internal LB service
"""
# 1. Create Nuage VSP VPC offering with Internal LB service, check if it is successfully created and enabled.
# 2. Create Nuage VSP VPC offering without Internal LB service, check if it is successfully created and enabled.
# 3. Create a VPC "vpc_1" with Internal LB service, check if it is successfully created and enabled.
# 4. Create a VPC "vpc_2" without Internal LB service, check if it is successfully created and enabled.
# 5. Create Nuage VSP VPC Network offering with Internal LB service, check if it is successfully created and
# enabled.
# 6. Create Nuage VSP VPC Network offering without Internal LB service, check if it is successfully created and
# enabled.
# 7. Create a VPC network in vpc_1 with Internal LB service and spawn a VM, check if the tier is added to the
# VPC VR, and the VM is deployed successfully in the tier.
# 8. Create one more VPC network in vpc_1 with Internal LB service and spawn a VM, check if the tier is added
# to the VPC VR, and the VM is deployed successfully in the tier.
# 9. Create a VPC network in vpc_2 with Internal LB service, check if the tier creation failed.
# 10. Create a VPC network in vpc_1 without Internal LB service and spawn a VM, check if the tier is added to
# the VPC VR, and the VM is deployed successfully in the tier.
# 11. Create a VPC network in vpc_2 without Internal LB service and spawn a VM, check if the tier is added to
# the VPC VR, and the VM is deployed successfully in the tier.
# 12. Upgrade the VPC network with Internal LB service to one with no Internal LB service and vice-versa, check
# if the VPC Network offering upgrade passed in both directions.
# 13. Delete the VPC network with Internal LB service, check if the tier is successfully deleted.
# 14. Recreate the VPC network with Internal LB service, check if the tier is successfully re-created.
# 15. Delete all the created objects (cleanup).
# Creating VPC offerings
self.debug("Creating Nuage VSP VPC offering with Internal LB service...")
vpc_off_1 = self.create_VpcOffering(self.test_data["nuagevsp"]["vpc_offering_lb"])
self.validate_VpcOffering(vpc_off_1, state="Enabled")
self.debug("Creating Nuage VSP VPC offering without Internal LB service...")
vpc_off_2 = self.create_VpcOffering(self.test_data["nuagevsp"]["vpc_offering"])
self.validate_VpcOffering(vpc_off_2, state="Enabled")
# Creating VPCs
self.debug("Creating a VPC with Internal LB service...")
vpc_1 = self.create_Vpc(vpc_off_1, cidr='10.1.0.0/16')
self.validate_Vpc(vpc_1, state="Enabled")
self.debug("Creating a VPC without Internal LB service...")
vpc_2 = self.create_Vpc(vpc_off_2, cidr='10.1.0.0/16')
self.validate_Vpc(vpc_2, state="Enabled")
# Creating network offerings
self.debug("Creating Nuage VSP VPC Network offering with Internal LB service...")
net_off_1 = self.create_NetworkOffering(
self.test_data["nuagevsp"]["vpc_network_offering_internal_lb"])
self.validate_NetworkOffering(net_off_1, state="Enabled")
self.debug("Creating Nuage VSP VPC Network offering without Internal LB service...")
net_off_2 = self.create_NetworkOffering(self.test_data["nuagevsp"]["vpc_network_offering"])
self.validate_NetworkOffering(net_off_2, state="Enabled")
# Creating VPC networks in VPCs, and deploying VMs
self.debug("Creating a VPC network in vpc_1 with Internal LB service...")
internal_tier_1 = self.create_Network(net_off_1, gateway='10.1.1.1', vpc=vpc_1)
self.validate_Network(internal_tier_1, state="Implemented")
vr_1 = self.get_Router(internal_tier_1)
self.check_Router_state(vr_1, state="Running")
self.debug("Deploying a VM in network - %s" % internal_tier_1.name)
internal_vm_1 = self.create_VM(internal_tier_1)
self.check_VM_state(internal_vm_1, state="Running")
# VSD verification
self.verify_vsp_network(self.domain.id, internal_tier_1, vpc_1)
self.verify_vsp_router(vr_1)
self.verify_vsp_vm(internal_vm_1)
self.debug("Creating one more VPC network in vpc_1 with Internal LB service...")
internal_tier_2 = self.create_Network(net_off_1, gateway='10.1.2.1', vpc=vpc_1)
self.validate_Network(internal_tier_2, state="Implemented")
vr_1 = self.get_Router(internal_tier_2)
self.check_Router_state(vr_1, state="Running")
self.debug("Deploying a VM in network - %s" % internal_tier_2.name)
internal_vm_2 = self.create_VM(internal_tier_2)
self.check_VM_state(internal_vm_2, state="Running")
# VSD verification
self.verify_vsp_network(self.domain.id, internal_tier_2, vpc_1)
self.verify_vsp_router(vr_1)
self.verify_vsp_vm(internal_vm_2)
self.debug("Creating a VPC network in vpc_2 with Internal LB service...")
with self.assertRaises(Exception):
self.create_Network(net_off_1, gateway='10.1.1.1', vpc=vpc_2)
self.debug("VPC Network creation failed as vpc_2 does not support Internal Lb service")
self.debug("Creating a VPC network in vpc_1 without Internal LB service...")
public_tier_1 = self.create_Network(net_off_2, gateway='10.1.3.1', vpc=vpc_1)
self.validate_Network(public_tier_1, state="Implemented")
vr_1 = self.get_Router(public_tier_1)
self.check_Router_state(vr_1, state="Running")
self.debug("Deploying a VM in network - %s" % public_tier_1.name)
public_vm_1 = self.create_VM(public_tier_1)
self.check_VM_state(public_vm_1, state="Running")
# VSD verification
self.verify_vsp_network(self.domain.id, public_tier_1, vpc_1)
self.verify_vsp_router(vr_1)
self.verify_vsp_vm(public_vm_1)
self.debug("Creating a VPC network in vpc_2 without Internal LB service...")
public_tier_2 = self.create_Network(net_off_2, gateway='10.1.1.1', vpc=vpc_2)
self.validate_Network(public_tier_2, state="Implemented")
vr_2 = self.get_Router(public_tier_2)
self.check_Router_state(vr_2, state="Running")
self.debug("Deploying a VM in network - %s" % public_tier_2.name)
public_vm_2 = self.create_VM(public_tier_2)
self.check_VM_state(public_vm_2, state="Running")
# VSD verification
self.verify_vsp_network(self.domain.id, public_tier_2, vpc_2)
self.verify_vsp_router(vr_2)
self.verify_vsp_vm(public_vm_2)
# Upgrading a VPC network
self.debug("Upgrading a VPC network with Internal LB Service to one without Internal LB Service...")
self.upgrade_Network(net_off_2, internal_tier_2)
self.validate_Network(internal_tier_2, state="Implemented")
vr_1 = self.get_Router(internal_tier_2)
self.check_Router_state(vr_1, state="Running")
self.check_VM_state(internal_vm_2, state="Running")
# VSD verification
self.verify_vsp_network(self.domain.id, internal_tier_2, vpc_1)
self.verify_vsp_router(vr_1)
self.verify_vsp_vm(internal_vm_2)
self.debug("Upgrading a VPC network without Internal LB Service to one with Internal LB Service...")
self.upgrade_Network(net_off_1, internal_tier_2)
self.validate_Network(internal_tier_2, state="Implemented")
vr_1 = self.get_Router(internal_tier_2)
self.check_Router_state(vr_1, state="Running")
self.check_VM_state(internal_vm_2, state="Running")
# VSD verification
self.verify_vsp_network(self.domain.id, internal_tier_2, vpc_1)
self.verify_vsp_router(vr_1)
self.verify_vsp_vm(internal_vm_2)
# Deleting and re-creating a VPC network
self.debug("Deleting a VPC network with Internal LB Service...")
self.delete_VM(internal_vm_2)
self.delete_Network(internal_tier_2)
with self.assertRaises(Exception):
self.validate_Network(internal_tier_2)
self.debug("VPC network successfully deleted in CloudStack")
# VSD verification
with self.assertRaises(Exception):
self.verify_vsp_network(self.domain.id, internal_tier_2, vpc_1)
self.debug("VPC network successfully deleted in VSD")
self.debug("Recreating a VPC network with Internal LB Service...")
internal_tier_2 = self.create_Network(net_off_1, gateway='10.1.2.1', vpc=vpc_1)
internal_vm_2 = self.create_VM(internal_tier_2)
self.validate_Network(internal_tier_2, state="Implemented")
vr_1 = self.get_Router(internal_tier_2)
self.check_Router_state(vr_1, state="Running")
self.check_VM_state(internal_vm_2, state="Running")
# VSD verification
self.verify_vsp_network(self.domain.id, internal_tier_2, vpc_1)
self.verify_vsp_router(vr_1)
self.verify_vsp_vm(internal_vm_2)
@attr(tags=["advanced", "nuagevsp"], required_hardware="false")
def test_04_nuage_internallb_rules(self):
"""Test Nuage VSP VPC Internal LB functionality with different combinations of Internal LB rules
"""
# 1. Create an Internal LB Rule with source IP Address specified, check if the Internal LB Rule is successfully
# created.
# 2. Create an Internal LB Rule without source IP Address specified, check if the Internal LB Rule is
# successfully created.
# 3. Create an Internal LB Rule when the specified source IP Address is outside the VPC network (tier) CIDR
# range, check if the Internal LB Rule creation failed as the requested source IP is not in the network's
# CIDR subnet.
# 4. Create an Internal LB Rule when the specified source IP Address is outside the VPC super CIDR range,
# check if the Internal LB Rule creation failed as the requested source IP is not in the network's CIDR
# subnet.
# 5. Create an Internal LB Rule in the tier with LB service provider as VpcInlineLbVm, check if the Internal LB
# Rule creation failed as Scheme Internal is not supported by this network offering.
# 6. Create multiple Internal LB Rules using different Load Balancing source IP Addresses, check if the Internal
# LB Rules are successfully created.
# 7. Create multiple Internal LB Rules with different ports but using the same Load Balancing source IP Address,
# check if the Internal LB Rules are successfully created.
# 8. Create multiple Internal LB Rules with same ports and using the same Load Balancing source IP Address,
# check if the second Internal LB Rule creation failed as it conflicts with the first Internal LB rule.
# 9. Attach a VM to the above created Internal LB Rules, check if the VM is successfully attached to the
# Internal LB Rules.
# 10. Verify the InternalLbVm deployment after successfully creating the first Internal LB Rule and attaching a
# VM to it.
# 11. Verify the failure of attaching a VM from a different tier to an Internal LB Rule created on a tier.
# 12. Delete the above created Internal LB Rules, check if the Internal LB Rules are successfully deleted.
# Creating a VPC offering
self.debug("Creating Nuage VSP VPC offering with Internal LB service...")
vpc_off = self.create_VpcOffering(self.test_data["nuagevsp"]["vpc_offering_lb"])
self.validate_VpcOffering(vpc_off, state="Enabled")
# Creating a VPC
self.debug("Creating a VPC with Internal LB service...")
vpc = self.create_Vpc(vpc_off, cidr='10.1.0.0/16')
self.validate_Vpc(vpc, state="Enabled")
# Creating network offerings
self.debug("Creating Nuage VSP VPC Network offering with Internal LB service...")
net_off_1 = self.create_NetworkOffering(
self.test_data["nuagevsp"]["vpc_network_offering_internal_lb"])
self.validate_NetworkOffering(net_off_1, state="Enabled")
self.debug("Creating Nuage VSP VPC Network offering without Internal LB service...")
net_off_2 = self.create_NetworkOffering(self.test_data["nuagevsp"]["vpc_network_offering"])
self.validate_NetworkOffering(net_off_2, state="Enabled")
# Creating VPC networks in the VPC, and deploying VMs
self.debug("Creating a VPC network with Internal LB service...")
internal_tier = self.create_Network(net_off_1, gateway='10.1.1.1', vpc=vpc)
self.validate_Network(internal_tier, state="Implemented")
vr = self.get_Router(internal_tier)
self.check_Router_state(vr, state="Running")
self.debug("Deploying a VM in network - %s" % internal_tier.name)
internal_vm = self.create_VM(internal_tier)
self.check_VM_state(internal_vm, state="Running")
# VSD verification
self.verify_vsp_network(self.domain.id, internal_tier, vpc)
self.verify_vsp_router(vr)
self.verify_vsp_vm(internal_vm)
self.debug("Creating a VPC network without Internal LB service...")
public_tier = self.create_Network(net_off_2, gateway='10.1.2.1', vpc=vpc)
self.validate_Network(public_tier, state="Implemented")
vr = self.get_Router(public_tier)
self.check_Router_state(vr, state="Running")
self.debug("Deploying a VM in network - %s" % public_tier.name)
public_vm = self.create_VM(public_tier)
self.check_VM_state(public_vm, state="Running")
# VSD verification
self.verify_vsp_network(self.domain.id, public_tier, vpc)
self.verify_vsp_router(vr)
self.verify_vsp_vm(public_vm)
# Creating Internal LB Rules
self.debug("Creating an Internal LB Rule without source IP Address specified...")
int_lb_rule = self.create_Internal_LB_Rule(internal_tier)
self.validate_Internal_LB_Rule(int_lb_rule, state="Add")
# Validating InternalLbVm deployment
with self.assertRaises(Exception):
self.check_InternalLbVm_state(internal_tier, int_lb_rule.sourceipaddress)
self.debug("InternalLbVm is not deployed in the network as there are no VMs assigned to this Internal LB Rule")
self.debug('Deleting the Internal LB Rule - %s' % int_lb_rule.name)
int_lb_rule.delete(self.api_client)
with self.assertRaises(Exception):
self.validate_Internal_LB_Rule(int_lb_rule)
self.debug("Internal LB Rule successfully deleted in CloudStack")
free_source_ip = int_lb_rule.sourceipaddress
self.debug("Creating an Internal LB Rule with source IP Address specified...")
int_lb_rule = self.create_Internal_LB_Rule(internal_tier, source_ip=free_source_ip)
self.validate_Internal_LB_Rule(int_lb_rule, state="Add")
# Validating InternalLbVm deployment
with self.assertRaises(Exception):
self.check_InternalLbVm_state(internal_tier, int_lb_rule.sourceipaddress)
self.debug("InternalLbVm is not deployed in the network as there are no VMs assigned to this Internal LB Rule")
self.debug('Deleting the Internal LB Rule - %s' % int_lb_rule.name)
int_lb_rule.delete(self.api_client)
with self.assertRaises(Exception):
self.validate_Internal_LB_Rule(int_lb_rule)
self.debug("Internal LB Rule successfully deleted in CloudStack")
self.debug("Creating an Internal LB Rule when the specified source IP Address is outside the VPC network CIDR "
"range...")
with self.assertRaises(Exception):
self.create_Internal_LB_Rule(internal_tier, source_ip="10.1.1.256")
self.debug("Internal LB Rule creation failed as the requested IP is not in the network's CIDR subnet")
self.debug("Creating an Internal LB Rule when the specified source IP Address is outside the VPC super CIDR "
"range...")
with self.assertRaises(Exception):
self.create_Internal_LB_Rule(internal_tier, source_ip="10.2.1.256")
self.debug("Internal LB Rule creation failed as the requested IP is not in the network's CIDR subnet")
self.debug("Creating an Internal LB Rule in a VPC network without Internal Lb service...")
with self.assertRaises(Exception):
self.create_Internal_LB_Rule(public_tier)
self.debug("Internal LB Rule creation failed as Scheme Internal is not supported by this network offering")
self.debug("Creating multiple Internal LB Rules using different Load Balancing source IP Addresses...")
int_lb_rule_1 = self.create_Internal_LB_Rule(internal_tier, vm_array=[internal_vm])
self.validate_Internal_LB_Rule(int_lb_rule_1, state="Active", vm_array=[internal_vm])
int_lb_rule_2 = self.create_Internal_LB_Rule(internal_tier, vm_array=[internal_vm])
self.validate_Internal_LB_Rule(int_lb_rule_2, state="Active", vm_array=[internal_vm])
# Validating InternalLbVms deployment and state
int_lb_vm_1 = self.get_InternalLbVm(internal_tier, int_lb_rule_1.sourceipaddress)
self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running")
int_lb_vm_2 = self.get_InternalLbVm(internal_tier, int_lb_rule_2.sourceipaddress)
self.check_InternalLbVm_state(internal_tier, int_lb_rule_2.sourceipaddress, state="Running")
# VSD Verification
self.verify_vsp_LB_device(int_lb_vm_1)
self.verify_vsp_LB_device(int_lb_vm_2)
self.debug('Removing VMs from the Internal LB Rules - %s, %s' % (int_lb_rule_1.name, int_lb_rule_2.name))
int_lb_rule_1.remove(self.api_client, vms=[internal_vm])
with self.assertRaises(Exception):
self.validate_Internal_LB_Rule(int_lb_rule_1, vm_array=[internal_vm])
self.debug("VMs successfully removed from the Internal LB Rule in CloudStack")
int_lb_rule_2.remove(self.api_client, vms=[internal_vm])
with self.assertRaises(Exception):
self.validate_Internal_LB_Rule(int_lb_rule_2, vm_array=[internal_vm])
self.debug("VMs successfully removed from the Internal LB Rule in CloudStack")
# Validating InternalLbVms state
self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running")
self.check_InternalLbVm_state(internal_tier, int_lb_rule_2.sourceipaddress, state="Running")
# VSD Verification
self.verify_vsp_LB_device(int_lb_vm_1)
self.verify_vsp_LB_device(int_lb_vm_2)
self.debug('Deleting the Internal LB Rules - %s, %s' % (int_lb_rule_1.name, int_lb_rule_2.name))
int_lb_rule_1.delete(self.api_client)
with self.assertRaises(Exception):
self.validate_Internal_LB_Rule(int_lb_rule_1)
self.debug("Internal LB Rule successfully deleted in CloudStack")
int_lb_rule_2.delete(self.api_client)
with self.assertRaises(Exception):
self.validate_Internal_LB_Rule(int_lb_rule_2)
self.debug("Internal LB Rule successfully deleted in CloudStack")
# Validating InternalLbVms un-deployment
with self.assertRaises(Exception):
self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress)
self.debug("InternalLbVm successfully destroyed in CloudStack")
with self.assertRaises(Exception):
self.check_InternalLbVm_state(internal_tier, int_lb_rule_2.sourceipaddress)
self.debug("InternalLbVm successfully destroyed in CloudStack")
# VSD Verification
with self.assertRaises(Exception):
self.verify_vsp_LB_device(int_lb_vm_1)
self.debug("InternalLbVm successfully destroyed in VSD")
with self.assertRaises(Exception):
self.verify_vsp_LB_device(int_lb_vm_2)
self.debug("InternalLbVm successfully destroyed in VSD")
self.debug("Creating multiple Internal LB Rules with different ports but using the same Load Balancing source "
"IP Address...")
int_lb_rule_1 = self.create_Internal_LB_Rule(internal_tier, vm_array=[internal_vm])
self.validate_Internal_LB_Rule(int_lb_rule_1, state="Active", vm_array=[internal_vm])
int_lb_rule_2 = self.create_Internal_LB_Rule(internal_tier,
vm_array=[internal_vm],
services=self.test_data["internal_lbrule_http"],
source_ip=int_lb_rule_1.sourceipaddress
)
self.validate_Internal_LB_Rule(int_lb_rule_2, state="Active", vm_array=[internal_vm])
# Validating InternalLbVm deployment and state
int_lb_vm = self.get_InternalLbVm(internal_tier, int_lb_rule_1.sourceipaddress)
self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running")
# VSD Verification
self.verify_vsp_LB_device(int_lb_vm)
self.debug('Removing VMs from the Internal LB Rules - %s, %s' % (int_lb_rule_1.name, int_lb_rule_2.name))
int_lb_rule_1.remove(self.api_client, vms=[internal_vm])
with self.assertRaises(Exception):
self.validate_Internal_LB_Rule(int_lb_rule_1, vm_array=[internal_vm])
self.debug("VMs successfully removed from the Internal LB Rule in CloudStack")
int_lb_rule_2.remove(self.api_client, vms=[internal_vm])
with self.assertRaises(Exception):
self.validate_Internal_LB_Rule(int_lb_rule_2, vm_array=[internal_vm])
self.debug("VMs successfully removed from the Internal LB Rule in CloudStack")
# Validating InternalLbVm state
self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running")
# VSD Verification
self.verify_vsp_LB_device(int_lb_vm)
self.debug('Deleting the Internal LB Rules - %s, %s' % (int_lb_rule_1.name, int_lb_rule_2.name))
int_lb_rule_1.delete(self.api_client)
with self.assertRaises(Exception):
self.validate_Internal_LB_Rule(int_lb_rule_1)
self.debug("Internal LB Rule successfully deleted in CloudStack")
int_lb_rule_2.delete(self.api_client)
with self.assertRaises(Exception):
self.validate_Internal_LB_Rule(int_lb_rule_2)
self.debug("Internal LB Rule successfully deleted in CloudStack")
# Validating InternalLbVm un-deployment
with self.assertRaises(Exception):
self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress)
self.debug("InternalLbVm successfully destroyed in CloudStack")
# VSD Verification
with self.assertRaises(Exception):
self.verify_vsp_LB_device(int_lb_vm)
self.debug("InternalLbVm successfully destroyed in VSD")
self.debug("Creating multiple Internal LB Rules with same ports and using the same Load Balacing source IP "
"Address...")
int_lb_rule = self.create_Internal_LB_Rule(internal_tier, vm_array=[internal_vm])
self.validate_Internal_LB_Rule(int_lb_rule, state="Active", vm_array=[internal_vm])
with self.assertRaises(Exception):
self.create_Internal_LB_Rule(internal_tier, vm_array=[internal_vm], source_ip=int_lb_rule.sourceipaddress)
self.debug("Internal LB Rule creation failed as it conflicts with the existing rule")
# Validating InternalLbVm deployment and state
int_lb_vm = self.get_InternalLbVm(internal_tier, int_lb_rule.sourceipaddress)
self.check_InternalLbVm_state(internal_tier, int_lb_rule.sourceipaddress, state="Running")
# VSD Verification
self.verify_vsp_LB_device(int_lb_vm)
self.debug('Removing VMs from the Internal LB Rule - %s' % int_lb_rule.name)
int_lb_rule.remove(self.api_client, vms=[internal_vm])
with self.assertRaises(Exception):
self.validate_Internal_LB_Rule(int_lb_rule, vm_array=[internal_vm])
self.debug("VMs successfully removed from the Internal LB Rule in CloudStack")
# Validating InternalLbVm state
self.check_InternalLbVm_state(internal_tier, int_lb_rule.sourceipaddress, state="Running")
# VSD Verification
self.verify_vsp_LB_device(int_lb_vm)
self.debug('Deleting the Internal LB Rule - %s' % int_lb_rule.name)
int_lb_rule.delete(self.api_client)
with self.assertRaises(Exception):
self.validate_Internal_LB_Rule(int_lb_rule)
self.debug("Internal LB Rule successfully deleted in CloudStack")
# Validating InternalLbVm un-deployment
with self.assertRaises(Exception):
self.check_InternalLbVm_state(internal_tier, int_lb_rule.sourceipaddress)
self.debug("InternalLbVm successfully destroyed in CloudStack")
# VSD Verification
with self.assertRaises(Exception):
self.verify_vsp_LB_device(int_lb_vm)
self.debug("InternalLbVm successfully destroyed in VSD")
self.debug("Attaching a VM from a different tier to an Internal LB Rule created on a tier...")
with self.assertRaises(Exception):
self.create_Internal_LB_Rule(internal_tier, vm_array=[public_vm])
self.debug("Internal LB Rule creation failed as the VM belongs to a different network")
@attr(tags=["advanced", "nuagevsp"], required_hardware="true")
def test_05_nuage_internallb_traffic(self):
"""Test Nuage VSP VPC Internal LB functionality by performing (wget) traffic tests within a VPC
"""
# 1. Create an Internal LB Rule "internal_lbrule" with source IP Address specified on the Internal tier, check
# if the Internal LB Rule is successfully created.
# 2. Create an Internal LB Rule "internal_lbrule_http" with source IP Address (same as above) specified on the
# Internal tier, check if the Internal LB Rule is successfully created.
# 3. Attach a VM to the above created Internal LB Rules, check if the InternalLbVm is successfully deployed in
# the Internal tier.
# 4. Deploy two more VMs in the Internal tier, check if the VMs are successfully deployed.
# 5. Attach the newly deployed VMs to the above created Internal LB Rules, verify the validity of the above
# created Internal LB Rules over three Load Balanced VMs in the Internal tier.
# 6. Create the corresponding Network ACL rules to make the created Internal LB rules (SSH & HTTP) accessible,
# check if the Network ACL rules are successfully added to the internal tier.
# 7. Validate the Internal LB functionality by performing (wget) traffic tests from a VM in the Public tier to
# the Internal load balanced guest VMs in the Internal tier, using Static NAT functionality to access (ssh)
# the VM on the Public tier.
# 8. Verify that the InternalLbVm gets destroyed when the last Internal LB rule is removed from the Internal
# tier.
# 9. Repeat the above steps for one more Internal tier as well, validate the Internal LB functionality.
# Creating a VPC offering
self.debug("Creating Nuage VSP VPC offering with Internal LB service...")
vpc_off = self.create_VpcOffering(self.test_data["nuagevsp"]["vpc_offering_lb"])
self.validate_VpcOffering(vpc_off, state="Enabled")
# Creating a VPC
self.debug("Creating a VPC with Internal LB service...")
vpc = self.create_Vpc(vpc_off, cidr='10.1.0.0/16')
self.validate_Vpc(vpc, state="Enabled")
# Creating network offerings
self.debug("Creating Nuage VSP VPC Network offering with Internal LB service...")
net_off_1 = self.create_NetworkOffering(
self.test_data["nuagevsp"]["vpc_network_offering_internal_lb"])
self.validate_NetworkOffering(net_off_1, state="Enabled")
self.debug("Creating Nuage VSP VPC Network offering without Internal LB service...")
net_off_2 = self.create_NetworkOffering(self.test_data["nuagevsp"]["vpc_network_offering"])
self.validate_NetworkOffering(net_off_2, state="Enabled")
# Creating VPC networks in the VPC, and deploying VMs
self.debug("Creating a VPC network with Internal LB service...")
internal_tier_1 = self.create_Network(net_off_1, gateway='10.1.1.1', vpc=vpc)
self.validate_Network(internal_tier_1, state="Implemented")
vr = self.get_Router(internal_tier_1)
self.check_Router_state(vr, state="Running")
self.debug("Deploying a VM in network - %s" % internal_tier_1.name)
internal_vm_1 = self.create_VM(internal_tier_1)
self.check_VM_state(internal_vm_1, state="Running")
# VSD verification
self.verify_vsp_network(self.domain.id, internal_tier_1, vpc)
self.verify_vsp_router(vr)
self.verify_vsp_vm(internal_vm_1)
self.debug("Creating one more VPC network with Internal LB service...")
internal_tier_2 = self.create_Network(net_off_1, gateway='10.1.2.1', vpc=vpc)
self.validate_Network(internal_tier_2, state="Implemented")
vr = self.get_Router(internal_tier_2)
self.check_Router_state(vr, state="Running")
self.debug("Deploying a VM in network - %s" % internal_tier_2.name)
internal_vm_2 = self.create_VM(internal_tier_2)
self.check_VM_state(internal_vm_2, state="Running")
# VSD verification
self.verify_vsp_network(self.domain.id, internal_tier_2, vpc)
self.verify_vsp_router(vr)
self.verify_vsp_vm(internal_vm_2)
self.debug("Creating a VPC network without Internal LB service...")
public_tier = self.create_Network(net_off_2, gateway='10.1.3.1', vpc=vpc)
self.validate_Network(public_tier, state="Implemented")
vr = self.get_Router(public_tier)
self.check_Router_state(vr, state="Running")
self.debug("Deploying a VM in network - %s" % public_tier.name)
public_vm = self.create_VM(public_tier)
self.check_VM_state(public_vm, state="Running")
# VSD verification
self.verify_vsp_network(self.domain.id, public_tier, vpc)
self.verify_vsp_router(vr)
self.verify_vsp_vm(public_vm)
# Creating Internal LB Rules in the Internal tiers
self.debug("Creating two Internal LB Rules (SSH & HTTP) using the same Load Balancing source IP Address...")
int_lb_rule_1 = self.create_Internal_LB_Rule(internal_tier_1, vm_array=[internal_vm_1])
self.validate_Internal_LB_Rule(int_lb_rule_1, state="Active", vm_array=[internal_vm_1])
int_lb_rule_2 = self.create_Internal_LB_Rule(internal_tier_1,
vm_array=[internal_vm_1],
services=self.test_data["internal_lbrule_http"],
source_ip=int_lb_rule_1.sourceipaddress
)
self.validate_Internal_LB_Rule(int_lb_rule_2, state="Active", vm_array=[internal_vm_1])
# Validating InternalLbVm deployment and state
int_lb_vm_1 = self.get_InternalLbVm(internal_tier_1, int_lb_rule_1.sourceipaddress)
self.check_InternalLbVm_state(internal_tier_1, int_lb_rule_1.sourceipaddress, state="Running")
# VSD Verification
self.verify_vsp_LB_device(int_lb_vm_1)
# Deploying more VMs in the Internal tier
self.debug("Deploying two more VMs in network - %s" % internal_tier_1.name)
internal_vm_1_1 = self.create_VM(internal_tier_1)
internal_vm_1_2 = self.create_VM(internal_tier_1)
# VSD verification
self.verify_vsp_vm(internal_vm_1_1)
self.verify_vsp_vm(internal_vm_1_2)
# Adding newly deployed VMs to the created Internal LB rules
self.debug("Adding two more virtual machines to the created Internal LB rules...")
int_lb_rule_1.assign(self.api_client, [internal_vm_1_1, internal_vm_1_2])
self.validate_Internal_LB_Rule(int_lb_rule_1, state="Active",
vm_array=[internal_vm_1, internal_vm_1_1, internal_vm_1_2])
int_lb_rule_2.assign(self.api_client, [internal_vm_1_1, internal_vm_1_2])
self.validate_Internal_LB_Rule(int_lb_rule_2, state="Active",
vm_array=[internal_vm_1, internal_vm_1_1, internal_vm_1_2])
# Validating InternalLbVm state
self.check_InternalLbVm_state(internal_tier_1, int_lb_rule_1.sourceipaddress, state="Running")
# VSD Verification
self.verify_vsp_LB_device(int_lb_vm_1)
# Adding Network ACL rules in the Internal tier
self.debug("Adding Network ACL rules to make the created Internal LB rules (SSH & HTTP) accessible...")
ssh_rule = self.create_NetworkAclRule(self.test_data["ingress_rule"], network=internal_tier_1)
http_rule = self.create_NetworkAclRule(self.test_data["http_rule"], network=internal_tier_1)
# VSD verification
self.verify_vsp_firewall_rule(ssh_rule)
self.verify_vsp_firewall_rule(http_rule)
# Creating Internal LB Rules in the Internal tier
self.debug("Creating two Internal LB Rules (SSH & HTTP) using the same Load Balancing source IP Address...")
int_lb_rule_3 = self.create_Internal_LB_Rule(internal_tier_2, vm_array=[internal_vm_2])
self.validate_Internal_LB_Rule(int_lb_rule_3, state="Active", vm_array=[internal_vm_2])
int_lb_rule_4 = self.create_Internal_LB_Rule(internal_tier_2,
vm_array=[internal_vm_2],
services=self.test_data["internal_lbrule_http"],
source_ip=int_lb_rule_3.sourceipaddress
)
self.validate_Internal_LB_Rule(int_lb_rule_4, state="Active", vm_array=[internal_vm_2])
# Validating InternalLbVm deployment and state
int_lb_vm_2 = self.get_InternalLbVm(internal_tier_2, int_lb_rule_3.sourceipaddress)
self.check_InternalLbVm_state(internal_tier_2, int_lb_rule_3.sourceipaddress, state="Running")
# VSD Verification
self.verify_vsp_LB_device(int_lb_vm_2)
# Deploying more VMs in the Internal tier
self.debug("Deploying two more VMs in network - %s" % internal_tier_2.name)
internal_vm_2_1 = self.create_VM(internal_tier_2)
internal_vm_2_2 = self.create_VM(internal_tier_2)
# VSD verification
self.verify_vsp_vm(internal_vm_2_1)
self.verify_vsp_vm(internal_vm_2_2)
# Adding newly deployed VMs to the created Internal LB rules
self.debug("Adding two more virtual machines to the created Internal LB rules...")
int_lb_rule_3.assign(self.api_client, [internal_vm_2_1, internal_vm_2_2])
self.validate_Internal_LB_Rule(int_lb_rule_3, state="Active",
vm_array=[internal_vm_2, internal_vm_2_1, internal_vm_2_2])
int_lb_rule_4.assign(self.api_client, [internal_vm_2_1, internal_vm_2_2])
self.validate_Internal_LB_Rule(int_lb_rule_4, state="Active",
vm_array=[internal_vm_2, internal_vm_2_1, internal_vm_2_2])
# Validating InternalLbVm state
self.check_InternalLbVm_state(internal_tier_2, int_lb_rule_3.sourceipaddress, state="Running")
# VSD Verification
self.verify_vsp_LB_device(int_lb_vm_2)
# Adding Network ACL rules in the Internal tier
self.debug("Adding Network ACL rules to make the created Internal LB rules (SSH & HTTP) accessible...")
ssh_rule = self.create_NetworkAclRule(self.test_data["ingress_rule"], network=internal_tier_2)
http_rule = self.create_NetworkAclRule(self.test_data["http_rule"], network=internal_tier_2)
# VSD verification
self.verify_vsp_firewall_rule(ssh_rule)
self.verify_vsp_firewall_rule(http_rule)
# Creating Static NAT Rule for the VM in the Public tier
public_ip = self.acquire_PublicIPAddress(public_tier, vpc)
self.validate_PublicIPAddress(public_ip, public_tier)
self.create_StaticNatRule_For_VM(public_vm, public_ip, public_tier)
self.validate_PublicIPAddress(public_ip, public_tier, static_nat=True, vm=public_vm)
# VSD verification
self.verify_vsp_floating_ip(public_tier, public_vm, public_ip.ipaddress, vpc)
# Adding Network ACL rule in the Public tier
self.debug("Adding Network ACL rule to make the created NAT rule (SSH) accessible...")
public_ssh_rule = self.create_NetworkAclRule(self.test_data["ingress_rule"], network=public_tier)
# VSD verification
self.verify_vsp_firewall_rule(public_ssh_rule)
# Internal LB (wget) traffic tests
ssh_client = self.ssh_into_VM(public_vm, public_ip)
wget_file_1 = self.wget_from_vm_cmd(ssh_client,
int_lb_rule_1.sourceipaddress,
self.test_data["http_rule"]["publicport"]
)
ssh_client = self.ssh_into_VM(public_vm, public_ip)
wget_file_2 = self.wget_from_vm_cmd(ssh_client,
int_lb_rule_3.sourceipaddress,
self.test_data["http_rule"]["publicport"]
)
# Verifying Internal LB (wget) traffic tests
self.verify_lb_wget_file(wget_file_1, [internal_vm_1, internal_vm_1_1, internal_vm_1_2])
self.verify_lb_wget_file(wget_file_2, [internal_vm_2, internal_vm_2_1, internal_vm_2_2])
@attr(tags=["advanced", "nuagevsp"], required_hardware="true")
def test_06_nuage_internallb_algorithms_traffic(self):
"""Test Nuage VSP VPC Internal LB functionality with different LB algorithms by performing (wget) traffic tests
within a VPC
"""
# Repeat the tests in the testcase "test_05_nuage_internallb_traffic" with different Internal LB algorithms:
# 1. Round Robin
# 2. Least connections
# 3. Source
# Verify the above Internal LB algorithms by performing multiple (wget) traffic tests within a VPC.
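# Note (assumption, not stated in the original test description): the helper
# validate_internallb_algorithm_traffic used at the end of this test is expected to issue repeated wget
# requests against the LB source IP and check that the set/order of responding VMs matches what the
# configured algorithm ("roundrobin", "leastconn" or "source") should produce.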
# Creating a VPC offering
self.debug("Creating Nuage VSP VPC offering with Internal LB service...")
vpc_off = self.create_VpcOffering(self.test_data["nuagevsp"]["vpc_offering_lb"])
self.validate_VpcOffering(vpc_off, state="Enabled")
# Creating a VPC
self.debug("Creating a VPC with Internal LB service...")
vpc = self.create_Vpc(vpc_off, cidr='10.1.0.0/16')
self.validate_Vpc(vpc, state="Enabled")
# Creating network offerings
self.debug("Creating Nuage VSP VPC Network offering with Internal LB service...")
net_off_1 = self.create_NetworkOffering(
self.test_data["nuagevsp"]["vpc_network_offering_internal_lb"])
self.validate_NetworkOffering(net_off_1, state="Enabled")
self.debug("Creating Nuage VSP VPC Network offering without Internal LB service...")
net_off_2 = self.create_NetworkOffering(self.test_data["nuagevsp"]["vpc_network_offering"])
self.validate_NetworkOffering(net_off_2, state="Enabled")
# Creating VPC networks in the VPC, and deploying VMs
self.debug("Creating a VPC network with Internal LB service...")
internal_tier = self.create_Network(net_off_1, gateway='10.1.1.1', vpc=vpc)
self.validate_Network(internal_tier, state="Implemented")
vr = self.get_Router(internal_tier)
self.check_Router_state(vr, state="Running")
self.debug("Deploying a VM in network - %s" % internal_tier.name)
internal_vm = self.create_VM(internal_tier)
self.check_VM_state(internal_vm, state="Running")
# VSD verification
self.verify_vsp_network(self.domain.id, internal_tier, vpc)
self.verify_vsp_router(vr)
self.verify_vsp_vm(internal_vm)
self.debug("Creating a VPC network without Internal LB service...")
public_tier = self.create_Network(net_off_2, gateway='10.1.2.1', vpc=vpc)
self.validate_Network(public_tier, state="Implemented")
vr = self.get_Router(public_tier)
self.check_Router_state(vr, state="Running")
self.debug("Deploying a VM in network - %s" % public_tier.name)
public_vm = self.create_VM(public_tier)
self.check_VM_state(public_vm, state="Running")
# VSD verification
self.verify_vsp_network(self.domain.id, public_tier, vpc)
self.verify_vsp_router(vr)
self.verify_vsp_vm(public_vm)
# Creating Internal LB Rules in the Internal tier with Round Robin Algorithm
self.debug("Creating two Internal LB Rules (SSH & HTTP) with Round Robin Algorithm...")
int_lb_rule_1 = self.create_Internal_LB_Rule(internal_tier, vm_array=[internal_vm])
self.validate_Internal_LB_Rule(int_lb_rule_1, state="Active", vm_array=[internal_vm])
int_lb_rule_2 = self.create_Internal_LB_Rule(internal_tier,
vm_array=[internal_vm],
services=self.test_data["internal_lbrule_http"],
source_ip=int_lb_rule_1.sourceipaddress
)
self.validate_Internal_LB_Rule(int_lb_rule_2, state="Active", vm_array=[internal_vm])
# Validating InternalLbVm deployment and state
int_lb_vm_1 = self.get_InternalLbVm(internal_tier, int_lb_rule_1.sourceipaddress)
self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running")
# VSD Verification
self.verify_vsp_LB_device(int_lb_vm_1)
# Deploying more VMs in the Internal tier
self.debug("Deploying two more VMs in network - %s" % internal_tier.name)
internal_vm_1 = self.create_VM(internal_tier)
internal_vm_2 = self.create_VM(internal_tier)
# VSD verification
self.verify_vsp_vm(internal_vm_1)
self.verify_vsp_vm(internal_vm_2)
# Adding newly deployed VMs to the created Internal LB rules
self.debug("Adding two more virtual machines to the created Internal LB rules...")
int_lb_rule_1.assign(self.api_client, [internal_vm_1, internal_vm_2])
self.validate_Internal_LB_Rule(int_lb_rule_1, state="Active",
vm_array=[internal_vm, internal_vm_1, internal_vm_2])
int_lb_rule_2.assign(self.api_client, [internal_vm_1, internal_vm_2])
self.validate_Internal_LB_Rule(int_lb_rule_2, state="Active",
vm_array=[internal_vm, internal_vm_1, internal_vm_2])
# Validating InternalLbVm state
self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running")
# VSD Verification
self.verify_vsp_LB_device(int_lb_vm_1)
# Creating Internal LB Rules in the Internal tier with Least connections Algorithm
self.debug("Creating two Internal LB Rules (SSH & HTTP) with Least connections Algorithm...")
self.test_data["internal_lbrule"]["algorithm"] = "leastconn"
int_lb_rule_3 = self.create_Internal_LB_Rule(internal_tier,
vm_array=[internal_vm, internal_vm_1, internal_vm_2],
services=self.test_data["internal_lbrule"]
)
self.validate_Internal_LB_Rule(int_lb_rule_3, state="Active",
vm_array=[internal_vm, internal_vm_1, internal_vm_2])
self.test_data["internal_lbrule_http"]["algorithm"] = "leastconn"
int_lb_rule_4 = self.create_Internal_LB_Rule(internal_tier,
vm_array=[internal_vm, internal_vm_1, internal_vm_2],
services=self.test_data["internal_lbrule_http"],
source_ip=int_lb_rule_3.sourceipaddress
)
self.validate_Internal_LB_Rule(int_lb_rule_4, state="Active",
vm_array=[internal_vm, internal_vm_1, internal_vm_2])
# Validating InternalLbVm deployment and state
int_lb_vm_2 = self.get_InternalLbVm(internal_tier, int_lb_rule_3.sourceipaddress)
self.check_InternalLbVm_state(internal_tier, int_lb_rule_3.sourceipaddress, state="Running")
# VSD Verification
self.verify_vsp_LB_device(int_lb_vm_2)
# Creating Internal LB Rules in the Internal tier with Source Algorithm
self.debug("Creating two Internal LB Rules (SSH & HTTP) with Source Algorithm...")
self.test_data["internal_lbrule"]["algorithm"] = "source"
int_lb_rule_5 = self.create_Internal_LB_Rule(internal_tier,
vm_array=[internal_vm, internal_vm_1, internal_vm_2],
services=self.test_data["internal_lbrule"]
)
self.validate_Internal_LB_Rule(int_lb_rule_5, state="Active",
vm_array=[internal_vm, internal_vm_1, internal_vm_2])
self.test_data["internal_lbrule_http"]["algorithm"] = "source"
int_lb_rule_6 = self.create_Internal_LB_Rule(internal_tier,
vm_array=[internal_vm, internal_vm_1, internal_vm_2],
services=self.test_data["internal_lbrule_http"],
source_ip=int_lb_rule_5.sourceipaddress
)
self.validate_Internal_LB_Rule(int_lb_rule_6, state="Active",
vm_array=[internal_vm, internal_vm_1, internal_vm_2])
# Validating InternalLbVm deployment and state
int_lb_vm_3 = self.get_InternalLbVm(internal_tier, int_lb_rule_5.sourceipaddress)
self.check_InternalLbVm_state(internal_tier, int_lb_rule_5.sourceipaddress, state="Running")
# VSD Verification
self.verify_vsp_LB_device(int_lb_vm_3)
# Adding Network ACL rules in the Internal tier
self.debug("Adding Network ACL rules to make the created Internal LB rules (SSH & HTTP) accessible...")
ssh_rule = self.create_NetworkAclRule(self.test_data["ingress_rule"], network=internal_tier)
http_rule = self.create_NetworkAclRule(self.test_data["http_rule"], network=internal_tier)
# VSD verification
self.verify_vsp_firewall_rule(ssh_rule)
self.verify_vsp_firewall_rule(http_rule)
# Creating Static NAT Rule for the VM in the Public tier
public_ip = self.acquire_PublicIPAddress(public_tier, vpc)
self.validate_PublicIPAddress(public_ip, public_tier)
self.create_StaticNatRule_For_VM(public_vm, public_ip, public_tier)
self.validate_PublicIPAddress(public_ip, public_tier, static_nat=True, vm=public_vm)
# VSD verification
self.verify_vsp_floating_ip(public_tier, public_vm, public_ip.ipaddress, vpc)
# Adding Network ACL rule in the Public tier
self.debug("Adding Network ACL rule to make the created NAT rule (SSH) accessible...")
public_ssh_rule = self.create_NetworkAclRule(self.test_data["ingress_rule"], network=public_tier)
# VSD verification
self.verify_vsp_firewall_rule(public_ssh_rule)
# Internal LB (wget) traffic tests with Round Robin Algorithm
ssh_client = self.ssh_into_VM(public_vm, public_ip)
self.validate_internallb_algorithm_traffic(ssh_client,
int_lb_rule_1.sourceipaddress,
self.test_data["http_rule"]["publicport"],
[internal_vm, internal_vm_1, internal_vm_2],
"roundrobin"
)
# Internal LB (wget) traffic tests with Least connections Algorithm
ssh_client = self.ssh_into_VM(public_vm, public_ip)
self.validate_internallb_algorithm_traffic(ssh_client,
int_lb_rule_3.sourceipaddress,
self.test_data["http_rule"]["publicport"],
[internal_vm, internal_vm_1, internal_vm_2],
"leastconn"
)
# Internal LB (wget) traffic tests with Source Algorithm
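# Since every request below originates from the same client (public_vm), the "source" algorithm is expected
# to keep hitting the same backend VM for the duration of this traffic test.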
ssh_client = self.ssh_into_VM(public_vm, public_ip)
self.validate_internallb_algorithm_traffic(ssh_client,
int_lb_rule_5.sourceipaddress,
self.test_data["http_rule"]["publicport"],
[internal_vm, internal_vm_1, internal_vm_2],
"source"
)
@attr(tags=["advanced", "nuagevsp"], required_hardware="true")
def test_07_nuage_internallb_vpc_network_restarts_traffic(self):
"""Test Nuage VSP VPC Internal LB functionality with restarts of VPC network components by performing (wget)
traffic tests within a VPC
"""
# Repeat the tests in the testcase "test_05_nuage_internallb_traffic" with restarts of VPC networks (tiers):
# 1. Restart tier with InternalLbVm (cleanup = false), verify that the InternalLbVm gets destroyed and deployed
# again in the Internal tier.
# 2. Restart tier with InternalLbVm (cleanup = true), verify that the InternalLbVm gets destroyed and deployed
# again in the Internal tier.
# 3. Restart tier without InternalLbVm (cleanup = false), verify that this restart has no effect on the
# InternalLbVm functionality.
# 4. Restart tier without InternalLbVm (cleanup = true), verify that this restart has no effect on the
# InternalLbVm functionality.
# 5. Stop all the load balanced VMs in the Internal tier, verify that the (wget) traffic test fails while the
# InternalLbVm remains running in the Internal tier.
# 6. Start all the load balanced VMs in the Internal tier, verify that the (wget) traffic test succeeds again
# once the VMs are back in Running state.
# 7. Restart VPC, verify that the VPC VR gets rebooted and this restart has no effect on the InternalLbVm
# functionality.
# Verify the above restarts of VPC networks (tiers) by performing (wget) traffic tests within a VPC.
# Creating a VPC offering
self.debug("Creating Nuage VSP VPC offering with Internal LB service...")
vpc_off = self.create_VpcOffering(self.test_data["nuagevsp"]["vpc_offering_lb"])
self.validate_VpcOffering(vpc_off, state="Enabled")
# Creating a VPC
self.debug("Creating a VPC with Internal LB service...")
vpc = self.create_Vpc(vpc_off, cidr='10.1.0.0/16')
self.validate_Vpc(vpc, state="Enabled")
# Creating network offerings
self.debug("Creating Nuage VSP VPC Network offering with Internal LB service...")
net_off_1 = self.create_NetworkOffering(
self.test_data["nuagevsp"]["vpc_network_offering_internal_lb"])
self.validate_NetworkOffering(net_off_1, state="Enabled")
self.debug("Creating Nuage VSP VPC Network offering without Internal LB service...")
net_off_2 = self.create_NetworkOffering(self.test_data["nuagevsp"]["vpc_network_offering"])
self.validate_NetworkOffering(net_off_2, state="Enabled")
# Creating VPC networks in the VPC, and deploying VMs
self.debug("Creating a VPC network with Internal LB service...")
internal_tier = self.create_Network(net_off_1, gateway='10.1.1.1', vpc=vpc)
self.validate_Network(internal_tier, state="Implemented")
vr = self.get_Router(internal_tier)
self.check_Router_state(vr, state="Running")
self.debug("Deploying a VM in network - %s" % internal_tier.name)
internal_vm = self.create_VM(internal_tier)
self.check_VM_state(internal_vm, state="Running")
# VSD verification
self.verify_vsp_network(self.domain.id, internal_tier, vpc)
self.verify_vsp_router(vr)
self.verify_vsp_vm(internal_vm)
self.debug("Creating a VPC network without Internal LB service...")
public_tier = self.create_Network(net_off_2, gateway='10.1.2.1', vpc=vpc)
self.validate_Network(public_tier, state="Implemented")
vr = self.get_Router(public_tier)
self.check_Router_state(vr, state="Running")
self.debug("Deploying a VM in network - %s" % public_tier.name)
public_vm = self.create_VM(public_tier)
self.check_VM_state(public_vm, state="Running")
# VSD verification
self.verify_vsp_network(self.domain.id, public_tier, vpc)
self.verify_vsp_router(vr)
self.verify_vsp_vm(public_vm)
# Creating Internal LB Rules in the Internal tier
self.debug("Creating two Internal LB Rules (SSH & HTTP) using the same Load Balancing source IP Address...")
int_lb_rule_1 = self.create_Internal_LB_Rule(internal_tier, vm_array=[internal_vm])
self.validate_Internal_LB_Rule(int_lb_rule_1, state="Active", vm_array=[internal_vm])
int_lb_rule_2 = self.create_Internal_LB_Rule(internal_tier,
vm_array=[internal_vm],
services=self.test_data["internal_lbrule_http"],
source_ip=int_lb_rule_1.sourceipaddress
)
self.validate_Internal_LB_Rule(int_lb_rule_2, state="Active", vm_array=[internal_vm])
# Validating InternalLbVm deployment and state
int_lb_vm = self.get_InternalLbVm(internal_tier, int_lb_rule_1.sourceipaddress)
self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running")
# VSD Verification
self.verify_vsp_LB_device(int_lb_vm)
# Deploying more VMs in the Internal tier
self.debug("Deploying two more VMs in network - %s" % internal_tier.name)
internal_vm_1 = self.create_VM(internal_tier)
internal_vm_2 = self.create_VM(internal_tier)
# VSD verification
self.verify_vsp_vm(internal_vm_1)
self.verify_vsp_vm(internal_vm_2)
# Adding newly deployed VMs to the created Internal LB rules
self.debug("Adding two more virtual machines to the created Internal LB rules...")
int_lb_rule_1.assign(self.api_client, [internal_vm_1, internal_vm_2])
self.validate_Internal_LB_Rule(int_lb_rule_1, state="Active",
vm_array=[internal_vm, internal_vm_1, internal_vm_2])
int_lb_rule_2.assign(self.api_client, [internal_vm_1, internal_vm_2])
self.validate_Internal_LB_Rule(int_lb_rule_2, state="Active",
vm_array=[internal_vm, internal_vm_1, internal_vm_2])
# Validating InternalLbVm state
self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running")
# VSD Verification
self.verify_vsp_LB_device(int_lb_vm)
# Adding Network ACL rules in the Internal tier
self.debug("Adding Network ACL rules to make the created Internal LB rules (SSH & HTTP) accessible...")
ssh_rule = self.create_NetworkAclRule(self.test_data["ingress_rule"], network=internal_tier)
http_rule = self.create_NetworkAclRule(self.test_data["http_rule"], network=internal_tier)
# VSD verification
self.verify_vsp_firewall_rule(ssh_rule)
self.verify_vsp_firewall_rule(http_rule)
# Creating Static NAT Rule for the VM in the Public tier
public_ip = self.acquire_PublicIPAddress(public_tier, vpc)
self.validate_PublicIPAddress(public_ip, public_tier)
self.create_StaticNatRule_For_VM(public_vm, public_ip, public_tier)
self.validate_PublicIPAddress(public_ip, public_tier, static_nat=True, vm=public_vm)
# VSD verification
self.verify_vsp_floating_ip(public_tier, public_vm, public_ip.ipaddress, vpc)
# Adding Network ACL rule in the Public tier
self.debug("Adding Network ACL rule to make the created NAT rule (SSH) accessible...")
public_ssh_rule = self.create_NetworkAclRule(self.test_data["ingress_rule"], network=public_tier)
# VSD verification
self.verify_vsp_firewall_rule(public_ssh_rule)
# Internal LB (wget) traffic test
ssh_client = self.ssh_into_VM(public_vm, public_ip)
wget_file = self.wget_from_vm_cmd(ssh_client,
int_lb_rule_1.sourceipaddress,
self.test_data["http_rule"]["publicport"]
)
# Verifying Internal LB (wget) traffic test
self.verify_lb_wget_file(wget_file, [internal_vm, internal_vm_1, internal_vm_2])
# Restart Internal tier (cleanup = false)
# InternalLbVm gets destroyed and deployed again in the Internal tier
self.debug("Restarting the Internal tier without cleanup...")
Network.restart(internal_tier, self.api_client, cleanup=False)
self.validate_Network(internal_tier, state="Implemented")
self.check_Router_state(vr, state="Running")
self.check_VM_state(internal_vm, state="Running")
self.check_VM_state(internal_vm_1, state="Running")
self.check_VM_state(internal_vm_2, state="Running")
# VSD verification
self.verify_vsp_network(self.domain.id, internal_tier, vpc)
self.verify_vsp_router(vr)
self.verify_vsp_vm(internal_vm)
self.verify_vsp_vm(internal_vm_1)
self.verify_vsp_vm(internal_vm_2)
self.verify_vsp_firewall_rule(ssh_rule)
self.verify_vsp_firewall_rule(http_rule)
# Validating InternalLbVm state
# InternalLbVm gets destroyed and deployed again in the Internal tier
int_lb_vm = self.get_InternalLbVm(internal_tier, int_lb_rule_1.sourceipaddress)
self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running")
# VSD Verification
self.verify_vsp_LB_device(int_lb_vm)
# Internal LB (wget) traffic test
ssh_client = self.ssh_into_VM(public_vm, public_ip)
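# Retry the wget check below for up to 10 attempts, 30 seconds apart (roughly 5 minutes in total), to give
# the re-created InternalLbVm time to come back up and start serving traffic after the tier restart.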
tries = 0
while True:
try:
wget_file = self.wget_from_vm_cmd(ssh_client,
int_lb_rule_1.sourceipaddress,
self.test_data["http_rule"]["publicport"]
)
except Exception as e:
self.debug("Failed to wget file via the InternalLbVm after re-starting the Internal tier: %s" % e)
if tries == 10:
break
self.debug("Waiting for the InternalLbVm in the Internal tier to be fully resolved for (wget) traffic "
"test...")
time.sleep(30)
tries += 1
continue
self.debug("Internal LB (wget) traffic test is successful after re-starting the Internal tier")
break
# Verifying Internal LB (wget) traffic test
self.verify_lb_wget_file(wget_file, [internal_vm, internal_vm_1, internal_vm_2])
# Restart Internal tier (cleanup = true)
# InternalLbVm gets destroyed and deployed again in the Internal tier
self.debug("Restarting the Internal tier with cleanup...")
Network.restart(internal_tier, self.api_client, cleanup=True)
self.validate_Network(internal_tier, state="Implemented")
self.check_Router_state(vr, state="Running")
self.check_VM_state(internal_vm, state="Running")
self.check_VM_state(internal_vm_1, state="Running")
self.check_VM_state(internal_vm_2, state="Running")
# VSD verification
self.verify_vsp_network(self.domain.id, internal_tier, vpc)
self.verify_vsp_router(vr)
self.verify_vsp_vm(internal_vm)
self.verify_vsp_vm(internal_vm_1)
self.verify_vsp_vm(internal_vm_2)
self.verify_vsp_firewall_rule(ssh_rule)
self.verify_vsp_firewall_rule(http_rule)
# Validating InternalLbVm state
# InternalLbVm gets destroyed and deployed again in the Internal tier
int_lb_vm = self.get_InternalLbVm(internal_tier, int_lb_rule_1.sourceipaddress)
self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running")
# VSD Verification
self.verify_vsp_LB_device(int_lb_vm)
# Internal LB (wget) traffic test
ssh_client = self.ssh_into_VM(public_vm, public_ip)
tries = 0
while True:
try:
wget_file = self.wget_from_vm_cmd(ssh_client,
int_lb_rule_1.sourceipaddress,
self.test_data["http_rule"]["publicport"]
)
except Exception as e:
self.debug("Failed to wget file via the InternalLbVm after re-starting the Internal tier with cleanup: "
"%s" % e)
if tries == 10:
break
self.debug("Waiting for the InternalLbVm in the Internal tier to be fully resolved for (wget) traffic "
"test...")
time.sleep(30)
tries += 1
continue
self.debug("Internal LB (wget) traffic test is successful after re-starting the Internal tier with cleanup")
break
# Verifying Internal LB (wget) traffic test
self.verify_lb_wget_file(wget_file, [internal_vm, internal_vm_1, internal_vm_2])
# Restart Public tier (cleanup = false)
# This restart has no effect on the InternalLbVm functionality
self.debug("Restarting the Public tier without cleanup...")
Network.restart(public_tier, self.api_client, cleanup=False)
self.validate_Network(public_tier, state="Implemented")
self.check_Router_state(vr, state="Running")
self.check_VM_state(public_vm, state="Running")
self.validate_PublicIPAddress(public_ip, public_tier, static_nat=True, vm=public_vm)
# VSD verification
self.verify_vsp_network(self.domain.id, public_tier, vpc)
self.verify_vsp_router(vr)
self.verify_vsp_vm(public_vm)
self.verify_vsp_floating_ip(public_tier, public_vm, public_ip.ipaddress, vpc)
self.verify_vsp_firewall_rule(public_ssh_rule)
# Validating InternalLbVm state
self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running")
# VSD Verification
self.verify_vsp_LB_device(int_lb_vm)
# Internal LB (wget) traffic test
ssh_client = self.ssh_into_VM(public_vm, public_ip)
wget_file = self.wget_from_vm_cmd(ssh_client,
int_lb_rule_1.sourceipaddress,
self.test_data["http_rule"]["publicport"]
)
# Verifying Internal LB (wget) traffic test
self.verify_lb_wget_file(wget_file, [internal_vm, internal_vm_1, internal_vm_2])
# Restart Public tier (cleanup = true)
# This restart has no effect on the InternalLbVm functionality
self.debug("Restarting the Public tier with cleanup...")
Network.restart(public_tier, self.api_client, cleanup=True)
self.validate_Network(public_tier, state="Implemented")
self.check_Router_state(vr, state="Running")
self.check_VM_state(public_vm, state="Running")
self.validate_PublicIPAddress(public_ip, public_tier, static_nat=True, vm=public_vm)
# VSD verification
self.verify_vsp_network(self.domain.id, public_tier, vpc)
self.verify_vsp_router(vr)
self.verify_vsp_vm(public_vm)
self.verify_vsp_floating_ip(public_tier, public_vm, public_ip.ipaddress, vpc)
self.verify_vsp_firewall_rule(public_ssh_rule)
# Validating InternalLbVm state
self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running")
# VSD Verification
self.verify_vsp_LB_device(int_lb_vm)
# Internal LB (wget) traffic test
ssh_client = self.ssh_into_VM(public_vm, public_ip)
wget_file = self.wget_from_vm_cmd(ssh_client,
int_lb_rule_1.sourceipaddress,
self.test_data["http_rule"]["publicport"]
)
# Verifying Internal LB (wget) traffic test
self.verify_lb_wget_file(wget_file, [internal_vm, internal_vm_1, internal_vm_2])
# Stopping VMs in the Internal tier
# wget traffic test fails as all the VMs in the Internal tier are in stopped state
self.debug("Stopping all the VMs in the Internal tier...")
internal_vm.stop(self.api_client)
internal_vm_1.stop(self.api_client)
internal_vm_2.stop(self.api_client)
self.validate_Network(internal_tier, state="Implemented")
self.check_Router_state(vr, state="Running")
self.check_VM_state(internal_vm, state="Stopped")
self.check_VM_state(internal_vm_1, state="Stopped")
self.check_VM_state(internal_vm_2, state="Stopped")
# VSD verification
self.verify_vsp_network(self.domain.id, internal_tier, vpc)
self.verify_vsp_router(vr)
self.verify_vsp_vm(internal_vm, stopped=True)
self.verify_vsp_vm(internal_vm_1, stopped=True)
self.verify_vsp_vm(internal_vm_2, stopped=True)
self.verify_vsp_firewall_rule(ssh_rule)
self.verify_vsp_firewall_rule(http_rule)
# Validating InternalLbVm state
self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running")
# VSD Verification
self.verify_vsp_LB_device(int_lb_vm)
# Internal LB (wget) traffic test
ssh_client = self.ssh_into_VM(public_vm, public_ip)
with self.assertRaises(Exception):
self.wget_from_vm_cmd(ssh_client,
int_lb_rule_1.sourceipaddress,
self.test_data["http_rule"]["publicport"]
)
self.debug("Failed to wget file as all the VMs in the Internal tier are in stopped state")
# Starting VMs in the Internal tier
# wget traffic test succeeds as all the VMs in the Internal tier are back in running state
self.debug("Starting all the VMs in the Internal tier...")
internal_vm.start(self.api_client)
internal_vm_1.start(self.api_client)
internal_vm_2.start(self.api_client)
self.validate_Network(internal_tier, state="Implemented")
self.check_Router_state(vr, state="Running")
self.check_VM_state(internal_vm, state="Running")
self.check_VM_state(internal_vm_1, state="Running")
self.check_VM_state(internal_vm_2, state="Running")
# VSD verification
self.verify_vsp_network(self.domain.id, internal_tier, vpc)
self.verify_vsp_router(vr)
self.verify_vsp_vm(internal_vm)
self.verify_vsp_vm(internal_vm_1)
self.verify_vsp_vm(internal_vm_2)
self.verify_vsp_firewall_rule(ssh_rule)
self.verify_vsp_firewall_rule(http_rule)
# Validating InternalLbVm state
self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running")
# VSD Verification
self.verify_vsp_LB_device(int_lb_vm)
# Internal LB (wget) traffic test
ssh_client = self.ssh_into_VM(public_vm, public_ip)
tries = 0
while True:
try:
wget_file = self.wget_from_vm_cmd(ssh_client,
int_lb_rule_1.sourceipaddress,
self.test_data["http_rule"]["publicport"]
)
except Exception as e:
self.debug("Failed to wget file via the InternalLbVm after re-starting all the VMs in the Internal tier"
": %s" % e)
if tries == 10:
break
self.debug("Waiting for the InternalLbVm and all the VMs in the Internal tier to be fully resolved for "
"(wget) traffic test...")
time.sleep(30)
tries += 1
continue
self.debug("Internal LB (wget) traffic test is successful after re-starting all the VMs in the Internal "
"tier")
break
# Verifying Internal LB (wget) traffic test
self.verify_lb_wget_file(wget_file, [internal_vm, internal_vm_1, internal_vm_2])
# Restarting VPC (cleanup = false)
# VPC VR gets destroyed and deployed again in the VPC
# This restart has no effect on the InternalLbVm functionality
self.debug("Restarting the VPC without cleanup...")
self.restart_Vpc(vpc, cleanup=False)
self.validate_Network(public_tier, state="Implemented")
self.validate_Network(internal_tier, state="Implemented")
vr = self.get_Router(public_tier)
self.check_Router_state(vr, state="Running")
self.check_VM_state(public_vm, state="Running")
self.check_VM_state(internal_vm, state="Running")
self.check_VM_state(internal_vm_1, state="Running")
self.check_VM_state(internal_vm_2, state="Running")
self.validate_PublicIPAddress(public_ip, public_tier, static_nat=True, vm=public_vm)
# VSD verification
self.verify_vsp_network(self.domain.id, public_tier, vpc)
self.verify_vsp_network(self.domain.id, internal_tier, vpc)
self.verify_vsp_router(vr)
self.verify_vsp_vm(public_vm)
self.verify_vsp_vm(internal_vm)
self.verify_vsp_vm(internal_vm_1)
self.verify_vsp_vm(internal_vm_2)
self.verify_vsp_floating_ip(public_tier, public_vm, public_ip.ipaddress, vpc)
self.verify_vsp_firewall_rule(public_ssh_rule)
self.verify_vsp_firewall_rule(ssh_rule)
self.verify_vsp_firewall_rule(http_rule)
# Validating InternalLbVm state
self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running")
# VSD Verification
self.verify_vsp_LB_device(int_lb_vm)
# Internal LB (wget) traffic test
ssh_client = self.ssh_into_VM(public_vm, public_ip)
wget_file = self.wget_from_vm_cmd(ssh_client,
int_lb_rule_1.sourceipaddress,
self.test_data["http_rule"]["publicport"]
)
# Verifying Internal LB (wget) traffic test
self.verify_lb_wget_file(wget_file, [internal_vm, internal_vm_1, internal_vm_2])
# Restarting VPC (cleanup = true)
# VPC VR gets destroyed and deployed again in the VPC
# This restart has no effect on the InternalLbVm functionality
self.debug("Restarting the VPC with cleanup...")
self.restart_Vpc(vpc, cleanup=True)
self.validate_Network(public_tier, state="Implemented")
self.validate_Network(internal_tier, state="Implemented")
vr = self.get_Router(public_tier)
self.check_Router_state(vr, state="Running")
self.check_VM_state(public_vm, state="Running")
self.check_VM_state(internal_vm, state="Running")
self.check_VM_state(internal_vm_1, state="Running")
self.check_VM_state(internal_vm_2, state="Running")
self.validate_PublicIPAddress(public_ip, public_tier, static_nat=True, vm=public_vm)
# VSD verification
self.verify_vsp_network(self.domain.id, public_tier, vpc)
self.verify_vsp_network(self.domain.id, internal_tier, vpc)
self.verify_vsp_router(vr)
self.verify_vsp_vm(public_vm)
self.verify_vsp_vm(internal_vm)
self.verify_vsp_vm(internal_vm_1)
self.verify_vsp_vm(internal_vm_2)
self.verify_vsp_floating_ip(public_tier, public_vm, public_ip.ipaddress, vpc)
self.verify_vsp_firewall_rule(public_ssh_rule)
self.verify_vsp_firewall_rule(ssh_rule)
self.verify_vsp_firewall_rule(http_rule)
# Validating InternalLbVm state
self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running")
# VSD Verification
self.verify_vsp_LB_device(int_lb_vm)
# Internal LB (wget) traffic test
ssh_client = self.ssh_into_VM(public_vm, public_ip)
wget_file = self.wget_from_vm_cmd(ssh_client,
int_lb_rule_1.sourceipaddress,
self.test_data["http_rule"]["publicport"]
)
# Verifying Internal LB (wget) traffic test
self.verify_lb_wget_file(wget_file, [internal_vm, internal_vm_1, internal_vm_2])
@attr(tags=["advanced", "nuagevsp"], required_hardware="true")
def test_08_nuage_internallb_appliance_operations_traffic(self):
"""Test Nuage VSP VPC Internal LB functionality with InternalLbVm appliance operations by performing (wget)
traffic tests within a VPC
"""
# Repeat the tests in the testcase "test_05_nuage_internallb_traffic" with InternalLbVm appliance operations:
# 1. Verify the InternalLbVm deployment by creating the Internal LB Rules when the VPC VR is in Stopped state;
# the VPC VR state has no effect on the InternalLbVm functionality.
# 2. Stop the InternalLbVm when the VPC VR is in Stopped state.
# 3. Start the InternalLbVm when the VPC VR is in Stopped state.
# 4. Stop the InternalLbVm when the VPC VR is in Running state.
# 5. Start the InternalLbVm when the VPC VR is in Running state.
# 6. Force stop the InternalLbVm when the VPC VR is in Running state.
# 7. Start the InternalLbVm when the VPC VR is in Running state.
# Verify the above InternalLbVm appliance operations by performing (wget) traffic tests within a VPC.
# Creating a VPC offering
self.debug("Creating Nuage VSP VPC offering with Internal LB service...")
vpc_off = self.create_VpcOffering(self.test_data["nuagevsp"]["vpc_offering_lb"])
self.validate_VpcOffering(vpc_off, state="Enabled")
# Creating a VPC
self.debug("Creating a VPC with Internal LB service...")
vpc = self.create_Vpc(vpc_off, cidr='10.1.0.0/16')
self.validate_Vpc(vpc, state="Enabled")
# Creating network offerings
self.debug("Creating Nuage VSP VPC Network offering with Internal LB service...")
net_off_1 = self.create_NetworkOffering(
self.test_data["nuagevsp"]["vpc_network_offering_internal_lb"])
self.validate_NetworkOffering(net_off_1, state="Enabled")
self.debug("Creating Nuage VSP VPC Network offering without Internal LB service...")
net_off_2 = self.create_NetworkOffering(self.test_data["nuagevsp"]["vpc_network_offering"])
self.validate_NetworkOffering(net_off_2, state="Enabled")
# Creating VPC networks in the VPC, and deploying VMs
self.debug("Creating a VPC network with Internal LB service...")
internal_tier = self.create_Network(net_off_1, gateway='10.1.1.1', vpc=vpc)
self.validate_Network(internal_tier, state="Implemented")
vr = self.get_Router(internal_tier)
self.check_Router_state(vr, state="Running")
self.debug("Deploying a VM in network - %s" % internal_tier.name)
internal_vm = self.create_VM(internal_tier)
self.check_VM_state(internal_vm, state="Running")
# VSD verification
self.verify_vsp_network(self.domain.id, internal_tier, vpc)
self.verify_vsp_router(vr)
self.verify_vsp_vm(internal_vm)
self.debug("Creating a VPC network without Internal LB service...")
public_tier = self.create_Network(net_off_2, gateway='10.1.2.1', vpc=vpc)
self.validate_Network(public_tier, state="Implemented")
vr = self.get_Router(public_tier)
self.check_Router_state(vr, state="Running")
self.debug("Deploying a VM in network - %s" % public_tier.name)
public_vm = self.create_VM(public_tier)
self.check_VM_state(public_vm, state="Running")
# VSD verification
self.verify_vsp_network(self.domain.id, public_tier, vpc)
self.verify_vsp_router(vr)
self.verify_vsp_vm(public_vm)
# Stopping the VPC VR
# VPC VR has no effect on the InternalLbVm functionality
Router.stop(self.api_client, id=vr.id)
self.check_Router_state(vr, state="Stopped")
self.validate_Network(public_tier, state="Implemented")
self.validate_Network(internal_tier, state="Implemented")
# VSD verification
self.verify_vsp_router(vr, stopped=True)
self.verify_vsp_network(self.domain.id, public_tier, vpc)
self.verify_vsp_network(self.domain.id, internal_tier, vpc)
# Creating Internal LB Rules in the Internal tier
self.debug("Creating two Internal LB Rules (SSH & HTTP) using the same Load Balancing source IP Address...")
int_lb_rule_1 = self.create_Internal_LB_Rule(internal_tier, vm_array=[internal_vm])
self.validate_Internal_LB_Rule(int_lb_rule_1, state="Active", vm_array=[internal_vm])
int_lb_rule_2 = self.create_Internal_LB_Rule(internal_tier,
vm_array=[internal_vm],
services=self.test_data["internal_lbrule_http"],
source_ip=int_lb_rule_1.sourceipaddress
)
self.validate_Internal_LB_Rule(int_lb_rule_2, state="Active", vm_array=[internal_vm])
# Validating InternalLbVm deployment and state
int_lb_vm = self.get_InternalLbVm(internal_tier, int_lb_rule_1.sourceipaddress)
self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running")
# VSD Verification
self.verify_vsp_LB_device(int_lb_vm)
# Deploying more VMs in the Internal tier
self.debug("Deploying two more VMs in network - %s" % internal_tier.name)
internal_vm_1 = self.create_VM(internal_tier)
internal_vm_2 = self.create_VM(internal_tier)
# VSD verification
self.verify_vsp_vm(internal_vm_1)
self.verify_vsp_vm(internal_vm_2)
# Adding newly deployed VMs to the created Internal LB rules
self.debug("Adding two more virtual machines to the created Internal LB rules...")
int_lb_rule_1.assign(self.api_client, [internal_vm_1, internal_vm_2])
self.validate_Internal_LB_Rule(int_lb_rule_1, state="Active",
vm_array=[internal_vm, internal_vm_1, internal_vm_2])
int_lb_rule_2.assign(self.api_client, [internal_vm_1, internal_vm_2])
self.validate_Internal_LB_Rule(int_lb_rule_2, state="Active",
vm_array=[internal_vm, internal_vm_1, internal_vm_2])
# Validating InternalLbVm state
self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running")
# VSD Verification
self.verify_vsp_LB_device(int_lb_vm)
# Adding Network ACL rules in the Internal tier
self.debug("Adding Network ACL rules to make the created Internal LB rules (SSH & HTTP) accessible...")
ssh_rule = self.create_NetworkAclRule(self.test_data["ingress_rule"], network=internal_tier)
http_rule = self.create_NetworkAclRule(self.test_data["http_rule"], network=internal_tier)
# VSD verification
self.verify_vsp_firewall_rule(ssh_rule)
self.verify_vsp_firewall_rule(http_rule)
# Creating Static NAT Rule for the VM in the Public tier
public_ip = self.acquire_PublicIPAddress(public_tier, vpc)
self.validate_PublicIPAddress(public_ip, public_tier)
self.create_StaticNatRule_For_VM(public_vm, public_ip, public_tier)
self.validate_PublicIPAddress(public_ip, public_tier, static_nat=True, vm=public_vm)
# VSD verification
self.verify_vsp_floating_ip(public_tier, public_vm, public_ip.ipaddress, vpc)
# Adding Network ACL rule in the Public tier
self.debug("Adding Network ACL rule to make the created NAT rule (SSH) accessible...")
public_ssh_rule = self.create_NetworkAclRule(self.test_data["ingress_rule"], network=public_tier)
# VSD verification
self.verify_vsp_firewall_rule(public_ssh_rule)
# Internal LB (wget) traffic test
ssh_client = self.ssh_into_VM(public_vm, public_ip)
wget_file = self.wget_from_vm_cmd(ssh_client,
int_lb_rule_1.sourceipaddress,
self.test_data["http_rule"]["publicport"]
)
# Verifying Internal LB (wget) traffic test
self.verify_lb_wget_file(wget_file, [internal_vm, internal_vm_1, internal_vm_2])
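# The stop/start/wget cycle below is executed three times: with the VPC VR stopped, with the VPC VR running,
# and once more using a forced stop of the InternalLbVm.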
# Stopping the InternalLbVm when the VPC VR is in Stopped state
self.stop_InternalLbVm(int_lb_vm)
self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Stopped")
# VSD Verification
self.verify_vsp_LB_device(int_lb_vm, stopped=True)
# Internal LB (wget) traffic test
ssh_client = self.ssh_into_VM(public_vm, public_ip)
with self.assertRaises(Exception):
self.wget_from_vm_cmd(ssh_client,
int_lb_rule_1.sourceipaddress,
self.test_data["http_rule"]["publicport"]
)
self.debug("Failed to wget file as the InternalLbVm is in stopped state")
# Starting the InternalLbVm when the VPC VR is in Stopped state
self.start_InternalLbVm(int_lb_vm)
self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running")
# VSD Verification
self.verify_vsp_LB_device(int_lb_vm)
# Internal LB (wget) traffic test
ssh_client = self.ssh_into_VM(public_vm, public_ip)
tries = 0
while True:
try:
wget_file = self.wget_from_vm_cmd(ssh_client,
int_lb_rule_1.sourceipaddress,
self.test_data["http_rule"]["publicport"]
)
except Exception as e:
self.debug("Failed to wget file via the InternalLbVm after re-starting the InternalLbVm appliance: %s"
% e)
if tries == 10:
break
self.debug("Waiting for the InternalLbVm to be fully resolved for (wget) traffic test...")
time.sleep(30)
tries += 1
continue
self.debug("Internal LB (wget) traffic test is successful after re-starting the InternalLbVm appliance")
break
# Verifying Internal LB (wget) traffic test
self.verify_lb_wget_file(wget_file, [internal_vm, internal_vm_1, internal_vm_2])
# Starting the VPC VR
# VPC VR has no effect on the InternalLbVm functionality
Router.start(self.api_client, id=vr.id)
self.check_Router_state(vr)
self.validate_Network(public_tier, state="Implemented")
self.validate_Network(internal_tier, state="Implemented")
# VSD verification
self.verify_vsp_router(vr)
self.verify_vsp_network(self.domain.id, public_tier, vpc)
self.verify_vsp_network(self.domain.id, internal_tier, vpc)
# Stopping the InternalLbVm when the VPC VR is in Running state
self.stop_InternalLbVm(int_lb_vm)
self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Stopped")
# VSD Verification
self.verify_vsp_LB_device(int_lb_vm, stopped=True)
# Internal LB (wget) traffic test
ssh_client = self.ssh_into_VM(public_vm, public_ip)
with self.assertRaises(Exception):
self.wget_from_vm_cmd(ssh_client,
int_lb_rule_1.sourceipaddress,
self.test_data["http_rule"]["publicport"]
)
self.debug("Failed to wget file as the InternalLbVm is in stopped state")
# Starting the InternalLbVm when the VPC VR is in Running state
self.start_InternalLbVm(int_lb_vm)
self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running")
# VSD Verification
self.verify_vsp_LB_device(int_lb_vm)
# Internal LB (wget) traffic test
ssh_client = self.ssh_into_VM(public_vm, public_ip)
tries = 0
while True:
try:
wget_file = self.wget_from_vm_cmd(ssh_client,
int_lb_rule_1.sourceipaddress,
self.test_data["http_rule"]["publicport"]
)
except Exception as e:
self.debug("Failed to wget file via the InternalLbVm after re-starting the InternalLbVm appliance: %s"
% e)
if tries == 10:
break
self.debug("Waiting for the InternalLbVm to be fully resolved for (wget) traffic test...")
time.sleep(30)
tries += 1
continue
self.debug("Internal LB (wget) traffic test is successful after re-starting the InternalLbVm appliance")
break
# Verifying Internal LB (wget) traffic test
self.verify_lb_wget_file(wget_file, [internal_vm, internal_vm_1, internal_vm_2])
# Force Stopping the InternalLbVm when the VPC VR is in Running state
self.stop_InternalLbVm(int_lb_vm, force=True)
self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Stopped")
# VSD Verification
self.verify_vsp_LB_device(int_lb_vm, stopped=True)
# Internal LB (wget) traffic test
ssh_client = self.ssh_into_VM(public_vm, public_ip)
with self.assertRaises(Exception):
self.wget_from_vm_cmd(ssh_client,
int_lb_rule_1.sourceipaddress,
self.test_data["http_rule"]["publicport"]
)
self.debug("Failed to wget file as the InternalLbVm is in stopped state")
# Starting the InternalLbVm when the VPC VR is in Running state
self.start_InternalLbVm(int_lb_vm)
self.check_InternalLbVm_state(internal_tier, int_lb_rule_1.sourceipaddress, state="Running")
# VSD Verification
self.verify_vsp_LB_device(int_lb_vm)
# Internal LB (wget) traffic test
ssh_client = self.ssh_into_VM(public_vm, public_ip)
tries = 0
while True:
try:
wget_file = self.wget_from_vm_cmd(ssh_client,
int_lb_rule_1.sourceipaddress,
self.test_data["http_rule"]["publicport"]
)
except Exception as e:
self.debug("Failed to wget file via the InternalLbVm after re-starting the InternalLbVm appliance: %s"
% e)
if tries == 10:
break
self.debug("Waiting for the InternalLbVm to be fully resolved for (wget) traffic test...")
time.sleep(30)
tries += 1
continue
self.debug("Internal LB (wget) traffic test is successful after re-starting the InternalLbVm appliance")
break
# Verifying Internal LB (wget) traffic test
self.verify_lb_wget_file(wget_file, [internal_vm, internal_vm_1, internal_vm_2])
5a26c83f86552f2cfab1b7dd7128fb2b0f1d7428 | 7,856 | py | Python | tests/test_observable/test_takewhile.py | MichaelSchneeberger/RxPY | 994f974d37783f63c5d9e018a316fa9b06ba9337 | ["ECL-2.0", "Apache-2.0"] | null | null | null |
import unittest
import rx
from rx import operators as ops
from rx.testing import TestScheduler, ReactiveTest, is_prime
on_next = ReactiveTest.on_next
on_completed = ReactiveTest.on_completed
on_error = ReactiveTest.on_error
subscribe = ReactiveTest.subscribe
subscribed = ReactiveTest.subscribed
disposed = ReactiveTest.disposed
created = ReactiveTest.created
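# Each recorded notification pairs a virtual-time tick with a value, e.g. on_next(210, 2) emits the value 2
# at tick 210. scheduler.start() subscribes at tick 200 by default, which is why the notifications scheduled
# at ticks 90 and 110 never reach the observer in the assertions below.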
class TestTakeWhile(unittest.TestCase):
def test_take_while_complete_before(self):
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(on_next(90, -1), on_next(110, -1), on_next(210, 2), on_next(260, 5), on_next(290, 13), on_next(
320, 3), on_completed(330), on_next(350, 7), on_next(390, 4), on_next(410, 17), on_next(450, 8), on_next(500, 23), on_completed(600))
invoked = 0
def factory():
def predicate(x):
nonlocal invoked
invoked += 1
return is_prime(x)
return xs.pipe(ops.take_while(predicate))
results = scheduler.start(factory)
assert results.messages == [on_next(210, 2), on_next(
260, 5), on_next(290, 13), on_next(320, 3), on_completed(330)]
assert xs.subscriptions == [subscribe(200, 330)]
assert(invoked == 4)
def test_take_while_complete_after(self):
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(on_next(90, -1), on_next(110, -1), on_next(210, 2), on_next(260, 5), on_next(290, 13), on_next(
320, 3), on_next(350, 7), on_next(390, 4), on_next(410, 17), on_next(450, 8), on_next(500, 23), on_completed(600))
invoked = 0
def factory():
def predicate(x):
nonlocal invoked
invoked += 1
return is_prime(x)
return xs.pipe(ops.take_while(predicate))
results = scheduler.start(factory)
assert results.messages == [on_next(210, 2), on_next(260, 5), on_next(
290, 13), on_next(320, 3), on_next(350, 7), on_completed(390)]
assert xs.subscriptions == [subscribe(200, 390)]
assert(invoked == 6)
def test_take_while_error_before(self):
ex = 'ex'
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(on_next(90, -1), on_next(110, -1), on_next(210, 2), on_next(260, 5), on_error(
270, ex), on_next(290, 13), on_next(320, 3), on_next(350, 7), on_next(390, 4), on_next(410, 17), on_next(450, 8), on_next(500, 23))
invoked = 0
def factory():
def predicate(x):
nonlocal invoked
invoked += 1
return is_prime(x)
return xs.pipe(ops.take_while(predicate))
results = scheduler.start(factory)
assert results.messages == [on_next(210, 2), on_next(260, 5), on_error(270, ex)]
assert xs.subscriptions == [subscribe(200, 270)]
assert(invoked == 2)
def test_take_while_error_after(self):
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(on_next(90, -1), on_next(110, -1), on_next(210, 2), on_next(260, 5), on_next(290, 13), on_next(
320, 3), on_next(350, 7), on_next(390, 4), on_next(410, 17), on_next(450, 8), on_next(500, 23), on_error(600, 'ex'))
invoked = 0
def factory():
def predicate(x):
nonlocal invoked
invoked += 1
return is_prime(x)
return xs.pipe(ops.take_while(predicate))
results = scheduler.start(factory)
assert results.messages == [on_next(210, 2), on_next(260, 5), on_next(
290, 13), on_next(320, 3), on_next(350, 7), on_completed(390)]
assert xs.subscriptions == [subscribe(200, 390)]
assert(invoked == 6)
def test_take_while_dispose_before(self):
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(on_next(90, -1), on_next(110, -1), on_next(210, 2), on_next(260, 5), on_next(290, 13), on_next(
320, 3), on_next(350, 7), on_next(390, 4), on_next(410, 17), on_next(450, 8), on_next(500, 23), on_completed(600))
invoked = 0
def create():
def predicate(x):
nonlocal invoked
invoked += 1
return is_prime(x)
return xs.pipe(ops.take_while(predicate))
results = scheduler.start(create, disposed=300)
assert results.messages == [on_next(210, 2), on_next(260, 5), on_next(290, 13)]
assert xs.subscriptions == [subscribe(200, 300)]
assert(invoked == 3)
def test_take_while_dispose_after(self):
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(on_next(90, -1), on_next(110, -1), on_next(210, 2), on_next(260, 5), on_next(290, 13), on_next(
320, 3), on_next(350, 7), on_next(390, 4), on_next(410, 17), on_next(450, 8), on_next(500, 23), on_completed(600))
invoked = 0
def create():
def predicate(x):
nonlocal invoked
invoked += 1
return is_prime(x)
return xs.pipe(ops.take_while(predicate))
results = scheduler.start(create, disposed=400)
assert results.messages == [on_next(210, 2), on_next(260, 5), on_next(
290, 13), on_next(320, 3), on_next(350, 7), on_completed(390)]
assert xs.subscriptions == [subscribe(200, 390)]
assert(invoked == 6)
def test_take_while_zero(self):
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(on_next(90, -1), on_next(110, -1), on_next(205, 100), on_next(210, 2), on_next(260, 5), on_next(
290, 13), on_next(320, 3), on_next(350, 7), on_next(390, 4), on_next(410, 17), on_next(450, 8), on_next(500, 23), on_completed(600))
invoked = 0
def create():
def predicate(x):
nonlocal invoked
invoked += 1
return is_prime(x)
return xs.pipe(ops.take_while(predicate))
results = scheduler.start(create, disposed=300)
assert results.messages == [on_completed(205)]
assert xs.subscriptions == [subscribe(200, 205)]
assert (invoked == 1)
def test_take_while_on_error(self):
ex = 'ex'
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(on_next(90, -1), on_next(110, -1), on_next(210, 2), on_next(260, 5), on_next(290, 13), on_next(
320, 3), on_next(350, 7), on_next(390, 4), on_next(410, 17), on_next(450, 8), on_next(500, 23), on_completed(600))
invoked = 0
def factory():
def predicate(x):
nonlocal invoked
invoked += 1
if invoked == 3:
raise Exception(ex)
return is_prime(x)
return xs.pipe(ops.take_while(predicate))
results = scheduler.start(factory)
assert results.messages == [on_next(210, 2), on_next(260, 5), on_error(290, ex)]
assert xs.subscriptions == [subscribe(200, 290)]
assert(invoked == 3)
def test_take_while_index(self):
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(on_next(90, -1), on_next(110, -1), on_next(205, 100), on_next(210, 2), on_next(260, 5), on_next(
290, 13), on_next(320, 3), on_next(350, 7), on_next(390, 4), on_next(410, 17), on_next(450, 8), on_next(500, 23), on_completed(600))
def factory():
return xs.pipe(ops.take_while_indexed(lambda x, i: i < 5))
results = scheduler.start(factory)
assert results.messages == [on_next(205, 100), on_next(210, 2), on_next(
260, 5), on_next(290, 13), on_next(320, 3), on_completed(350)]
assert xs.subscriptions == [subscribe(200, 350)]
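# A minimal usage sketch (assumption: RxPY 3.x, where rx.of and the pipe-style
# operators exercised by these tests are available). take_while forwards items
# until the predicate first returns False and then completes; take_while_indexed
# additionally passes the zero-based index to the predicate.
import rx
from rx import operators as ops

rx.of(2, 3, 5, 4, 7).pipe(
    ops.take_while(lambda x: x != 4)
).subscribe(print)  # prints 2, 3, 5, then completes at the first failing item

rx.of(10, 20, 30, 40).pipe(
    ops.take_while_indexed(lambda x, i: i < 2)
).subscribe(print)  # prints 10, 20, then completes once the index reaches 2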
| 41.787234
| 145
| 0.596741
| 1,106
| 7,856
| 4.033454
| 0.08047
| 0.180229
| 0.028245
| 0.038108
| 0.884555
| 0.835239
| 0.814616
| 0.801166
| 0.801166
| 0.790182
| 0
| 0.122189
| 0.26973
| 7,856
| 187
| 146
| 42.010695
| 0.655395
| 0
| 0
| 0.693333
| 0
| 0
| 0.000764
| 0
| 0
| 0
| 0
| 0
| 0.173333
| 1
| 0.173333
| false
| 0
| 0.026667
| 0.006667
| 0.32
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
5a3c83fc8d3f89205d28360945fbde9c19464a13
| 25
|
py
|
Python
|
14.py
|
jsplyy/PythonTip
|
e0d6a4471caa4abf1ce1d3b0789beb39704b91e7
|
[
"Apache-2.0"
] | null | null | null |
14.py
|
jsplyy/PythonTip
|
e0d6a4471caa4abf1ce1d3b0789beb39704b91e7
|
[
"Apache-2.0"
] | null | null | null |
14.py
|
jsplyy/PythonTip
|
e0d6a4471caa4abf1ce1d3b0789beb39704b91e7
|
[
"Apache-2.0"
] | null | null | null |
import this
print(this.s)
| 12.5
| 12
| 0.8
| 5
| 25
| 4
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.16
| 25
| 2
| 13
| 12.5
| 0.952381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.5
| null | null | 0.5
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 1
|
0
| 6
|
5a568e30569076d5d96b8101e66e4049d7fc2dcf
| 37
|
py
|
Python
|
openslides/utils/auth/__init__.py
|
DebVortex/OpenSlides
|
f17f1a723a034dd7ebe80cd4ff4385d97d020c5f
|
[
"MIT"
] | null | null | null |
openslides/utils/auth/__init__.py
|
DebVortex/OpenSlides
|
f17f1a723a034dd7ebe80cd4ff4385d97d020c5f
|
[
"MIT"
] | null | null | null |
openslides/utils/auth/__init__.py
|
DebVortex/OpenSlides
|
f17f1a723a034dd7ebe80cd4ff4385d97d020c5f
|
[
"MIT"
] | null | null | null |
from .AnonymousAuth import * # noqa
| 18.5
| 36
| 0.72973
| 4
| 37
| 6.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.189189
| 37
| 1
| 37
| 37
| 0.9
| 0.108108
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5a6bc0ea58a494099c0c7001e41e5d0c095d80c1
| 30,288
|
py
|
Python
|
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/tests/unit/modules/source_control/gitlab/gitlab.py
|
tr3ck3r/linklight
|
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
|
[
"MIT"
] | null | null | null |
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/tests/unit/modules/source_control/gitlab/gitlab.py
|
tr3ck3r/linklight
|
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
|
[
"MIT"
] | null | null | null |
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/tests/unit/modules/source_control/gitlab/gitlab.py
|
tr3ck3r/linklight
|
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import
import sys
from httmock import response # noqa
from httmock import urlmatch # noqa
from ansible_collections.community.general.tests.unit.compat import unittest
from gitlab import Gitlab
class FakeAnsibleModule(object):
def __init__(self):
self.check_mode = False
def fail_json(self, **args):
pass
def exit_json(self, **args):
pass
class GitlabModuleTestCase(unittest.TestCase):
def setUp(self):
unitest_python_version_check_requirement(self)
self.mock_module = FakeAnsibleModule()
self.gitlab_instance = Gitlab("http://localhost", private_token="private_token", api_version=4)
# Python 2.7+ is needed for python-gitlab
GITLAB_MINIMUM_PYTHON_VERSION = (2, 7)
# Verify that the current Python version is at least GITLAB_MINIMUM_PYTHON_VERSION
def python_version_match_requirement():
return sys.version_info >= GITLAB_MINIMUM_PYTHON_VERSION
# Skip the unittest test case if the Python version doesn't meet the requirement
def unitest_python_version_check_requirement(unittest_testcase):
if not python_version_match_requirement():
unittest_testcase.skipTest("Python %s+ is needed for python-gitlab" % ".".join(map(str, GITLAB_MINIMUM_PYTHON_VERSION)))
'''
USER API
'''
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/users", method="get")
def resp_find_user(url, request):
headers = {'content-type': 'application/json'}
content = ('[{"id": 1, "username": "john_smith", "name": "John Smith", "state": "active",'
'"avatar_url": "http://localhost:3000/uploads/user/avatar/1/cd8.jpeg",'
'"web_url": "http://localhost:3000/john_smith"}, {"id": 2,'
'"username": "jack_smith", "name": "Jack Smith", "state": "blocked",'
'"avatar_url": "http://gravatar.com/../e32131cd8.jpeg",'
'"web_url": "http://localhost:3000/jack_smith"}]')
content = content.encode("utf-8")
return response(200, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/users", method="post")
def resp_create_user(url, request):
headers = {'content-type': 'application/json'}
content = ('{"id": 1, "username": "john_smith", "name": "John Smith", "state": "active",'
'"avatar_url": "http://localhost:3000/uploads/user/avatar/1/cd8.jpeg",'
'"web_url": "http://localhost:3000/john_smith","created_at": "2012-05-23T08:00:58Z",'
'"bio": null, "location": null, "public_email": "john@example.com", "skype": "",'
'"linkedin": "", "twitter": "", "website_url": "", "organization": ""}')
content = content.encode("utf-8")
return response(201, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/users/1", method="get")
def resp_get_user(url, request):
headers = {'content-type': 'application/json'}
content = ('{"id": 1, "username": "john_smith", "name": "John Smith",'
'"state": "active",'
'"avatar_url": "http://localhost:3000/uploads/user/avatar/1/cd8.jpeg",'
'"web_url": "http://localhost:3000/john_smith",'
'"created_at": "2012-05-23T08:00:58Z", "bio": null, "location": null,'
'"public_email": "john@example.com", "skype": "", "linkedin": "",'
'"twitter": "", "website_url": "", "organization": "", "is_admin": false}')
content = content.encode("utf-8")
return response(200, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/users/1", method="get")
def resp_get_missing_user(url, request):
headers = {'content-type': 'application/json'}
content = ('{}')
content = content.encode("utf-8")
return response(404, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/users/1", method="delete")
def resp_delete_user(url, request):
headers = {'content-type': 'application/json'}
content = ('{}')
content = content.encode("utf-8")
return response(204, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/users/1", method="delete")
def resp_delete_missing_user(url, request):
headers = {'content-type': 'application/json'}
content = ('{}')
content = content.encode("utf-8")
return response(404, content, headers, None, 5, request)
'''
USER SSHKEY API
'''
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/users/1/keys", method="get")
def resp_get_user_keys(url, request):
headers = {'content-type': 'application/json'}
content = ('[{"id": 1, "title": "Public key",'
'"key": "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM4lhHNedGfBpPJNPpZ7yKu+dnn1SJejgt4596'
'k6YjzGGphH2TUxwKzxcKDKKezwkpfnxPkSMkuEspGRt/aZZ9wa++Oi7Qkr8prgHc4soW6NUlfDzpvZK2H5E7eQa'
'SeP3SAwGmQKUFHCddNaP0L+hM7zhFNzjFvpaMgJw0=",'
'"created_at": "2014-08-01T14:47:39.080Z"},{"id": 3,'
'"title": "Another Public key",'
'"key": "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM4lhHNedGfBpPJNPpZ7yKu+dnn1SJejgt4596'
'k6YjzGGphH2TUxwKzxcKDKKezwkpfnxPkSMkuEspGRt/aZZ9wa++Oi7Qkr8prgHc4soW6NUlfDzpvZK2H5E7eQaS'
'eP3SAwGmQKUFHCddNaP0L+hM7zhFNzjFvpaMgJw0=",'
'"created_at": "2014-08-01T14:47:39.080Z"}]')
content = content.encode("utf-8")
return response(200, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/users/1/keys", method="post")
def resp_create_user_keys(url, request):
headers = {'content-type': 'application/json'}
content = ('{"id": 1, "title": "Private key",'
'"key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDA1YotVDm2mAyk2tPt4E7AHm01sS6JZmcUdRuSuA5z'
'szUJzYPPUSRAX3BCgTqLqYx//UuVncK7YqLVSbbwjKR2Ez5lISgCnVfLVEXzwhv+xawxKWmI7hJ5S0tOv6MJ+Ixy'
'Ta4xcKwJTwB86z22n9fVOQeJTR2dSOH1WJrf0PvRk+KVNY2jTiGHTi9AIjLnyD/jWRpOgtdfkLRc8EzAWrWlgNmH'
'2WOKBw6za0az6XoG75obUdFVdW3qcD0xc809OHLi7FDf+E7U4wiZJCFuUizMeXyuK/SkaE1aee4Qp5R4dxTR4TP9'
'M1XAYkf+kF0W9srZ+mhF069XD/zhUPJsvwEF",'
'"created_at": "2014-08-01T14:47:39.080Z"}')
content = content.encode("utf-8")
return response(201, content, headers, None, 5, request)
'''
GROUP API
'''
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/groups", method="get")
def resp_find_group(url, request):
headers = {'content-type': 'application/json'}
content = ('[{"id": 1, "name": "Foobar Group", "path": "foo-bar",'
'"description": "An interesting group", "visibility": "public",'
'"lfs_enabled": true, "avatar_url": "http://localhost:3000/uploads/group/avatar/1/foo.jpg",'
'"web_url": "http://localhost:3000/groups/foo-bar", "request_access_enabled": false,'
'"full_name": "Foobar Group", "full_path": "foo-bar",'
'"file_template_project_id": 1, "parent_id": null, "projects": []}, {"id": 2, "name": "BarFoo Group", "path": "bar-foor",'
'"description": "An interesting group", "visibility": "public",'
'"lfs_enabled": true, "avatar_url": "http://localhost:3000/uploads/group/avatar/2/bar.jpg",'
'"web_url": "http://localhost:3000/groups/bar-foo", "request_access_enabled": false,'
'"full_name": "BarFoo Group", "full_path": "bar-foo",'
'"file_template_project_id": 1, "parent_id": null, "projects": []}]')
content = content.encode("utf-8")
return response(200, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/groups/1", method="get")
def resp_get_group(url, request):
headers = {'content-type': 'application/json'}
content = ('{"id": 1, "name": "Foobar Group", "path": "foo-bar",'
'"description": "An interesting group", "visibility": "public",'
'"lfs_enabled": true, "avatar_url": "http://localhost:3000/uploads/group/avatar/1/foo.jpg",'
'"web_url": "http://localhost:3000/groups/foo-bar", "request_access_enabled": false,'
'"full_name": "Foobar Group", "full_path": "foo-bar",'
'"file_template_project_id": 1, "parent_id": null, "projects": [{"id": 1,"description": null, "default_branch": "master",'
'"ssh_url_to_repo": "git@example.com:diaspora/diaspora-client.git",'
'"http_url_to_repo": "http://example.com/diaspora/diaspora-client.git",'
'"web_url": "http://example.com/diaspora/diaspora-client",'
'"readme_url": "http://example.com/diaspora/diaspora-client/blob/master/README.md",'
'"tag_list": ["example","disapora client"],"name": "Diaspora Client",'
'"name_with_namespace": "Diaspora / Diaspora Client","path": "diaspora-client",'
'"path_with_namespace": "diaspora/diaspora-client","created_at": "2013-09-30T13:46:02Z",'
'"last_activity_at": "2013-09-30T13:46:02Z","forks_count": 0,'
'"avatar_url": "http://example.com/uploads/project/avatar/4/uploads/avatar.png",'
'"star_count": 0}]}')
content = content.encode("utf-8")
return response(200, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/groups/1", method="get")
def resp_get_missing_group(url, request):
headers = {'content-type': 'application/json'}
content = ('{}')
content = content.encode("utf-8")
return response(404, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/groups", method="post")
def resp_create_group(url, request):
headers = {'content-type': 'application/json'}
content = ('{"id": 1, "name": "Foobar Group", "path": "foo-bar",'
'"description": "An interesting group", "visibility": "public",'
'"lfs_enabled": true, "avatar_url": "http://localhost:3000/uploads/group/avatar/1/foo.jpg",'
'"web_url": "http://localhost:3000/groups/foo-bar", "request_access_enabled": false,'
'"full_name": "Foobar Group", "full_path": "foo-bar",'
'"file_template_project_id": 1, "parent_id": null}')
content = content.encode("utf-8")
return response(200, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/groups", method="post")
def resp_create_subgroup(url, request):
headers = {'content-type': 'application/json'}
content = ('{"id": 2, "name": "BarFoo Group", "path": "bar-foor",'
'"description": "An interesting group", "visibility": "public",'
'"lfs_enabled": true, "avatar_url": "http://localhost:3000/uploads/group/avatar/2/bar.jpg",'
'"web_url": "http://localhost:3000/groups/foo-bar/bar-foo", "request_access_enabled": false,'
'"full_name": "BarFoo Group", "full_path": "foo-bar/bar-foo",'
'"file_template_project_id": 1, "parent_id": 1}')
content = content.encode("utf-8")
return response(200, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/users/1", method="delete")
def resp_delete_group(url, request):
headers = {'content-type': 'application/json'}
content = ('{}')
content = content.encode("utf-8")
return response(204, content, headers, None, 5, request)
'''
GROUP MEMBER API
'''
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/groups/1/members/1", method="get")
def resp_get_member(url, request):
headers = {'content-type': 'application/json'}
content = ('{"id": 1, "username": "raymond_smith", "name": "Raymond Smith", "state": "active",'
'"avatar_url": "https://www.gravatar.com/avatar/c2525a7f58ae3776070e44c106c48e15?s=80&d=identicon",'
'"web_url": "http://192.168.1.8:3000/root", "expires_at": "2012-10-22T14:13:35Z", "access_level": 30}')
content = content.encode("utf-8")
return response(200, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/groups/1/members", method="get")
def resp_find_member(url, request):
headers = {'content-type': 'application/json'}
content = ('[{"id": 1, "username": "raymond_smith", "name": "Raymond Smith", "state": "active",'
'"avatar_url": "https://www.gravatar.com/avatar/c2525a7f58ae3776070e44c106c48e15?s=80&d=identicon",'
'"web_url": "http://192.168.1.8:3000/root", "expires_at": "2012-10-22T14:13:35Z", "access_level": 30},{'
'"id": 2, "username": "john_doe", "name": "John Doe","state": "active",'
'"avatar_url": "https://www.gravatar.com/avatar/c2525a7f58ae3776070e44c106c48e15?s=80&d=identicon",'
'"web_url": "http://192.168.1.8:3000/root","expires_at": "2012-10-22T14:13:35Z",'
'"access_level": 30}]')
content = content.encode("utf-8")
return response(200, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/groups/1/members", method="post")
def resp_add_member(url, request):
headers = {'content-type': 'application/json'}
content = ('{"id": 1, "username": "raymond_smith", "name": "Raymond Smith",'
'"state": "active",'
'"avatar_url": "https://www.gravatar.com/avatar/c2525a7f58ae3776070e44c106c48e15?s=80&d=identicon",'
'"web_url": "http://192.168.1.8:3000/root", "expires_at": "2012-10-22T14:13:35Z",'
'"access_level": 30}')
content = content.encode("utf-8")
return response(200, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/groups/1/members/1", method="put")
def resp_update_member(url, request):
headers = {'content-type': 'application/json'}
content = ('{"id": 1, "username": "raymond_smith", "name": "Raymond Smith",'
'"state": "active",'
'"avatar_url": "https://www.gravatar.com/avatar/c2525a7f58ae3776070e44c106c48e15?s=80&d=identicon",'
'"web_url": "http://192.168.1.8:3000/root", "expires_at": "2012-10-22T14:13:35Z",'
'"access_level": 10}')
content = content.encode("utf-8")
return response(200, content, headers, None, 5, request)
'''
DEPLOY KEY API
'''
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1/deploy_keys", method="get")
def resp_find_project_deploy_key(url, request):
headers = {'content-type': 'application/json'}
content = ('[{"id": 1,"title": "Public key",'
'"key": "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM4lhHNedGfBpPJNPpZ7yKu+dnn1SJejgt4596k6YjzGGphH2TUxwKzxc'
'KDKKezwkpfnxPkSMkuEspGRt/aZZ9wa++Oi7Qkr8prgHc4soW6NUlfDzpvZK2H5E7eQaSeP3SAwGmQKUFHCddNaP0L+hM7zhFNzjFvpaMgJw0=",'
'"created_at": "2013-10-02T10:12:29Z"},{"id": 3,"title": "Another Public key",'
'"key": "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM4lhHNedGfBpPJNPpZ7yKu+dnn1SJejgt4596k6YjzGGphH2TUxwKzxc'
'KDKKezwkpfnxPkSMkuEspGRt/aZZ9wa++Oi7Qkr8prgHc4soW6NUlfDzpvZK2H5E7eQaSeP3SAwGmQKUFHCddNaP0L+hM7zhFNzjFvpaMgJw0=",'
'"created_at": "2013-10-02T11:12:29Z"}]')
content = content.encode("utf-8")
return response(200, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1/deploy_keys/1", method="get")
def resp_get_project_deploy_key(url, request):
headers = {'content-type': 'application/json'}
content = ('{"id": 1,"title": "Public key",'
'"key": "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM4lhHNedGfBpPJNPpZ7yKu+dnn1SJejgt4596k6YjzGGphH2TUxwKzxc'
'KDKKezwkpfnxPkSMkuEspGRt/aZZ9wa++Oi7Qkr8prgHc4soW6NUlfDzpvZK2H5E7eQaSeP3SAwGmQKUFHCddNaP0L+hM7zhFNzjFvpaMgJw0=",'
'"created_at": "2013-10-02T10:12:29Z"}')
content = content.encode("utf-8")
return response(200, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1/deploy_keys", method="post")
def resp_create_project_deploy_key(url, request):
headers = {'content-type': 'application/json'}
content = ('{"id": 1,"title": "Public key",'
'"key": "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM4lhHNedGfBpPJNPpZ7yKu+dnn1SJejgt4596k6YjzGGphH2TUxwKzxc'
'KDKKezwkpfnxPkSMkuEspGRt/aZZ9wa++Oi7Qkr8prgHc4soW6NUlfDzpvZK2H5E7eQaSeP3SAwGmQKUFHCddNaP0L+hM7zhFNzjFvpaMgJw0=",'
'"created_at": "2013-10-02T10:12:29Z"}')
content = content.encode("utf-8")
return response(201, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1/deploy_keys/1", method="delete")
def resp_delete_project_deploy_key(url, request):
headers = {'content-type': 'application/json'}
content = ('{}')
content = content.encode("utf-8")
return response(204, content, headers, None, 5, request)
'''
PROJECT API
'''
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects", method="get")
def resp_find_project(url, request):
headers = {'content-type': 'application/json'}
content = ('[{"id": 1,"description": null, "default_branch": "master",'
'"ssh_url_to_repo": "git@example.com:diaspora/diaspora-client.git",'
'"http_url_to_repo": "http://example.com/diaspora/diaspora-client.git",'
'"web_url": "http://example.com/diaspora/diaspora-client",'
'"readme_url": "http://example.com/diaspora/diaspora-client/blob/master/README.md",'
'"tag_list": ["example","disapora client"],"name": "Diaspora Client",'
'"name_with_namespace": "Diaspora / Diaspora Client","path": "diaspora-client",'
'"path_with_namespace": "diaspora/diaspora-client","created_at": "2013-09-30T13:46:02Z",'
'"last_activity_at": "2013-09-30T13:46:02Z","forks_count": 0,'
'"avatar_url": "http://example.com/uploads/project/avatar/4/uploads/avatar.png",'
'"star_count": 0}]')
content = content.encode("utf-8")
return response(200, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1", method="get")
def resp_get_project(url, request):
headers = {'content-type': 'application/json'}
content = ('{"id": 1,"description": null, "default_branch": "master",'
'"ssh_url_to_repo": "git@example.com:diaspora/diaspora-client.git",'
'"http_url_to_repo": "http://example.com/diaspora/diaspora-client.git",'
'"web_url": "http://example.com/diaspora/diaspora-client",'
'"readme_url": "http://example.com/diaspora/diaspora-client/blob/master/README.md",'
'"tag_list": ["example","disapora client"],"name": "Diaspora Client",'
'"name_with_namespace": "Diaspora / Diaspora Client","path": "diaspora-client",'
'"path_with_namespace": "diaspora/diaspora-client","created_at": "2013-09-30T13:46:02Z",'
'"last_activity_at": "2013-09-30T13:46:02Z","forks_count": 0,'
'"avatar_url": "http://example.com/uploads/project/avatar/4/uploads/avatar.png",'
'"star_count": 0}')
content = content.encode("utf-8")
return response(200, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/foo-bar%2Fdiaspora-client", method="get")
def resp_get_project_by_name(url, request):
headers = {'content-type': 'application/json'}
content = ('{"id": 1,"description": null, "default_branch": "master",'
'"ssh_url_to_repo": "git@example.com:diaspora/diaspora-client.git",'
'"http_url_to_repo": "http://example.com/diaspora/diaspora-client.git",'
'"web_url": "http://example.com/diaspora/diaspora-client",'
'"readme_url": "http://example.com/diaspora/diaspora-client/blob/master/README.md",'
'"tag_list": ["example","disapora client"],"name": "Diaspora Client",'
'"name_with_namespace": "Diaspora / Diaspora Client","path": "diaspora-client",'
'"path_with_namespace": "diaspora/diaspora-client","created_at": "2013-09-30T13:46:02Z",'
'"last_activity_at": "2013-09-30T13:46:02Z","forks_count": 0,'
'"avatar_url": "http://example.com/uploads/project/avatar/4/uploads/avatar.png",'
'"star_count": 0}')
content = content.encode("utf-8")
return response(200, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/groups/1/projects", method="get")
def resp_find_group_project(url, request):
headers = {'content-type': 'application/json'}
content = ('[{"id": 1,"description": null, "default_branch": "master",'
'"ssh_url_to_repo": "git@example.com:diaspora/diaspora-client.git",'
'"http_url_to_repo": "http://example.com/diaspora/diaspora-client.git",'
'"web_url": "http://example.com/diaspora/diaspora-client",'
'"readme_url": "http://example.com/diaspora/diaspora-client/blob/master/README.md",'
'"tag_list": ["example","disapora client"],"name": "Diaspora Client",'
'"name_with_namespace": "Diaspora / Diaspora Client","path": "diaspora-client",'
'"path_with_namespace": "diaspora/diaspora-client","created_at": "2013-09-30T13:46:02Z",'
'"last_activity_at": "2013-09-30T13:46:02Z","forks_count": 0,'
'"avatar_url": "http://example.com/uploads/project/avatar/4/uploads/avatar.png",'
'"star_count": 0}]')
content = content.encode("utf-8")
return response(200, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/groups/1/projects/1", method="get")
def resp_get_group_project(url, request):
headers = {'content-type': 'application/json'}
content = ('{"id": 1,"description": null, "default_branch": "master",'
'"ssh_url_to_repo": "git@example.com:diaspora/diaspora-client.git",'
'"http_url_to_repo": "http://example.com/diaspora/diaspora-client.git",'
'"web_url": "http://example.com/diaspora/diaspora-client",'
'"readme_url": "http://example.com/diaspora/diaspora-client/blob/master/README.md",'
'"tag_list": ["example","disapora client"],"name": "Diaspora Client",'
'"name_with_namespace": "Diaspora / Diaspora Client","path": "diaspora-client",'
'"path_with_namespace": "diaspora/diaspora-client","created_at": "2013-09-30T13:46:02Z",'
'"last_activity_at": "2013-09-30T13:46:02Z","forks_count": 0,'
'"avatar_url": "http://example.com/uploads/project/avatar/4/uploads/avatar.png",'
'"star_count": 0}')
content = content.encode("utf-8")
return response(200, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects", method="post")
def resp_create_project(url, request):
headers = {'content-type': 'application/json'}
content = ('{"id": 1,"description": null, "default_branch": "master",'
'"ssh_url_to_repo": "git@example.com:diaspora/diaspora-client.git",'
'"http_url_to_repo": "http://example.com/diaspora/diaspora-client.git",'
'"web_url": "http://example.com/diaspora/diaspora-client",'
'"readme_url": "http://example.com/diaspora/diaspora-client/blob/master/README.md",'
'"tag_list": ["example","disapora client"],"name": "Diaspora Client",'
'"name_with_namespace": "Diaspora / Diaspora Client","path": "diaspora-client",'
'"path_with_namespace": "diaspora/diaspora-client","created_at": "2013-09-30T13:46:02Z",'
'"last_activity_at": "2013-09-30T13:46:02Z","forks_count": 0,'
'"avatar_url": "http://example.com/uploads/project/avatar/4/uploads/avatar.png",'
'"star_count": 0}')
content = content.encode("utf-8")
return response(201, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1", method="delete")
def resp_delete_project(url, request):
headers = {'content-type': 'application/json'}
content = ('{}')
content = content.encode("utf-8")
return response(204, content, headers, None, 5, request)
'''
HOOK API
'''
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1/hooks", method="get")
def resp_find_project_hook(url, request):
headers = {'content-type': 'application/json'}
content = ('[{"id": 1,"url": "http://example.com/hook","project_id": 3,'
'"push_events": true,"push_events_branch_filter": "","issues_events": true,'
'"confidential_issues_events": true,"merge_requests_events": true,'
'"tag_push_events": true,"note_events": true,"job_events": true,'
'"pipeline_events": true,"wiki_page_events": true,"enable_ssl_verification": true,'
'"created_at": "2012-10-12T17:04:47Z"}]')
content = content.encode("utf-8")
return response(200, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1/hooks/1", method="get")
def resp_get_project_hook(url, request):
headers = {'content-type': 'application/json'}
content = ('{"id": 1,"url": "http://example.com/hook","project_id": 3,'
'"push_events": true,"push_events_branch_filter": "","issues_events": true,'
'"confidential_issues_events": true,"merge_requests_events": true,'
'"tag_push_events": true,"note_events": true,"job_events": true,'
'"pipeline_events": true,"wiki_page_events": true,"enable_ssl_verification": true,'
'"created_at": "2012-10-12T17:04:47Z"}')
content = content.encode("utf-8")
return response(200, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1/hooks", method="post")
def resp_create_project_hook(url, request):
headers = {'content-type': 'application/json'}
content = ('{"id": 1,"url": "http://example.com/hook","project_id": 3,'
'"push_events": true,"push_events_branch_filter": "","issues_events": true,'
'"confidential_issues_events": true,"merge_requests_events": true,'
'"tag_push_events": true,"note_events": true,"job_events": true,'
'"pipeline_events": true,"wiki_page_events": true,"enable_ssl_verification": true,'
'"created_at": "2012-10-12T17:04:47Z"}')
content = content.encode("utf-8")
return response(201, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1/hooks/1", method="delete")
def resp_delete_project_hook(url, request):
headers = {'content-type': 'application/json'}
content = ('{}')
content = content.encode("utf-8")
return response(204, content, headers, None, 5, request)
'''
RUNNER API
'''
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/runners/all", method="get")
def resp_find_runners_all(url, request):
headers = {'content-type': 'application/json'}
content = ('[{"active": true,"description": "test-1-20150125","id": 1,'
'"is_shared": false,"ip_address": "127.0.0.1","name": null,'
'"online": true,"status": "online"},{"active": true,'
'"description": "test-2-20150125","id": 2,"ip_address": "127.0.0.1",'
'"is_shared": false,"name": null,"online": false,"status": "offline"}]')
content = content.encode("utf-8")
return response(200, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/runners", method="get")
def resp_find_runners_list(url, request):
headers = {'content-type': 'application/json',
"X-Page": 1,
"X-Next-Page": 2,
"X-Per-Page": 1,
"X-Total-Pages": 1,
"X-Total": 2}
content = ('[{"active": true,"description": "test-1-20150125","id": 1,'
'"is_shared": false,"ip_address": "127.0.0.1","name": null,'
'"online": true,"status": "online"},{"active": true,'
'"description": "test-2-20150125","id": 2,"ip_address": "127.0.0.1",'
'"is_shared": false,"name": null,"online": false,"status": "offline"}]')
content = content.encode("utf-8")
return response(200, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/runners/1", method="get")
def resp_get_runner(url, request):
headers = {'content-type': 'application/json'}
content = ('{"active": true,"description": "test-1-20150125","id": 1,'
'"is_shared": false,"ip_address": "127.0.0.1","name": null,'
'"online": true,"status": "online"}')
content = content.encode("utf-8")
return response(200, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/runners", method="post")
def resp_create_runner(url, request):
headers = {'content-type': 'application/json'}
content = ('{"active": true,"description": "test-1-20150125","id": 1,'
'"is_shared": false,"ip_address": "127.0.0.1","name": null,'
'"online": true,"status": "online"}')
content = content.encode("utf-8")
return response(201, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/runners/1", method="delete")
def resp_delete_runner(url, request):
headers = {'content-type': 'application/json'}
content = ('{}')
content = content.encode("utf-8")
return response(204, content, headers, None, 5, request)
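# A hedged usage sketch (not part of the original file; the test class and
# assertion below are illustrative). httmock responders such as resp_find_user
# are activated with the HTTMock context manager, so requests issued by the
# python-gitlab client inside the block are answered with the canned JSON above
# instead of reaching a live server.
from httmock import HTTMock

class GitlabUserLookupTestCase(GitlabModuleTestCase):
    def test_find_user_with_mocked_api(self):
        with HTTMock(resp_find_user):
            users = self.gitlab_instance.users.list()
        self.assertEqual(users[0].username, "john_smith")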
| 52.130809
| 137
| 0.634773
| 3,532
| 30,288
| 5.30974
| 0.090034
| 0.041804
| 0.049269
| 0.04863
| 0.907913
| 0.898902
| 0.881305
| 0.872454
| 0.872454
| 0.851392
| 0
| 0.055058
| 0.178454
| 30,288
| 580
| 138
| 52.22069
| 0.69863
| 0.012216
| 0
| 0.663616
| 0
| 0.130435
| 0.559185
| 0.1545
| 0
| 0
| 0
| 0
| 0
| 1
| 0.100687
| false
| 0.004577
| 0.01373
| 0.002288
| 0.208238
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
5a7f83046bd6d83bc2ab6bd8a9228833d913dfdc
| 4,542
|
py
|
Python
|
tests/test_lsh.py
|
simonemainardi/LSHash
|
c7b02f899b32982ca69134cae707176489f73f48
|
[
"MIT"
] | 61
|
2015-02-05T23:32:20.000Z
|
2022-01-26T12:21:45.000Z
|
tests/test_lsh.py
|
simonemainardi/LSHash
|
c7b02f899b32982ca69134cae707176489f73f48
|
[
"MIT"
] | null | null | null |
tests/test_lsh.py
|
simonemainardi/LSHash
|
c7b02f899b32982ca69134cae707176489f73f48
|
[
"MIT"
] | 11
|
2015-03-12T20:27:08.000Z
|
2021-10-14T00:48:23.000Z
|
import random
import string
from unittest import TestCase
from redis import StrictRedis
from pprint import pprint
import sys
import os
# add the LSHash package to the current python path
sys.path.insert(0, os.path.abspath('../'))
# now we can use our lshash package and not the standard one
from lshash import LSHash
class TestLSHash(TestCase):
num_elements = 100
def setUp(self):
self.els = []
self.el_names = []
for i in range(self.num_elements):
el = [random.randint(0, 100) for _ in range(8)]
elname = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(10))
self.els.append(tuple(el))
self.el_names.append(elname)
def test_lshash(self):
lsh = LSHash(6, 8, 1)
for i in range(self.num_elements):
lsh.index(list(self.els[i]))
lsh.index(list(self.els[i])) # multiple insertions
hasht = lsh.hash_tables[0]
itms = [hasht.get_list(k) for k in hasht.keys()]
for itm in itms:
assert itms.count(itm) == 1
for el in itm:
assert el in self.els
for el in self.els:
res = lsh.query(list(el), num_results=1, distance_func='euclidean')[0]
# res is a tuple containing the vector and the distance
el_v, el_dist = res
assert el_v in self.els
assert el_dist == 0
del lsh
def test_lshash_extra_val(self):
lsh = LSHash(6, 8, 1)
for i in range(self.num_elements):
lsh.index(list(self.els[i]), self.el_names[i])
hasht = lsh.hash_tables[0]
itms = [hasht.get_list(k) for k in hasht.keys()]
for itm in itms:
for el in itm:
assert el[0] in self.els
assert el[1] in self.el_names
for el in self.els:
# res is a list, so we need to select the first entry only
res = lsh.query(list(el), num_results=1, distance_func='euclidean')[0]
# vector and name are in the first element of the tuple res[0]
el_v, el_name = res[0]
# the distance is in the second element of the tuple
el_dist = res[1]
assert el_v in self.els
assert el_name in self.el_names
assert el_dist == 0
del lsh
def test_lshash_redis(self):
"""
Test external lshash module
"""
config = {"redis": {"host": 'localhost', "port": 6379, "db": 15}}
sr = StrictRedis(**config['redis'])
sr.flushdb()
lsh = LSHash(6, 8, 1, config)
for i in range(self.num_elements):
lsh.index(list(self.els[i]))
lsh.index(list(self.els[i])) # multiple insertions should be prevented by the library
hasht = lsh.hash_tables[0]
itms = [hasht.get_list(k) for k in hasht.keys()]
for itm in itms:
for el in itm:
assert itms.count(itm) == 1 # have multiple insertions been prevented?
assert el in self.els
for el in self.els:
res = lsh.query(list(el), num_results=1, distance_func='euclidean')[0]
el_v, el_dist = res
assert el_v in self.els
assert el_dist == 0
del lsh
sr.flushdb()
def test_lshash_redis_extra_val(self):
"""
Test external lshash module
"""
config = {"redis": {"host": 'localhost', "port": 6379, "db": 15}}
sr = StrictRedis(**config['redis'])
sr.flushdb()
lsh = LSHash(6, 8, 1, config)
for i in range(self.num_elements):
lsh.index(list(self.els[i]), self.el_names[i])
lsh.index(list(self.els[i]), self.el_names[i]) # multiple insertions
hasht = lsh.hash_tables[0]
itms = [hasht.get_list(k) for k in hasht.keys()]
for itm in itms:
assert itms.count(itm) == 1
for el in itm:
assert el[0] in self.els
assert el[1] in self.el_names
for el in self.els:
res = lsh.query(list(el), num_results=1, distance_func='euclidean')[0]
# vector and name are in the first element of the tuple res[0]
el_v, el_name = res[0]
# the distance is in the second element of the tuple
el_dist = res[1]
assert el_v in self.els
assert el_name in self.el_names
assert el_dist == 0
del lsh
sr.flushdb()
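# A minimal sketch reusing the same LSHash API exercised by the tests above:
# 6-bit hashes, 8-dimensional input vectors, a single in-memory hash table.
# The vectors are illustrative placeholders.
from lshash import LSHash

lsh = LSHash(6, 8, 1)
lsh.index([1, 2, 3, 4, 5, 6, 7, 8])
lsh.index([2, 3, 4, 5, 6, 7, 8, 9])
# Each query result is a (vector, distance) tuple, exactly as test_lshash unpacks it.
vector, distance = lsh.query([1, 2, 3, 4, 5, 6, 7, 7],
                             num_results=1, distance_func='euclidean')[0]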
| 35.484375
| 102
| 0.561867
| 660
| 4,542
| 3.769697
| 0.181818
| 0.059084
| 0.043408
| 0.045016
| 0.735531
| 0.730305
| 0.730305
| 0.719855
| 0.712621
| 0.70217
| 0
| 0.021171
| 0.334434
| 4,542
| 127
| 103
| 35.76378
| 0.801852
| 0.139586
| 0
| 0.777778
| 0
| 0
| 0.025201
| 0
| 0
| 0
| 0
| 0
| 0.191919
| 1
| 0.050505
| false
| 0
| 0.080808
| 0
| 0.151515
| 0.010101
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ce6f5f78e62d7efe7437fd9e040adcd0762b6b6c
| 29
|
py
|
Python
|
aiml/__init__.py
|
edjdavid/aiml
|
6035cf3575137a8022fd373b8be9cfe16ee4ec61
|
[
"Apache-2.0"
] | null | null | null |
aiml/__init__.py
|
edjdavid/aiml
|
6035cf3575137a8022fd373b8be9cfe16ee4ec61
|
[
"Apache-2.0"
] | null | null | null |
aiml/__init__.py
|
edjdavid/aiml
|
6035cf3575137a8022fd373b8be9cfe16ee4ec61
|
[
"Apache-2.0"
] | null | null | null |
from .models import MLModels
| 14.5
| 28
| 0.827586
| 4
| 29
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137931
| 29
| 1
| 29
| 29
| 0.96
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
cebff441d372278a23033ab3658582d2d203da40
| 6,387
|
py
|
Python
|
tests/core/p2p-proto/test_requests.py
|
Uxio0/trinity
|
38fa6826b80bbda4f608e6dca366e468c362e3c2
|
[
"MIT"
] | null | null | null |
tests/core/p2p-proto/test_requests.py
|
Uxio0/trinity
|
38fa6826b80bbda4f608e6dca366e468c362e3c2
|
[
"MIT"
] | null | null | null |
tests/core/p2p-proto/test_requests.py
|
Uxio0/trinity
|
38fa6826b80bbda4f608e6dca366e468c362e3c2
|
[
"MIT"
] | null | null | null |
import pytest
from p2p.exceptions import PeerConnectionLost
from trinity.protocol.eth.peer import (
ETHPeerPoolEventServer,
)
from tests.core.integration_test_helpers import (
FakeAsyncChainDB,
run_peer_pool_event_server,
run_proxy_peer_pool,
run_request_server,
)
from tests.core.peer_helpers import (
get_directly_linked_peers,
MockPeerPoolWithConnectedPeers,
)
@pytest.mark.asyncio
async def test_proxy_peer_requests(request,
event_bus,
other_event_bus,
event_loop,
chaindb_fresh,
chaindb_20):
server_event_bus = event_bus
client_event_bus = other_event_bus
client_peer, server_peer = await get_directly_linked_peers(
request,
event_loop,
alice_headerdb=FakeAsyncChainDB(chaindb_fresh.db),
bob_headerdb=FakeAsyncChainDB(chaindb_20.db),
)
client_peer_pool = MockPeerPoolWithConnectedPeers([client_peer], event_bus=client_event_bus)
server_peer_pool = MockPeerPoolWithConnectedPeers([server_peer], event_bus=server_event_bus)
async with run_peer_pool_event_server(
client_event_bus, client_peer_pool, handler_type=ETHPeerPoolEventServer
), run_peer_pool_event_server(
server_event_bus, server_peer_pool, handler_type=ETHPeerPoolEventServer
), run_request_server(
server_event_bus,
FakeAsyncChainDB(chaindb_20.db)
), run_proxy_peer_pool(
client_event_bus
) as client_proxy_peer_pool, run_proxy_peer_pool(
server_event_bus
):
proxy_peer = await client_proxy_peer_pool.ensure_proxy_peer(client_peer.remote)
headers = await proxy_peer.requests.get_block_headers(0, 1, 0, False)
assert len(headers) == 1
block_header = headers[0]
assert block_header.block_number == 0
receipts = await proxy_peer.requests.get_receipts(headers)
assert len(receipts) == 1
receipt = receipts[0]
assert receipt[1][0] == block_header.receipt_root
block_bundles = await proxy_peer.requests.get_block_bodies(headers)
assert len(block_bundles) == 1
first_bundle = block_bundles[0]
assert first_bundle[1][0] == block_header.transaction_root
node_data = await proxy_peer.requests.get_node_data((block_header.state_root,))
assert node_data[0][0] == block_header.state_root
@pytest.mark.asyncio
async def test_proxy_peer_requests_with_timeouts(request,
event_bus,
other_event_bus,
event_loop,
chaindb_fresh,
chaindb_20):
server_event_bus = event_bus
client_event_bus = other_event_bus
client_peer, server_peer = await get_directly_linked_peers(
request,
event_loop,
alice_headerdb=FakeAsyncChainDB(chaindb_fresh.db),
bob_headerdb=FakeAsyncChainDB(chaindb_20.db),
)
client_peer_pool = MockPeerPoolWithConnectedPeers([client_peer], event_bus=client_event_bus)
server_peer_pool = MockPeerPoolWithConnectedPeers([server_peer], event_bus=server_event_bus)
async with run_peer_pool_event_server(
client_event_bus, client_peer_pool, handler_type=ETHPeerPoolEventServer
), run_peer_pool_event_server(
server_event_bus, server_peer_pool, handler_type=ETHPeerPoolEventServer
), run_proxy_peer_pool(
client_event_bus
) as client_proxy_peer_pool, run_proxy_peer_pool(
server_event_bus
):
proxy_peer = await client_proxy_peer_pool.ensure_proxy_peer(client_peer.remote)
with pytest.raises(TimeoutError):
await proxy_peer.requests.get_block_headers(0, 1, 0, False, timeout=0.01)
with pytest.raises(TimeoutError):
await proxy_peer.requests.get_receipts((), timeout=0.01)
with pytest.raises(TimeoutError):
await proxy_peer.requests.get_block_bodies((), timeout=0.01)
with pytest.raises(TimeoutError):
await proxy_peer.requests.get_node_data((), timeout=0.01)
@pytest.mark.asyncio
async def test_requests_when_peer_in_client_vanishs(request,
event_bus,
other_event_bus,
event_loop,
chaindb_fresh,
chaindb_20):
server_event_bus = event_bus
client_event_bus = other_event_bus
client_peer, server_peer = await get_directly_linked_peers(
request,
event_loop,
alice_headerdb=FakeAsyncChainDB(chaindb_fresh.db),
bob_headerdb=FakeAsyncChainDB(chaindb_20.db),
)
client_peer_pool = MockPeerPoolWithConnectedPeers([client_peer], event_bus=client_event_bus)
server_peer_pool = MockPeerPoolWithConnectedPeers([server_peer], event_bus=server_event_bus)
async with run_peer_pool_event_server(
client_event_bus, client_peer_pool, handler_type=ETHPeerPoolEventServer
), run_peer_pool_event_server(
server_event_bus, server_peer_pool, handler_type=ETHPeerPoolEventServer
), run_request_server(
server_event_bus,
FakeAsyncChainDB(chaindb_20.db)
), run_proxy_peer_pool(
client_event_bus
) as client_proxy_peer_pool, run_proxy_peer_pool(
server_event_bus
):
proxy_peer = await client_proxy_peer_pool.ensure_proxy_peer(client_peer.remote)
# We remove the peer from the client and expect PeerConnectionLost exceptions to be raised
client_peer_pool.connected_nodes.pop(client_peer.remote)
with pytest.raises(PeerConnectionLost):
await proxy_peer.requests.get_block_headers(0, 1, 0, False)
with pytest.raises(PeerConnectionLost):
await proxy_peer.requests.get_receipts(())
with pytest.raises(PeerConnectionLost):
await proxy_peer.requests.get_block_bodies(())
with pytest.raises(PeerConnectionLost):
await proxy_peer.requests.get_node_data(())
| 38.017857
| 99
| 0.664475
| 719
| 6,387
| 5.467316
| 0.125174
| 0.089545
| 0.060544
| 0.067158
| 0.804121
| 0.798525
| 0.774612
| 0.749936
| 0.749936
| 0.696515
| 0
| 0.01097
| 0.272115
| 6,387
| 167
| 100
| 38.245509
| 0.834588
| 0.013935
| 0
| 0.669173
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.052632
| 1
| 0
| false
| 0
| 0.037594
| 0
| 0.037594
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ced6d5bb1dce7ead52f4bb4118339e36d0d5fd22
| 32
|
py
|
Python
|
crnn/__init__.py
|
shunj-g/detectocr
|
a56ce04841186880843c4345eb6a08977133a6bc
|
[
"MIT"
] | 1
|
2021-02-26T05:15:39.000Z
|
2021-02-26T05:15:39.000Z
|
crnn/__init__.py
|
shunj-g/detectocr
|
a56ce04841186880843c4345eb6a08977133a6bc
|
[
"MIT"
] | null | null | null |
crnn/__init__.py
|
shunj-g/detectocr
|
a56ce04841186880843c4345eb6a08977133a6bc
|
[
"MIT"
] | null | null | null |
import crnn.crnn_torch as models
| 32
| 32
| 0.875
| 6
| 32
| 4.5
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.09375
| 32
| 1
| 32
| 32
| 0.931034
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0cab5891e01d037ed87055cca18214c547edc0d9
| 87
|
py
|
Python
|
hpl2netCDF_client/config/__init__.py
|
achho/dl_toolbox
|
3c15fe7b373ac3f7e601c96be2d98248d73b3b6e
|
[
"BSD-3-Clause"
] | 5
|
2020-06-09T06:30:15.000Z
|
2021-12-29T09:16:11.000Z
|
hpl2netCDF_client/config/__init__.py
|
achho/dl_toolbox
|
3c15fe7b373ac3f7e601c96be2d98248d73b3b6e
|
[
"BSD-3-Clause"
] | null | null | null |
hpl2netCDF_client/config/__init__.py
|
achho/dl_toolbox
|
3c15fe7b373ac3f7e601c96be2d98248d73b3b6e
|
[
"BSD-3-Clause"
] | 3
|
2020-07-12T03:47:19.000Z
|
2021-12-08T16:29:23.000Z
|
import hpl2netCDF_client.hpl2netCDF_client
from hpl2netCDF_client.config import config
| 43.5
| 43
| 0.908046
| 11
| 87
| 6.909091
| 0.454545
| 0.631579
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.037037
| 0.068966
| 87
| 2
| 44
| 43.5
| 0.901235
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0cc22be2beac9a81fde7ce65ced9eb5cb73ddfcc
| 5,334
|
py
|
Python
|
examples/campaigns.py
|
AbdulMoeed-140212/mailwizz-python-sdk
|
669fd974382a8d61036e569f8c4a99838d045fa4
|
[
"MIT"
] | null | null | null |
examples/campaigns.py
|
AbdulMoeed-140212/mailwizz-python-sdk
|
669fd974382a8d61036e569f8c4a99838d045fa4
|
[
"MIT"
] | null | null | null |
examples/campaigns.py
|
AbdulMoeed-140212/mailwizz-python-sdk
|
669fd974382a8d61036e569f8c4a99838d045fa4
|
[
"MIT"
] | null | null | null |
from datetime import datetime, timedelta
from setup_api import setup
from mailwizz.endpoint.campaigns import Campaigns
"""
SETUP THE API
"""
setup()
"""
CREATE THE ENDPOINT
"""
endpoint = Campaigns()
"""
GET ALL ITEMS
"""
response = endpoint.get_campaigns(page=1, per_page=10)
"""
DISPLAY RESPONSE
"""
print(response.content)
"""
GET ONE ITEM
"""
response = endpoint.get_campaign('CAMPAIGN_UID')
"""
DISPLAY RESPONSE
"""
print(response.content)
"""
CREATE ONE CAMPAIGN
"""
response = endpoint.create({
'name': 'My API Campaign', # required
'type': 'regular', # optional: regular or autoresponder
'from_name': 'John Doe', # required
'from_email': 'john.doe@doe.com', # required
'subject': 'Hey, i am testing the campaigns via API', # required
'reply_to': 'john.doe@doe.com', # required
'send_at': (datetime.now() + timedelta(hours=10)).strftime('%Y-%m-%d %H:%M:%S'),
# required, this will use the timezone which the customer selected
'list_uid': 'LIST_UID', # required
# 'segment_uid' : 'SEGMENT-UNIQUE-ID',# optional, only to narrow down
# optional block, defaults are shown
'options': {
'url_tracking': 'no', # yes | no
'json_feed': 'no', # yes | no
'xml_feed': 'no', # yes | no
'plain_text_email': 'yes', # yes | no
'email_stats': None, # a valid email address where we should send the stats after campaign done
# - if autoresponder, uncomment below:
# 'autoresponder_event' : 'AFTER-SUBSCRIBE', # AFTER-SUBSCRIBE or AFTER-CAMPAIGN-OPEN
# 'autoresponder_time_unit' : 'hour', # minute, hour, day, week, month, year
# 'autoresponder_time_value' : 1, # 1 hour after event
# 'autoresponder_open_campaign_id' : 1, # INT id of campaign, only if event is AFTER-CAMPAIGN-OPEN,
# - if this campaign is advanced recurring, you can set a cron job style frequency.
# - please note that this applies only for regular campaigns.
# 'cronjob' : '0 0 * * *', # once a day
# 'cronjob_enabled' : 1, # 1 or 0
},
# required block, archive or template_uid or content : required.
# the templates examples can be found here: Examples
'template': {
# 'archive' : open('template-example.zip', 'r').read(),
'template_uid': 'TEMPLATE_UID',
# 'content' : open('template-example.html', 'rb').read(),
'inline_css': 'no', # yes | no
# 'plain_text' : None, # leave empty to auto generate
'auto_plain_text': 'yes', # yes | no
},
})
"""
DISPLAY RESPONSE
"""
print(response.content)
"""
UPDATE ONE CAMPAIGN
"""
response = endpoint.update('CAMPAIGN_UID', {
'name': 'My API Campaign - UPDATED', # required
'from_name': 'John Doe', # required
'from_email': 'john.doe@doe.com', # required
'subject': 'Hey, i am testing the campaigns via API', # required
'reply_to': 'john.doe@doe.com', # required
'send_at': (datetime.now() + timedelta(hours=10)).strftime('%Y-%m-%d %H:%M:%S'),
# required, this will use the timezone which the customer selected
'list_uid': 'LIST_UID', # required
# 'segment_uid' : 'SEGMENT-UNIQUE-ID',# optional, only to narrow down
# optional block, defaults are shown
'options': {
'url_tracking': 'no', # yes | no
'json_feed': 'no', # yes | no
'xml_feed': 'no', # yes | no
'plain_text_email': 'yes', # yes | no
'email_stats': None, # a valid email address where we should send the stats after campaign done
# - if autoresponder, uncomment below:
# 'autoresponder_event' : 'AFTER-SUBSCRIBE', # AFTER-SUBSCRIBE or AFTER-CAMPAIGN-OPEN
# 'autoresponder_time_unit' : 'hour', # minute, hour, day, week, month, year
# 'autoresponder_time_value' : 1, # 1 hour after event
# 'autoresponder_open_campaign_id' : 1, # INT id of campaign, only if event is AFTER-CAMPAIGN-OPEN,
# - if this campaign is advanced recurring, you can set a cron job style frequency.
# - please note that this applies only for regular campaigns.
# 'cronjob' : '0 0 * * *', # once a day
# 'cronjob_enabled' : 1, # 1 or 0
},
# required block, archive or template_uid or content : required.
# the templates examples can be found here: Examples
'template': {
# 'archive' : open('template-example.zip', 'r').read(),
'template_uid': 'TEMPLATE_UID',
# 'content' : open('template-example.html', 'rb').read(),
'inline_css': 'no', # yes | no
# 'plain_text' : None, # leave empty to auto generate
'auto_plain_text': 'yes', # yes | no
},
})
"""
DISPLAY RESPONSE
"""
print(response.content)
"""
COPY ONE CAMPAIGN
"""
response = endpoint.copy('CAMPAIGN_UID')
"""
DISPLAY RESPONSE
"""
print(response.content)
"""
MARK ONE CAMPAIGN AS SENT
"""
response = endpoint.mark_sent('CAMPAIGN_UID')
"""
DISPLAY RESPONSE
"""
print(response.content)
"""
PAUSE/UNPAUSE ONE CAMPAIGN
"""
response = endpoint.pause_unpause('CAMPAIGN_UID')
"""
DISPLAY RESPONSE
"""
print(response.content)
"""
DELETE ONE CAMPAIGN
"""
response = endpoint.delete('CAMPAIGN_UID')
"""
DISPLAY RESPONSE
"""
print(response.content)
| 28.98913
| 107
| 0.617923
| 650
| 5,334
| 4.963077
| 0.243077
| 0.018599
| 0.049597
| 0.069436
| 0.814321
| 0.803472
| 0.803472
| 0.732176
| 0.732176
| 0.732176
| 0
| 0.005676
| 0.240345
| 5,334
| 183
| 108
| 29.147541
| 0.790474
| 0.466067
| 0
| 0.677419
| 0
| 0
| 0.304404
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.048387
| 0
| 0.048387
| 0.129032
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0cd13df446b07efcf591a7e92d8abe881bb70983
| 10,703
|
py
|
Python
|
intersight_universal_api_calls.py
|
ugo-emekauwa/intersight-universal-api-calls
|
29c4f5030b1909fd945ff81fd0e58bc75e784963
|
[
"Apache-2.0"
] | 1
|
2022-02-21T02:58:56.000Z
|
2022-02-21T02:58:56.000Z
|
intersight_universal_api_calls.py
|
ugo-emekauwa/intersight-universal-api-calls
|
29c4f5030b1909fd945ff81fd0e58bc75e784963
|
[
"Apache-2.0"
] | null | null | null |
intersight_universal_api_calls.py
|
ugo-emekauwa/intersight-universal-api-calls
|
29c4f5030b1909fd945ff81fd0e58bc75e784963
|
[
"Apache-2.0"
] | null | null | null |
"""
Cisco Intersight Universal API Calls Module, v1.1
Author: Ugo Emekauwa
Contact: uemekauw@cisco.com, uemekauwa@gmail.com
Summary: The Cisco Intersight Universal API Calls module provides
a set of functions that simplify creation, retrieval,
modification, and deletion of resources on Cisco Intersight.
"""
# Import needed Python modules
import sys
import json
import requests
import os
import intersight
from intersight.intersight_api_client import IntersightApiClient
# MODULE REQUIREMENT 1
"""
For the following variable below named key_id, please fill in between
the quotes your Intersight API Key ID.
Here is an example: key_id = "5c89885075646127773ec143/5c82fc477577712d3088eb2f/5c8987b17577712d302eaaff"
"""
key_id = ""
# MODULE REQUIREMENT 2
"""
For the following variable below named key, please fill in between
the quotes your system's file path to your Intersight API key "SecretKey.txt" file.
Here is an example: key = "C:\Keys\Key1\SecretKey.txt"
"""
key = ""
# Define Intersight SDK IntersightApiClient variables
# Tested on Cisco Intersight API Reference v1.0.9-853
base_url = "https://intersight.com/api/v1"
api_instance = IntersightApiClient(host=base_url,private_key=key,api_key_id=key_id)
# Establish Intersight Universal Functions
def iu_get(api_path):
"""This is a function to perform a universal or generic GET on objects under available Intersight API types,
including those not yet defined in the Intersight SDK for Python. An argument for the API type path is required.
Args:
api_path: The path to the targeted Intersight API type. For example, to specify the Intersight API type for
adapter configuration policies, enter "adapter/ConfigPolicies". More API types can be found in the Intersight
API reference library at https://intersight.com/apidocs/introduction/overview/.
Returns:
A dictionary containing all objects of the specified API type. If the API type is inaccessible, an
implicit value of None will be returned.
"""
full_resource_path = "/" + api_path
try:
api_instance.call_api(full_resource_path,"GET")
response = api_instance.last_response.data
results = json.loads(response)
print("The API resource path '" + api_path + "' has been accessed successfully.\n")
return results
except:
print("Unable to access the API resource path '" + api_path + "'.\n")
def iu_get_moid(api_path,moid):
"""This is a function to perform a universal or generic GET on a specified object under available
Intersight API types, including those not yet defined in the Intersight SDK for Python. An argument for the
API type path and MOID (managed object identifier) is required.
Args:
api_path: The path to the targeted Intersight API type. For example, to specify the Intersight API type for
adapter configuration policies, enter "adapter/ConfigPolicies". More API types can be found in the Intersight
API reference library at https://intersight.com/apidocs/introduction/overview/.
moid: The managed object ID of the targeted API object.
Returns:
A dictionary containing all parameters of the specified API object. If the API object is inaccessible, an
implicit value of None will be returned.
"""
full_resource_path = "/" + api_path + "/" + moid
try:
api_instance.call_api(full_resource_path,"GET")
response = api_instance.last_response.data
results = json.loads(response)
print("The object located at the resource path '" + full_resource_path + "' has been accessed succesfully.\n")
return results
except:
print("Unable to access the object located at the resource path '" + full_resource_path + "'.\n")
def iu_delete_moid(api_path,moid):
"""This is a function to perform a universal or generic DELETE on a specified object under available
Intersight API types, including those not yet defined in the Intersight SDK for Python. An argument for the
API type path and MOID (managed object identifier) is required.
Args:
api_path: The path to the targeted Intersight API type. For example, to specify the Intersight API type for
adapter configuration policies, enter "adapter/ConfigPolicies". More API types can be found in the Intersight
API reference library at https://intersight.com/apidocs/introduction/overview/.
moid: The managed object ID of the targeted API object.
Returns:
A statement indicating whether the DELETE method was successful or failed.
Raises:
Exception: An exception occurred while performing the API call. The exact error will be
specified.
"""
full_resource_path = "/" + api_path + "/" + moid
try:
api_instance.call_api(full_resource_path,"DELETE")
print("The deletion of the object located at the resource path '" + full_resource_path + "' has been completed.\n")
return "The DELETE method was successful."
except Exception as exception_message:
print("Unable to access the object located at the resource path '" + full_resource_path + "'.\n")
print(exception_message)
return "The DELETE method failed."
def iu_post(api_path,body):
"""This is a function to perform a universal or generic POST of an object under available Intersight
API types, including those not yet defined in the Intersight SDK for Python. An argument for the
API type path and body configuration data is required.
Args:
api_path: The path to the targeted Intersight API type. For example, to specify the Intersight API type for
adapter configuration policies, enter "adapter/ConfigPolicies". More API types can be found in the Intersight
API reference library at https://intersight.com/apidocs/introduction/overview/.
body: The content to be created under the targeted API type. This should be provided in a dictionary format.
Returns:
A statement indicating whether the POST method was successful or failed.
Raises:
Exception: An exception occurred while performing the API call. The exact error will be
specified.
"""
full_resource_path = "/" + api_path
try:
api_instance.call_api(full_resource_path,"POST",body=body)
print("The creation of the object under the resource path '" + full_resource_path + "' has been completed.\n")
return "The POST method was successful."
except Exception as exception_message:
print("Unable to create the object under the resource path '" + full_resource_path + "'.\n")
print(exception_message)
return "The POST method failed."
def iu_post_moid(api_path,moid,body):
"""This is a function to perform a universal or generic POST of a specified object under available Intersight
API types, including those not yet defined in the Intersight SDK for Python. Arguments for the
API type path, MOID (managed object identifier), and body configuration data are required.
Args:
api_path: The path to the targeted Intersight API type. For example, to specify the Intersight API type for
adapter configuration policies, enter "adapter/ConfigPolicies". More API types can be found in the Intersight
API reference library at https://intersight.com/apidocs/introduction/overview/.
moid: The managed object ID of the targeted API object.
body: The content to be modified on the targeted API object. This should be provided in a dictionary format.
Returns:
A statement indicating whether the POST method succeeded or failed.
Raises:
Exception: An exception occurred while performing the API call. The exact error will be
specified.
"""
full_resource_path = "/" + api_path + "/" + moid
try:
api_instance.call_api(full_resource_path,"POST",body=body)
print("The update of the object located at the resource path '" + full_resource_path + "' has been completed.\n")
return "The POST method was successful."
except Exception as exception_message:
print("Unable to access the object located at the resource path '" + full_resource_path + "'.\n")
print(exception_message)
return "The POST method failed."
def iu_patch_moid(api_path,moid,body):
"""This is a function to perform a universal or generic PATCH of a specified object under available Intersight
API types, including those not yet defined in the Intersight SDK for Python. Arguments for the
API type path, MOID (managed object identifier), and body configuration data are required.
Args:
api_path: The path to the targeted Intersight API type. For example, to specify the Intersight API type for
adapter configuration policies, enter "adapter/ConfigPolicies". More API types can be found in the Intersight
API reference library at https://intersight.com/apidocs/introduction/overview/.
moid: The managed object ID of the targeted API object.
body: The content to be modified on the targeted API object. This should be provided in a dictionary format.
Returns:
A statement indicating whether the PATCH method succeeded or failed.
Raises:
Exception: An exception occurred while performing the API call. The exact error will be
specified.
"""
full_resource_path = "/" + api_path + "/" + moid
try:
api_instance.call_api(full_resource_path,"PATCH",body=body)
print("The update of the object located at the resource path '" + full_resource_path + "' has been completed.\n")
return "The PATCH method was successful."
except Exception as exception_message:
print("Unable to access the object located at the resource path '" + full_resource_path + "'.\n")
print(exception_message)
return "The PATCH method failed."
# Verify API key variables have been set
key_id_setting = key_id.strip()
if key_id_setting is None or len(key_id_setting) == 0 or "/" not in key_id_setting:
print("\nThe key_id variable for the intersight_universal_api_calls module has not been set correctly!")
print("Please edit the intersight_universal_api_calls.py file and set the key_id variable \nwith the ID of your API key in order for the module to work properly.")
key_setting = key.strip()
if key_setting is None or len(key_setting) == 0 or not os.path.isfile(key_setting):
print("\nThe key variable for the intersight_universal_api_calls module has not been set correctly!")
print("Please edit the intersight_universal_api_calls.py file and set the key variable \nwith your system's path to your API key SecretKey.txt file in order for the module to work properly.")
| 48.211712
| 194
| 0.736149
| 1,547
| 10,703
| 5.004525
| 0.134454
| 0.0527
| 0.045466
| 0.031
| 0.840351
| 0.810772
| 0.786102
| 0.768535
| 0.76001
| 0.748385
| 0
| 0.007945
| 0.200318
| 10,703
| 221
| 195
| 48.429864
| 0.8966
| 0.536672
| 0
| 0.538462
| 0
| 0.025641
| 0.392988
| 0.03089
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.076923
| 0
| 0.282051
| 0.25641
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0cd75066e0c41d0e1f25799c63b567a9e37a267c
| 70
|
py
|
Python
|
inpaint_melanoma/models/__init__.py
|
octaviomtz/inpaint_melanoma
|
19cf85a0d51f04ad3e1e3ef68ddf1cc5e27a0b84
|
[
"Apache-2.0"
] | null | null | null |
inpaint_melanoma/models/__init__.py
|
octaviomtz/inpaint_melanoma
|
19cf85a0d51f04ad3e1e3ef68ddf1cc5e27a0b84
|
[
"Apache-2.0"
] | 2
|
2021-09-28T05:42:07.000Z
|
2022-02-26T10:16:29.000Z
|
inpaint_melanoma/models/__init__.py
|
octaviomtz/inpaint_melanoma
|
19cf85a0d51f04ad3e1e3ef68ddf1cc5e27a0b84
|
[
"Apache-2.0"
] | null | null | null |
from .common import *
from .downsampler import *
from .skip import *
| 23.333333
| 27
| 0.728571
| 9
| 70
| 5.666667
| 0.555556
| 0.392157
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.185714
| 70
| 3
| 28
| 23.333333
| 0.894737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0b52de2fb561b92f99cd6d934cd1907e98d68cd7
| 4,820
|
py
|
Python
|
RHGraph/generators/RHGeneratorConstants.py
|
crhaithcock/RushHour
|
9c1854dd117e43ec38a6eacf74a8365a6e01f25c
|
[
"CC0-1.0"
] | null | null | null |
RHGraph/generators/RHGeneratorConstants.py
|
crhaithcock/RushHour
|
9c1854dd117e43ec38a6eacf74a8365a6e01f25c
|
[
"CC0-1.0"
] | null | null | null |
RHGraph/generators/RHGeneratorConstants.py
|
crhaithcock/RushHour
|
9c1854dd117e43ec38a6eacf74a8365a6e01f25c
|
[
"CC0-1.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 4 16:56:20 2019
@author: CHaithcock
"""
import sys
sys.path.insert(1, 'C:/Users/chaithcock/Documents/repos/RushHour/RHGraph')
import RHConstants as const
'''
Constants for Topological Combinatorial Constructions.
'''
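# Reading guide (editorial comment, inferred from the definitions below): each
# STRIPS key names the vehicles packed into one 6-slot strip, with 'C' a car
# covering 2 slots and 'T' a truck covering 3 (so 'CT' means one car plus one
# truck). Each list entry in HORZ_STRIPS/VERT_STRIPS is one admissible placement
# of those vehicles within the strip, with 0 marking an empty slot.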
STRIPS = ['C','CC','CCC','CT','TC','T','TT']
SLOTS = range(12)
EXIT_SLOT = 2
ROW_SLOTS = SLOTS[:6]
COL_SLOTS = SLOTS[6:]
HORZ_STRIPS = {}
HORZ_STRIPS['C'] = []
HORZ_STRIPS['C'].append([const.HORIZONTAL_CAR,const.HORIZONTAL_CAR,0,0,0,0])
HORZ_STRIPS['C'].append([0,const.HORIZONTAL_CAR,const.HORIZONTAL_CAR,0,0,0])
HORZ_STRIPS['C'].append([0,0,const.HORIZONTAL_CAR,const.HORIZONTAL_CAR,0,0])
HORZ_STRIPS['C'].append([0,0,0,const.HORIZONTAL_CAR,const.HORIZONTAL_CAR,0])
HORZ_STRIPS['C'].append([0,0,0,0,const.HORIZONTAL_CAR,const.HORIZONTAL_CAR])
HORZ_STRIPS['CC'] = []
HORZ_STRIPS['CC'].append([const.HORIZONTAL_CAR,const.HORIZONTAL_CAR,const.HORIZONTAL_CAR,const.HORIZONTAL_CAR,0,0])
HORZ_STRIPS['CC'].append([const.HORIZONTAL_CAR,const.HORIZONTAL_CAR,0,const.HORIZONTAL_CAR,const.HORIZONTAL_CAR,0])
HORZ_STRIPS['CC'].append([const.HORIZONTAL_CAR,const.HORIZONTAL_CAR,0,0,const.HORIZONTAL_CAR,const.HORIZONTAL_CAR])
HORZ_STRIPS['CC'].append([0,const.HORIZONTAL_CAR,const.HORIZONTAL_CAR,const.HORIZONTAL_CAR,const.HORIZONTAL_CAR,0])
HORZ_STRIPS['CC'].append([0,const.HORIZONTAL_CAR,const.HORIZONTAL_CAR,0,const.HORIZONTAL_CAR,const.HORIZONTAL_CAR])
HORZ_STRIPS['CC'].append([0,0,const.HORIZONTAL_CAR,const.HORIZONTAL_CAR,const.HORIZONTAL_CAR,const.HORIZONTAL_CAR])
HORZ_STRIPS['CCC'] = [ [const.HORIZONTAL_CAR]*6 ]
HORZ_STRIPS['CT'] = []
HORZ_STRIPS['CT'].append([const.HORIZONTAL_CAR] * 2 + [const.HORIZONTAL_TRUCK] * 3 + [0] )
HORZ_STRIPS['CT'].append([const.HORIZONTAL_CAR] * 2 + [0] + [const.HORIZONTAL_TRUCK] * 3 )
HORZ_STRIPS['CT'].append([0] + [const.HORIZONTAL_CAR] * 2 + [const.HORIZONTAL_TRUCK] * 3 )
HORZ_STRIPS['TC'] = []
HORZ_STRIPS['TC'].append([const.HORIZONTAL_TRUCK] * 2 + [const.HORIZONTAL_CAR] * 3 + [0] )
HORZ_STRIPS['TC'].append([const.HORIZONTAL_TRUCK] * 2 + [0] + [const.HORIZONTAL_CAR] * 3 )
HORZ_STRIPS['TC'].append([0] + [const.HORIZONTAL_TRUCK] * 2 + [const.HORIZONTAL_CAR] * 3 )
HORZ_STRIPS['T'] = []
HORZ_STRIPS['T'].append([const.HORIZONTAL_TRUCK,const.HORIZONTAL_TRUCK,const.HORIZONTAL_TRUCK,0,0,0])
HORZ_STRIPS['T'].append([0,const.HORIZONTAL_TRUCK,const.HORIZONTAL_TRUCK,const.HORIZONTAL_TRUCK,0,0])
HORZ_STRIPS['T'].append([0,0,const.HORIZONTAL_TRUCK,const.HORIZONTAL_TRUCK,const.HORIZONTAL_TRUCK,0])
HORZ_STRIPS['T'].append([0,0,0,const.HORIZONTAL_TRUCK,const.HORIZONTAL_TRUCK,const.HORIZONTAL_TRUCK])
HORZ_STRIPS['TT'] = [[const.HORIZONTAL_TRUCK]*6]
VERT_STRIPS = {}
VERT_STRIPS['C'] = []
VERT_STRIPS['C'].append([const.VERTICAL_CAR,const.VERTICAL_CAR,0,0,0,0])
VERT_STRIPS['C'].append([0,const.VERTICAL_CAR,const.VERTICAL_CAR,0,0,0])
VERT_STRIPS['C'].append([0,0,const.VERTICAL_CAR,const.VERTICAL_CAR,0,0])
VERT_STRIPS['C'].append([0,0,0,const.VERTICAL_CAR,const.VERTICAL_CAR,0])
VERT_STRIPS['C'].append([0,0,0,0,const.VERTICAL_CAR,const.VERTICAL_CAR])
VERT_STRIPS['CC'] = []
VERT_STRIPS['CC'].append([const.VERTICAL_CAR,const.VERTICAL_CAR,const.VERTICAL_CAR,const.VERTICAL_CAR,0,0])
VERT_STRIPS['CC'].append([const.VERTICAL_CAR,const.VERTICAL_CAR,0,const.VERTICAL_CAR,const.VERTICAL_CAR,0])
VERT_STRIPS['CC'].append([const.VERTICAL_CAR,const.VERTICAL_CAR,0,0,const.VERTICAL_CAR,const.VERTICAL_CAR])
VERT_STRIPS['CC'].append([0,const.VERTICAL_CAR,const.VERTICAL_CAR,const.VERTICAL_CAR,const.VERTICAL_CAR,0])
VERT_STRIPS['CC'].append([0,const.VERTICAL_CAR,const.VERTICAL_CAR,0,const.VERTICAL_CAR,const.VERTICAL_CAR])
VERT_STRIPS['CC'].append([0,0,const.VERTICAL_CAR,const.VERTICAL_CAR,const.VERTICAL_CAR,const.VERTICAL_CAR])
VERT_STRIPS['CCC'] = [ [const.VERTICAL_CAR]*6 ]
VERT_STRIPS['CT'] = []
VERT_STRIPS['CT'].append([const.VERTICAL_CAR] * 2 + [const.VERTICAL_TRUCK] * 3 + [0] )
VERT_STRIPS['CT'].append([const.VERTICAL_CAR] * 2 + [0] + [const.VERTICAL_TRUCK] * 3 )
VERT_STRIPS['CT'].append([0] + [const.VERTICAL_CAR] * 2 + [const.VERTICAL_TRUCK] * 3 )
VERT_STRIPS['TC'] = []
VERT_STRIPS['TC'].append([const.VERTICAL_TRUCK] * 2 + [const.VERTICAL_CAR] * 3 + [0] )
VERT_STRIPS['TC'].append([const.VERTICAL_TRUCK] * 2 + [0] + [const.VERTICAL_CAR] * 3 )
VERT_STRIPS['TC'].append([0] + [const.VERTICAL_TRUCK] * 2 + [const.VERTICAL_CAR] * 3 )
VERT_STRIPS['T'] = []
VERT_STRIPS['T'].append([const.VERTICAL_TRUCK,const.VERTICAL_TRUCK,const.VERTICAL_TRUCK,0,0,0])
VERT_STRIPS['T'].append([0,const.VERTICAL_TRUCK,const.VERTICAL_TRUCK,const.VERTICAL_TRUCK,0,0])
VERT_STRIPS['T'].append([0,0,const.VERTICAL_TRUCK,const.VERTICAL_TRUCK,const.VERTICAL_TRUCK,0])
VERT_STRIPS['T'].append([0,0,0,const.VERTICAL_TRUCK,const.VERTICAL_TRUCK,const.VERTICAL_TRUCK])
VERT_STRIPS['TT'] = [[const.VERTICAL_TRUCK]*6]
| 48.2
| 115
| 0.748755
| 760
| 4,820
| 4.510526
| 0.076316
| 0.262544
| 0.215286
| 0.134189
| 0.853559
| 0.843057
| 0.79755
| 0.766628
| 0.585764
| 0.528588
| 0
| 0.03213
| 0.057261
| 4,820
| 99
| 116
| 48.686869
| 0.722271
| 0.01639
| 0
| 0
| 0
| 0
| 0.033626
| 0.011137
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.030303
| 0
| 0.030303
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f019f88ce2cda72700f36d8d8c9ec1b290913d20
| 28
|
py
|
Python
|
tests/__init__.py
|
Merkll/PPM
|
bc14c24885cb651599a4e26a60ed69fc6cd2ef5c
|
[
"MIT"
] | null | null | null |
tests/__init__.py
|
Merkll/PPM
|
bc14c24885cb651599a4e26a60ed69fc6cd2ef5c
|
[
"MIT"
] | null | null | null |
tests/__init__.py
|
Merkll/PPM
|
bc14c24885cb651599a4e26a60ed69fc6cd2ef5c
|
[
"MIT"
] | null | null | null |
from tests import setUpTest
| 14
| 27
| 0.857143
| 4
| 28
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 28
| 1
| 28
| 28
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f0463aa27786c9a21bca021255837388946a748a
| 4,135
|
py
|
Python
|
src/test/rpi_gpio_laser/test_lrf_calibration.py
|
chrisspen/homebot
|
cd1a5a2bddcb90d50a00e67b94297942827fbc61
|
[
"MIT"
] | 8
|
2017-11-19T01:42:59.000Z
|
2021-02-08T09:18:58.000Z
|
src/test/rpi_gpio_laser/test_lrf_calibration.py
|
chrisspen/homebot
|
cd1a5a2bddcb90d50a00e67b94297942827fbc61
|
[
"MIT"
] | 24
|
2017-02-14T16:50:02.000Z
|
2017-06-07T18:38:20.000Z
|
src/test/rpi_gpio_laser/test_lrf_calibration.py
|
chrisspen/homebot
|
cd1a5a2bddcb90d50a00e67b94297942827fbc61
|
[
"MIT"
] | 6
|
2017-05-17T03:07:05.000Z
|
2019-11-14T09:33:06.000Z
|
#!../../../.env/bin/python
"""
https://shaneormonde.wordpress.com/2014/01/25/webcam-laser-rangefinder/
theta = arctan(h/actual_d)
theta = pfc * rpc + ro
"""
import sys
from math import *
from scipy import stats
import numpy as np
from laser_range_finder import pixels_to_distance
def percent_error(expected, actual):
return (expected - actual)/float(actual)*100
pixel_readings = [98, 98, 98, 98, 98, 98, 98, 98, 98, 98, 98, 98, 99, 99, 99, 99, 98, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 101, 101, 101, 101, 101, 101, 101, 101, 101, 101, 101, 101, 101, 101, 101, 101, 101, 101, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 83, 82, 82, 82, 82, 82, 82, 82, 82, 82, 82, 82, 82, 82, 82, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 80, 80, 80, 80, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 80, 80, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 82, 82, 82, 82, 82, 82, 82, 82, 82, 82, 82, 82, 82, 82, 83, 83, 83, 83, 83, 83, 83, 83, 84, 84, 84, 84, 84, 84, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 116]
#calibrate-laser-on-20151101.jpg
distances = dict(
a=350,#mm
b=400,
c=180,
d=650,
e=650,
f=2450,
)
positions = dict(
#reference_point: column,
a=14,
b=66,
c=179,
d=249,
e=313,
f=319,
)
h = 22.5 # Distance between laser and camera in mm.
image_width = 320
image_height = 240
measurements = []
for letter in sorted(positions.keys()):
actual_d = distances[letter]
pix_dist = pixel_readings[positions[letter]]
assert pix_dist > 0
pfc = abs(pix_dist - image_height/2)
#theta = atan(h/actual_d)
measurements.append((actual_d, pfc))
# reference settings
# theta = .02960246
# pix_dist = 13
# actual_d = 180
# #theta = atan(h/actual_d)
# h = tan(theta)*actual_d
# # print 'h:', h
# measurements = [
# #(actual_d,pix_dist),
# (180,13),
# (160,16),
# (140,20),
# (120,25),
# (100,33),
# (80,44),
# (60,63),
# (40,103),
# ]
print '\nmeasurements:', measurements
x = [_pix_dist for _actual_d, _pix_dist in measurements]
y = [atan(h/_actual_d) for _actual_d, _pix_dist in measurements]
slope, intercept, r_value, p_value, std_err = stats.linregress(x, y)
print '\nlinreg:', slope, intercept, r_value, p_value, std_err
# y = m * x + b => theta = rpc * pfc + ro
# slope = m = rpc
# intercept = b = ro
rpc = slope
ro = intercept
print '\n--rpc=%s --ro=%s' % (slope, intercept)
estimated_distances = pixels_to_distance(
pixel_rows=pixel_readings,
rpc=rpc,
ro=ro,
h=h,
max_height=image_height,
max_width=image_width,
)
print '\nestimated_distances:', estimated_distances
estimated_distances2 = [_v for _i, _v in enumerate(estimated_distances) if _i in positions.values()]
print '\nestimated_distances:', estimated_distances2
print '\npixels from center,calc D (mm),actual D (mm),% error'
differences = []
for letter in positions.keys():
actual_d = distances[letter]
pix_dist = pixel_readings[positions[letter]]
pfc = abs(pix_dist - image_height/2)
#pfc0 = abs(pix_dist - image_height/2)
#theta = atan(h/actual_d)
estimated_d = estimated_distances[positions[letter]]
print '%s,%s,%s,%s' % (pfc, estimated_d, actual_d, percent_error(estimated_d, actual_d))
differences.append(abs(actual_d - estimated_d))
print '\naverage error:', sum(differences)/float(len(differences))
| 35.646552
| 1,428
| 0.624426
| 715
| 4,135
| 3.502098
| 0.218182
| 0.145367
| 0.210863
| 0.271566
| 0.455272
| 0.439696
| 0.439696
| 0.397364
| 0.371805
| 0.371805
| 0
| 0.265115
| 0.2
| 4,135
| 115
| 1,429
| 35.956522
| 0.491838
| 0.137122
| 0
| 0.095238
| 0
| 0
| 0.049074
| 0.01293
| 0
| 0
| 0
| 0
| 0.015873
| 0
| null | null | 0
| 0.079365
| null | null | 0.126984
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b2d3949dc26c8485f687b4ab030f8056b3554599
| 31
|
py
|
Python
|
core/clients/__init__.py
|
Latterlig96/airflow-model-trainer
|
7da36aae3036759639ae1c556f41fc70409aa444
|
[
"MIT"
] | null | null | null |
core/clients/__init__.py
|
Latterlig96/airflow-model-trainer
|
7da36aae3036759639ae1c556f41fc70409aa444
|
[
"MIT"
] | null | null | null |
core/clients/__init__.py
|
Latterlig96/airflow-model-trainer
|
7da36aae3036759639ae1c556f41fc70409aa444
|
[
"MIT"
] | null | null | null |
from .minio import MinioHandler
| 31
| 31
| 0.870968
| 4
| 31
| 6.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.096774
| 31
| 1
| 31
| 31
| 0.964286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b2eab4facd87ed7c35d411e644bbeee1f687288f
| 259
|
py
|
Python
|
src/api/pdi/application/dashboard/GetDataOperationWidget/GetDataOperationWidgetResponse.py
|
ahmetcagriakca/pythondataintegrator
|
079b968d6c893008f02c88dbe34909a228ac1c7b
|
[
"MIT"
] | 1
|
2020-12-18T21:37:28.000Z
|
2020-12-18T21:37:28.000Z
|
src/api/pdi/application/dashboard/GetDataOperationWidget/GetDataOperationWidgetResponse.py
|
ahmetcagriakca/pythondataintegrator
|
079b968d6c893008f02c88dbe34909a228ac1c7b
|
[
"MIT"
] | null | null | null |
src/api/pdi/application/dashboard/GetDataOperationWidget/GetDataOperationWidgetResponse.py
|
ahmetcagriakca/pythondataintegrator
|
079b968d6c893008f02c88dbe34909a228ac1c7b
|
[
"MIT"
] | 1
|
2020-12-18T21:37:31.000Z
|
2020-12-18T21:37:31.000Z
|
from pdip.cqrs.decorators import responseclass
from pdi.application.dashboard.GetDataOperationWidget.GetDataOperationWidgetDto import GetDataOperationWidgetDto
@responseclass
class GetDataOperationWidgetResponse:
Data: GetDataOperationWidgetDto = None
| 28.777778
| 112
| 0.876448
| 20
| 259
| 11.35
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.084942
| 259
| 8
| 113
| 32.375
| 0.957806
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.4
| 0
| 0.8
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
65008d9464118767a9b66967cd0e84cfacc16491
| 132
|
py
|
Python
|
sc-kpm/python/common/__init__.py
|
MaxGavr/sc-machine
|
577c6644149ac08f6941add9882e6d1d2144f134
|
[
"MIT"
] | null | null | null |
sc-kpm/python/common/__init__.py
|
MaxGavr/sc-machine
|
577c6644149ac08f6941add9882e6d1d2144f134
|
[
"MIT"
] | null | null | null |
sc-kpm/python/common/__init__.py
|
MaxGavr/sc-machine
|
577c6644149ac08f6941add9882e6d1d2144f134
|
[
"MIT"
] | 2
|
2018-04-18T21:35:28.000Z
|
2019-04-24T17:57:23.000Z
|
from .sc_keynodes import ScKeynodes
from .sc_module import ScModule
from .sc_exception import *
from .sc_event import ScEventManager
| 33
| 36
| 0.848485
| 19
| 132
| 5.684211
| 0.526316
| 0.222222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.113636
| 132
| 4
| 36
| 33
| 0.923077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e8efe37229039cde1ff4dfd908f2930551a87437
| 340
|
py
|
Python
|
ns-allinone-3.27/ns-3.27/src/propagation/bindings/callbacks_list.py
|
zack-braun/4607_NS
|
43c8fb772e5552fb44bd7cd34173e73e3fb66537
|
[
"MIT"
] | 93
|
2019-04-21T08:22:26.000Z
|
2022-03-30T04:26:29.000Z
|
ns-allinone-3.27/ns-3.27/src/propagation/bindings/callbacks_list.py
|
zack-braun/4607_NS
|
43c8fb772e5552fb44bd7cd34173e73e3fb66537
|
[
"MIT"
] | 12
|
2019-04-19T16:39:58.000Z
|
2021-06-22T13:18:32.000Z
|
ns-allinone-3.27/ns-3.27/src/propagation/bindings/callbacks_list.py
|
zack-braun/4607_NS
|
43c8fb772e5552fb44bd7cd34173e73e3fb66537
|
[
"MIT"
] | 21
|
2019-05-27T19:36:12.000Z
|
2021-07-26T02:37:41.000Z
|
callback_classes = [
['ns3::ObjectBase *', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<const ns3::MobilityModel>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
]
| 68
| 163
| 0.576471
| 44
| 340
| 4.431818
| 0.204545
| 0.697436
| 0.846154
| 1.230769
| 0.697436
| 0.697436
| 0.697436
| 0.697436
| 0.697436
| 0.697436
| 0
| 0.065359
| 0.1
| 340
| 4
| 164
| 85
| 0.571895
| 0
| 0
| 0
| 0
| 0
| 0.661765
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e8f32733651acdc5cb5c566df2ca9a64723e562d
| 36
|
py
|
Python
|
reward/policy/__init__.py
|
lgvaz/torchrl
|
cfff8acaf70d1fec72169162b95ab5ad3547d17a
|
[
"MIT"
] | 5
|
2018-06-21T14:33:40.000Z
|
2018-08-18T02:26:03.000Z
|
reward/policy/__init__.py
|
lgvaz/reward
|
cfff8acaf70d1fec72169162b95ab5ad3547d17a
|
[
"MIT"
] | null | null | null |
reward/policy/__init__.py
|
lgvaz/reward
|
cfff8acaf70d1fec72169162b95ab5ad3547d17a
|
[
"MIT"
] | 2
|
2018-05-08T03:34:49.000Z
|
2018-06-22T15:04:17.000Z
|
from .base_policy import BasePolicy
| 18
| 35
| 0.861111
| 5
| 36
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 36
| 1
| 36
| 36
| 0.9375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e8fbc569564867e34e1942aa193d13486328427f
| 36
|
py
|
Python
|
NNEvol/get_best_nn.py
|
cvazquezlos/NNEvol-python
|
80448e09510c92580bae03ad8b528aa878b14092
|
[
"MIT"
] | 1
|
2019-08-15T09:58:21.000Z
|
2019-08-15T09:58:21.000Z
|
NNEvol/get_best_nn.py
|
cvazquezlos/NNEvol-python
|
80448e09510c92580bae03ad8b528aa878b14092
|
[
"MIT"
] | null | null | null |
NNEvol/get_best_nn.py
|
cvazquezlos/NNEvol-python
|
80448e09510c92580bae03ad8b528aa878b14092
|
[
"MIT"
] | null | null | null |
def get_best_nn():
return(None)
| 12
| 18
| 0.666667
| 6
| 36
| 3.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.194444
| 36
| 3
| 19
| 12
| 0.758621
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0.5
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
|
0
| 6
|
684cd8068cc5357b6372674ae26a80940d024f80
| 48
|
py
|
Python
|
mountequist/clients/__init__.py
|
ginjeni1/mountequist
|
795d6921e41be8c3097c7c4a06f0ad0a1e4742b5
|
[
"MIT"
] | null | null | null |
mountequist/clients/__init__.py
|
ginjeni1/mountequist
|
795d6921e41be8c3097c7c4a06f0ad0a1e4742b5
|
[
"MIT"
] | null | null | null |
mountequist/clients/__init__.py
|
ginjeni1/mountequist
|
795d6921e41be8c3097c7c4a06f0ad0a1e4742b5
|
[
"MIT"
] | 1
|
2018-06-12T18:51:37.000Z
|
2018-06-12T18:51:37.000Z
|
from mountequist.clients.httpclient import Http
| 24
| 47
| 0.875
| 6
| 48
| 7
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 48
| 1
| 48
| 48
| 0.954545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
6857054f8d0ce059a62b2bae6f5381758d938c0b
| 182
|
py
|
Python
|
spectral/io/__init__.py
|
wwlswj/spectral
|
e886e4d9f8c34f512c8e81867f0de76e15550572
|
[
"MIT"
] | 398
|
2015-01-16T14:55:20.000Z
|
2022-03-29T04:13:00.000Z
|
spectral/io/__init__.py
|
wwlswj/spectral
|
e886e4d9f8c34f512c8e81867f0de76e15550572
|
[
"MIT"
] | 108
|
2015-01-20T15:39:17.000Z
|
2022-02-23T09:59:55.000Z
|
spectral/io/__init__.py
|
wwlswj/spectral
|
e886e4d9f8c34f512c8e81867f0de76e15550572
|
[
"MIT"
] | 123
|
2015-03-25T10:15:54.000Z
|
2022-03-06T14:24:21.000Z
|
from __future__ import absolute_import, division, print_function, unicode_literals
from .spyfile import SpyFile
from ..io import aviris
from ..io import erdas
from ..io import envi
| 26
| 82
| 0.813187
| 26
| 182
| 5.423077
| 0.538462
| 0.12766
| 0.255319
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.131868
| 182
| 6
| 83
| 30.333333
| 0.892405
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0.2
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
686d75d4337e0c22c16720ac4acc73e2d881755a
| 9,363
|
py
|
Python
|
tests/actions/dictionary/tests.py
|
maximejf42/python-shortcuts
|
addde7732e88d250aaea75c228241ed182fca850
|
[
"MIT"
] | 588
|
2018-09-23T20:39:15.000Z
|
2022-03-27T13:02:48.000Z
|
tests/actions/dictionary/tests.py
|
yy/python-shortcuts
|
025c54be965600fbe7ff6ec96f594efea503cbc3
|
[
"MIT"
] | 63
|
2018-09-27T20:13:56.000Z
|
2022-03-29T03:22:32.000Z
|
tests/actions/dictionary/tests.py
|
yy/python-shortcuts
|
025c54be965600fbe7ff6ec96f594efea503cbc3
|
[
"MIT"
] | 35
|
2018-09-24T03:37:49.000Z
|
2021-07-05T07:32:04.000Z
|
from shortcuts.actions import DictionaryAction, SetDictionaryValueAction, GetDictionaryFromInputAction
from shortcuts import Shortcut, FMT_SHORTCUT
from tests.conftest import ActionTomlLoadsMixin, SimpleBaseDumpsLoadsTest
class TestDictionaryAction:
def test_dump(self):
data = {
'items': [
{'key': 'k1', 'value': 'v1'},
{'key': 'k2', 'value': '{{var}}'},
],
}
action = DictionaryAction(data=data)
exp_dump = {
'WFWorkflowActionIdentifier': 'is.workflow.actions.dictionary',
'WFWorkflowActionParameters': {
'WFItems': {
'Value': {
'WFDictionaryFieldValueItems': [
{
'WFItemType': 0,
'WFKey': {
'Value': {'attachmentsByRange': {}, 'string': 'k1'},
'WFSerializationType': 'WFTextTokenString',
},
'WFValue': {
'Value': {'attachmentsByRange': {}, 'string': 'v1'},
'WFSerializationType': 'WFTextTokenString',
}
},
{
'WFItemType': 0,
'WFKey': {
'Value': {'attachmentsByRange': {}, 'string': 'k2'},
'WFSerializationType': 'WFTextTokenString',
},
'WFValue': {
'Value': {
'attachmentsByRange': {
'{0, 1}': {'Type': 'Variable', 'VariableName': 'var'}
},
'string': '',
},
'WFSerializationType': 'WFTextTokenString',
}
}
]
},
'WFSerializationType': 'WFDictionaryFieldValue',
},
},
}
assert action.dump() == exp_dump
class TestShortcutWithDictionary:
toml_string = '''
[[action]]
type = "dictionary"
[[action.items]]
key = "some key"
value = "some value"
[[action.items]]
key = "another key"
value = "{{x}}"
'''
def test_loads_from_toml(self):
sc = Shortcut.loads(self.toml_string)
assert len(sc.actions) == 1
action = sc.actions[0]
assert isinstance(action, DictionaryAction) is True
exp_data = {
'items': [
{'key': 'some key', 'value': 'some value'},
{'key': 'another key', 'value': '{{x}}'},
],
}
assert action.data == exp_data
def test_dumps_to_plist(self):
sc = Shortcut.loads(self.toml_string)
dump = sc.dumps(file_format=FMT_SHORTCUT)
exp_dump = '<?xml version="1.0" encoding="UTF-8"?>\n<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">\n<plist version="1.0">\n<dict>\n\t<key>WFWorkflowActions</key>\n\t<array>\n\t\t<dict>\n\t\t\t<key>WFWorkflowActionIdentifier</key>\n\t\t\t<string>is.workflow.actions.dictionary</string>\n\t\t\t<key>WFWorkflowActionParameters</key>\n\t\t\t<dict>\n\t\t\t\t<key>WFItems</key>\n\t\t\t\t<dict>\n\t\t\t\t\t<key>Value</key>\n\t\t\t\t\t<dict>\n\t\t\t\t\t\t<key>WFDictionaryFieldValueItems</key>\n\t\t\t\t\t\t<array>\n\t\t\t\t\t\t\t<dict>\n\t\t\t\t\t\t\t\t<key>WFItemType</key>\n\t\t\t\t\t\t\t\t<integer>0</integer>\n\t\t\t\t\t\t\t\t<key>WFKey</key>\n\t\t\t\t\t\t\t\t<dict>\n\t\t\t\t\t\t\t\t\t<key>Value</key>\n\t\t\t\t\t\t\t\t\t<dict>\n\t\t\t\t\t\t\t\t\t\t<key>attachmentsByRange</key>\n\t\t\t\t\t\t\t\t\t\t<dict/>\n\t\t\t\t\t\t\t\t\t\t<key>string</key>\n\t\t\t\t\t\t\t\t\t\t<string>some key</string>\n\t\t\t\t\t\t\t\t\t</dict>\n\t\t\t\t\t\t\t\t\t<key>WFSerializationType</key>\n\t\t\t\t\t\t\t\t\t<string>WFTextTokenString</string>\n\t\t\t\t\t\t\t\t</dict>\n\t\t\t\t\t\t\t\t<key>WFValue</key>\n\t\t\t\t\t\t\t\t<dict>\n\t\t\t\t\t\t\t\t\t<key>Value</key>\n\t\t\t\t\t\t\t\t\t<dict>\n\t\t\t\t\t\t\t\t\t\t<key>attachmentsByRange</key>\n\t\t\t\t\t\t\t\t\t\t<dict/>\n\t\t\t\t\t\t\t\t\t\t<key>string</key>\n\t\t\t\t\t\t\t\t\t\t<string>some value</string>\n\t\t\t\t\t\t\t\t\t</dict>\n\t\t\t\t\t\t\t\t\t<key>WFSerializationType</key>\n\t\t\t\t\t\t\t\t\t<string>WFTextTokenString</string>\n\t\t\t\t\t\t\t\t</dict>\n\t\t\t\t\t\t\t</dict>\n\t\t\t\t\t\t\t<dict>\n\t\t\t\t\t\t\t\t<key>WFItemType</key>\n\t\t\t\t\t\t\t\t<integer>0</integer>\n\t\t\t\t\t\t\t\t<key>WFKey</key>\n\t\t\t\t\t\t\t\t<dict>\n\t\t\t\t\t\t\t\t\t<key>Value</key>\n\t\t\t\t\t\t\t\t\t<dict>\n\t\t\t\t\t\t\t\t\t\t<key>attachmentsByRange</key>\n\t\t\t\t\t\t\t\t\t\t<dict/>\n\t\t\t\t\t\t\t\t\t\t<key>string</key>\n\t\t\t\t\t\t\t\t\t\t<string>another key</string>\n\t\t\t\t\t\t\t\t\t</dict>\n\t\t\t\t\t\t\t\t\t<key>WFSerializationType</key>\n\t\t\t\t\t\t\t\t\t<string>WFTextTokenString</string>\n\t\t\t\t\t\t\t\t</dict>\n\t\t\t\t\t\t\t\t<key>WFValue</key>\n\t\t\t\t\t\t\t\t<dict>\n\t\t\t\t\t\t\t\t\t<key>Value</key>\n\t\t\t\t\t\t\t\t\t<dict>\n\t\t\t\t\t\t\t\t\t\t<key>attachmentsByRange</key>\n\t\t\t\t\t\t\t\t\t\t<dict>\n\t\t\t\t\t\t\t\t\t\t\t<key>{0, 
1}</key>\n\t\t\t\t\t\t\t\t\t\t\t<dict>\n\t\t\t\t\t\t\t\t\t\t\t\t<key>Type</key>\n\t\t\t\t\t\t\t\t\t\t\t\t<string>Variable</string>\n\t\t\t\t\t\t\t\t\t\t\t\t<key>VariableName</key>\n\t\t\t\t\t\t\t\t\t\t\t\t<string>x</string>\n\t\t\t\t\t\t\t\t\t\t\t</dict>\n\t\t\t\t\t\t\t\t\t\t</dict>\n\t\t\t\t\t\t\t\t\t\t<key>string</key>\n\t\t\t\t\t\t\t\t\t\t<string></string>\n\t\t\t\t\t\t\t\t\t</dict>\n\t\t\t\t\t\t\t\t\t<key>WFSerializationType</key>\n\t\t\t\t\t\t\t\t\t<string>WFTextTokenString</string>\n\t\t\t\t\t\t\t\t</dict>\n\t\t\t\t\t\t\t</dict>\n\t\t\t\t\t\t</array>\n\t\t\t\t\t</dict>\n\t\t\t\t\t<key>WFSerializationType</key>\n\t\t\t\t\t<string>WFDictionaryFieldValue</string>\n\t\t\t\t</dict>\n\t\t\t</dict>\n\t\t</dict>\n\t</array>\n\t<key>WFWorkflowClientRelease</key>\n\t<string>2.0</string>\n\t<key>WFWorkflowClientVersion</key>\n\t<string>700</string>\n\t<key>WFWorkflowIcon</key>\n\t<dict>\n\t\t<key>WFWorkflowIconGlyphNumber</key>\n\t\t<integer>59511</integer>\n\t\t<key>WFWorkflowIconImageData</key>\n\t\t<data>\n\t\t</data>\n\t\t<key>WFWorkflowIconStartColor</key>\n\t\t<integer>431817727</integer>\n\t</dict>\n\t<key>WFWorkflowImportQuestions</key>\n\t<array/>\n\t<key>WFWorkflowInputContentItemClasses</key>\n\t<array>\n\t\t<string>WFAppStoreAppContentItem</string>\n\t\t<string>WFArticleContentItem</string>\n\t\t<string>WFContactContentItem</string>\n\t\t<string>WFDateContentItem</string>\n\t\t<string>WFEmailAddressContentItem</string>\n\t\t<string>WFGenericFileContentItem</string>\n\t\t<string>WFImageContentItem</string>\n\t\t<string>WFiTunesProductContentItem</string>\n\t\t<string>WFLocationContentItem</string>\n\t\t<string>WFDCMapsLinkContentItem</string>\n\t\t<string>WFAVAssetContentItem</string>\n\t\t<string>WFPDFContentItem</string>\n\t\t<string>WFPhoneNumberContentItem</string>\n\t\t<string>WFRichTextContentItem</string>\n\t\t<string>WFSafariWebPageContentItem</string>\n\t\t<string>WFStringContentItem</string>\n\t\t<string>WFURLContentItem</string>\n\t</array>\n\t<key>WFWorkflowTypes</key>\n\t<array>\n\t\t<string>NCWidget</string>\n\t\t<string>WatchKit</string>\n\t</array>\n</dict>\n</plist>\n'
assert dump == exp_dump
class TestSetDictionaryValueAction(ActionTomlLoadsMixin):
def test_dumps(self):
key = 'key1'
value = 'value1'
action = SetDictionaryValueAction(data={'key': key, 'value': value})
exp_dump = {
'WFWorkflowActionIdentifier': 'is.workflow.actions.setvalueforkey',
'WFWorkflowActionParameters': {
'WFDictionaryKey': {
'Value': {'attachmentsByRange': {}, 'string': key},
'WFSerializationType': 'WFTextTokenString',
},
'WFDictionaryValue': {
'Value': {'attachmentsByRange': {}, 'string': value},
'WFSerializationType': 'WFTextTokenString',
},
}
}
assert action.dump() == exp_dump
def test_loads_toml(self):
key = 'key2'
value = 'value2'
toml = f'''
[[action]]
type = "set_value_for_key"
key = "{key}"
value = "{value}"
'''
self._assert_toml_loads(toml, SetDictionaryValueAction, {'key': key, 'value': value})
class TestGetDictionaryFromInputAction(SimpleBaseDumpsLoadsTest):
action_class = GetDictionaryFromInputAction
itype = 'is.workflow.actions.detect.dictionary'
toml = '[[action]]\ntype = "get_dictionary"'
action_xml = '''
<dict>
<key>WFWorkflowActionIdentifier</key>
<string>is.workflow.actions.detect.dictionary</string>
<key>WFWorkflowActionParameters</key>
<dict></dict>
</dict>
'''
| 66.404255
| 4,487
| 0.554844
| 1,485
| 9,363
| 3.477441
| 0.090909
| 0.234314
| 0.28873
| 0.323005
| 0.504841
| 0.383036
| 0.335012
| 0.30306
| 0.293571
| 0.291247
| 0
| 0.00666
| 0.230268
| 9,363
| 140
| 4,488
| 66.878571
| 0.709588
| 0
| 0
| 0.272727
| 0
| 0.008264
| 0.637723
| 0.504326
| 0
| 0
| 0
| 0
| 0.057851
| 1
| 0.041322
| false
| 0
| 0.033058
| 0
| 0.14876
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0be65ef4dcdabb0c438a24d205926b0d6f400993
| 166
|
py
|
Python
|
jupyterlab2pymolpysnips/MolecularRepresentation/ellipcol.py
|
MooersLab/pymolpysnips
|
50a89c85adf8006d85c1d6cd3f8aad7e440a0b92
|
[
"MIT"
] | null | null | null |
jupyterlab2pymolpysnips/MolecularRepresentation/ellipcol.py
|
MooersLab/pymolpysnips
|
50a89c85adf8006d85c1d6cd3f8aad7e440a0b92
|
[
"MIT"
] | null | null | null |
jupyterlab2pymolpysnips/MolecularRepresentation/ellipcol.py
|
MooersLab/pymolpysnips
|
50a89c85adf8006d85c1d6cd3f8aad7e440a0b92
|
[
"MIT"
] | null | null | null |
"""
cmd.do('set ellipsoid_color, ${1:color};')
cmd.do('${0}')
"""
cmd.do('set ellipsoid_color, color;')
# Description: Set ellipsoid color.
# Source: placeHolder
| 16.6
| 42
| 0.650602
| 22
| 166
| 4.818182
| 0.454545
| 0.141509
| 0.481132
| 0.320755
| 0.415094
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013793
| 0.126506
| 166
| 9
| 43
| 18.444444
| 0.717241
| 0.686747
| 0
| 0
| 0
| 0
| 0.642857
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0bf84c34ead6a8d765f441e8c44a84575a8cbb24
| 70
|
py
|
Python
|
bokchoi/__init__.py
|
TimNooren/buzz
|
28ce37b76fbe7e3e3750cf632e8820537374df89
|
[
"MIT"
] | 3
|
2018-03-17T15:16:59.000Z
|
2018-09-11T06:10:24.000Z
|
bokchoi/__init__.py
|
TimNooren/buzz
|
28ce37b76fbe7e3e3750cf632e8820537374df89
|
[
"MIT"
] | 1
|
2018-09-17T13:04:43.000Z
|
2018-09-17T13:04:43.000Z
|
bokchoi/__init__.py
|
TimNooren/buzz
|
28ce37b76fbe7e3e3750cf632e8820537374df89
|
[
"MIT"
] | 5
|
2018-04-24T09:12:56.000Z
|
2019-05-22T07:30:03.000Z
|
from bokchoi.bokchoi import Bokchoi
from bokchoi.config import Config
| 23.333333
| 35
| 0.857143
| 10
| 70
| 6
| 0.4
| 0.366667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114286
| 70
| 3
| 36
| 23.333333
| 0.967742
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0402d2494fd7ca1e098210298d49f6d7fce83a76
| 48
|
py
|
Python
|
groinkbot/__init__.py
|
GroinkIndustries/GroinkBot
|
40725988af78e8f404df6faa623794ac0e951599
|
[
"Apache-2.0"
] | null | null | null |
groinkbot/__init__.py
|
GroinkIndustries/GroinkBot
|
40725988af78e8f404df6faa623794ac0e951599
|
[
"Apache-2.0"
] | null | null | null |
groinkbot/__init__.py
|
GroinkIndustries/GroinkBot
|
40725988af78e8f404df6faa623794ac0e951599
|
[
"Apache-2.0"
] | null | null | null |
from .interface import *
from .service import *
| 24
| 25
| 0.75
| 6
| 48
| 6
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 48
| 2
| 26
| 24
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0412fb3cb411ed09fa96cada72ee8619a2c9e1aa
| 571
|
py
|
Python
|
grr/server/aff4_objects/registry_init.py
|
panhania/grr
|
fe16a7311a528e31fe0e315a880e98273b8df960
|
[
"Apache-2.0"
] | null | null | null |
grr/server/aff4_objects/registry_init.py
|
panhania/grr
|
fe16a7311a528e31fe0e315a880e98273b8df960
|
[
"Apache-2.0"
] | 1
|
2018-05-08T21:15:51.000Z
|
2018-05-08T21:15:51.000Z
|
grr/server/aff4_objects/registry_init.py
|
panhania/grr
|
fe16a7311a528e31fe0e315a880e98273b8df960
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
"""Load all aff4 objects in order to populate the registry.
"""
# pylint: disable=unused-import
from grr.server.aff4_objects import aff4_grr
from grr.server.aff4_objects import collects
from grr.server.aff4_objects import cronjobs
from grr.server.aff4_objects import filestore
from grr.server.aff4_objects import security
from grr.server.aff4_objects import standard
from grr.server.aff4_objects import stats
from grr.server.aff4_objects import stats_store
from grr.server.aff4_objects import user_managers
from grr.server.aff4_objects import users
| 38.066667
| 59
| 0.838879
| 91
| 571
| 5.120879
| 0.340659
| 0.259657
| 0.27897
| 0.364807
| 0.665236
| 0.665236
| 0.150215
| 0
| 0
| 0
| 0
| 0.023301
| 0.098074
| 571
| 14
| 60
| 40.785714
| 0.881553
| 0.187391
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|