hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | 
qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | 
qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f391d9ff35976d65033711cd8a502817c935c662 | 124 | py | Python | Section 8/voice_changer.py | PacktPublishing/Learning-Python-v- | 30fb28dfaaa18815f1b4c0b683e8839da223b195 | [
"MIT"
] | 1 | 2021-10-05T19:45:43.000Z | 2021-10-05T19:45:43.000Z | Section 8/voice_changer.py | PacktPublishing/Learning-Python-v- | 30fb28dfaaa18815f1b4c0b683e8839da223b195 | [
"MIT"
] | null | null | null | Section 8/voice_changer.py | PacktPublishing/Learning-Python-v- | 30fb28dfaaa18815f1b4c0b683e8839da223b195 | [
"MIT"
] | 2 | 2020-09-25T19:56:46.000Z | 2021-09-02T11:14:28.000Z | from sound_conversion import rectomp3
from sound_conversion.rectowma import rec2wma
print rectomp3.rec2mp3()
print rec2wma() | 31 | 45 | 0.862903 | 16 | 124 | 6.5625 | 0.5625 | 0.171429 | 0.361905 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.053097 | 0.08871 | 124 | 4 | 46 | 31 | 0.876106 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.5 | null | null | 0.5 | 1 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 6 |
f3c515428e7599e9bbe295dcc9f923883866a3be | 1,982 | py | Python | coaddit/tests/test_rasterize.py | beckermr/python-r3d | c7dc6f7c53398387ad729248677351a2775016d7 | [
"BSD-3-Clause"
] | null | null | null | coaddit/tests/test_rasterize.py | beckermr/python-r3d | c7dc6f7c53398387ad729248677351a2775016d7 | [
"BSD-3-Clause"
] | 5 | 2019-04-23T11:11:32.000Z | 2019-04-25T13:08:35.000Z | coaddit/tests/test_rasterize.py | beckermr/python-r3d | c7dc6f7c53398387ad729248677351a2775016d7 | [
"BSD-3-Clause"
] | 1 | 2019-04-23T10:51:38.000Z | 2019-04-23T10:51:38.000Z | import numpy as np
import pytest
from coaddit.rasterize import rasterize_poly
@pytest.mark.parametrize('off', [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5])
def test_smoke(off):
# convention here is that last dimension is x then y
verts = np.zeros((4, 2)) + 0.5 + off
verts[1, 0] = 1.5 + off
verts[2, 0] = 1.5 + off
verts[2, 1] = 1.5 + off
verts[3, 1] = 1.5 + off
verts[:, 0] += 2
arr, start_inds = rasterize_poly(verts, 1)
assert start_inds[0] == off + 2
assert start_inds[1] == off
assert np.all(arr == 0.25), arr
@pytest.mark.parametrize('off', [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5])
def test_shift(off):
# convention here is that last dimension is x then y
verts = np.zeros((4, 2)) + off
verts[0, 1] += 0.25
verts[0, 0] += 0.3
verts[1, 1] += 1.25
verts[1, 0] += 0.3
verts[2, 1] += 1.25
verts[2, 0] += 1.3
verts[3, 1] += 0.25
verts[3, 0] += 1.3
verts[:, 0] += 2
area = np.array([
[0.75*0.7, 0.25*0.7],
[0.75*0.3, 0.25*0.3]])
arr, start_inds = rasterize_poly(verts, 1)
assert start_inds[0] == off + 2
assert start_inds[1] == off
assert arr.shape[0] == 2
assert arr.shape[1] == 2
assert np.allclose(arr, area)
@pytest.mark.parametrize('off', [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5])
def test_shift_dims(off):
# convention here is that last dimension is x then y
verts = np.zeros((4, 2)) + off
verts[0, 1] += 0.25
verts[0, 0] += 0.3
verts[1, 1] += 2.25
verts[1, 0] += 0.3
verts[2, 1] += 2.25
verts[2, 0] += 1.3
verts[3, 1] += 0.25
verts[3, 0] += 1.3
verts[:, 0] += 2
area = np.array([
[0.75*0.7, 1 * 0.7, 0.25*0.7],
[0.75*0.3, 1 * 0.3, 0.25*0.3]])
area /= np.sum(area)
arr, start_inds = rasterize_poly(verts, 1)
assert start_inds[0] == off + 2
assert start_inds[1] == off
assert arr.shape[0] == 2
assert arr.shape[1] == 3
assert np.allclose(arr, area)
| 26.426667 | 71 | 0.532291 | 372 | 1,982 | 2.790323 | 0.126344 | 0.023121 | 0.086705 | 0.038536 | 0.88054 | 0.815029 | 0.780347 | 0.780347 | 0.780347 | 0.745665 | 0 | 0.141176 | 0.270938 | 1,982 | 74 | 72 | 26.783784 | 0.577163 | 0.07669 | 0 | 0.603448 | 0 | 0 | 0.004929 | 0 | 0 | 0 | 0 | 0 | 0.224138 | 1 | 0.051724 | false | 0 | 0.051724 | 0 | 0.103448 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
45f3714d11bc4571bdede325c43c816e9fa1253e | 3,992 | py | Python | dask_gdf/tests/test_join.py | jrhemstad/dask_gdf | c49933d72ce39ca720662b5d5124072e63087a62 | [
"Apache-2.0"
] | null | null | null | dask_gdf/tests/test_join.py | jrhemstad/dask_gdf | c49933d72ce39ca720662b5d5124072e63087a62 | [
"Apache-2.0"
] | null | null | null | dask_gdf/tests/test_join.py | jrhemstad/dask_gdf | c49933d72ce39ca720662b5d5124072e63087a62 | [
"Apache-2.0"
] | null | null | null | import pytest
import numpy as np
import pygdf as gd
import dask_gdf as dgd
from functools import partial
param_nrows = [5, 10, 100, 400]
@pytest.mark.parametrize('left_nrows', param_nrows)
@pytest.mark.parametrize('right_nrows', param_nrows)
@pytest.mark.parametrize('left_nkeys', [4, 5])
@pytest.mark.parametrize('right_nkeys', [4, 5])
def test_join_inner(left_nrows, right_nrows, left_nkeys, right_nkeys):
chunksize = 50
np.random.seed(0)
# PyGDF
left = gd.DataFrame({'x': np.random.randint(0, left_nkeys,
size=left_nrows),
'a': np.arange(left_nrows)}.items())
right = gd.DataFrame({'x': np.random.randint(0, right_nkeys,
size=right_nrows),
'a': 1000 * np.arange(right_nrows)}.items())
expect = left.set_index('x').join(right.set_index('x'), how='inner',
sort=True, lsuffix='l', rsuffix='r')
expect = expect.to_pandas()
# Dask GDf
left = dgd.from_pygdf(left, chunksize=chunksize)
right = dgd.from_pygdf(right, chunksize=chunksize)
joined = left.set_index('x').join(right.set_index('x'), how='inner',
lsuffix='l', rsuffix='r')
got = joined.compute().to_pandas()
# Check index
np.testing.assert_array_equal(expect.index.values,
got.index.values)
# Check rows in each groups
expect_rows = {}
got_rows = {}
def gather(df, grows):
grows[df['index'].values[0]] = (set(df.al), set(df.ar))
expect.reset_index().groupby('index')\
.apply(partial(gather, grows=expect_rows))
expect.reset_index().groupby('index')\
.apply(partial(gather, grows=got_rows))
assert got_rows == expect_rows
@pytest.mark.parametrize('left_nrows', param_nrows)
@pytest.mark.parametrize('right_nrows', param_nrows)
@pytest.mark.parametrize('left_nkeys', [4, 5])
@pytest.mark.parametrize('right_nkeys', [4, 5])
@pytest.mark.parametrize('how', ['left', 'right'])
def test_join_left(left_nrows, right_nrows, left_nkeys, right_nkeys, how):
chunksize = 50
np.random.seed(0)
# PyGDF
left = gd.DataFrame({'x': np.random.randint(0, left_nkeys,
size=left_nrows),
'a': np.arange(left_nrows, dtype=np.float64)}.items())
right = gd.DataFrame({'x': np.random.randint(0, right_nkeys,
size=right_nrows),
'a': 1000 * np.arange(right_nrows,
dtype=np.float64)}.items())
expect = left.set_index('x').join(right.set_index('x'), how=how,
sort=True, lsuffix='l', rsuffix='r')
expect = expect.to_pandas()
# Dask GDf
left = dgd.from_pygdf(left, chunksize=chunksize)
right = dgd.from_pygdf(right, chunksize=chunksize)
joined = left.set_index('x').join(right.set_index('x'), how=how,
lsuffix='l', rsuffix='r')
got = joined.compute().to_pandas()
# Check index
np.testing.assert_array_equal(expect.index.values,
got.index.values)
# Check rows in each groups
expect_rows = {}
got_rows = {}
def gather(df, grows):
cola = np.sort(np.asarray(df.al))
colb = np.sort(np.asarray(df.ar))
grows[df['index'].values[0]] = (cola, colb)
expect.reset_index().groupby('index')\
.apply(partial(gather, grows=expect_rows))
expect.reset_index().groupby('index')\
.apply(partial(gather, grows=got_rows))
for k in expect_rows:
np.testing.assert_array_equal(expect_rows[k][0],
got_rows[k][0])
np.testing.assert_array_equal(expect_rows[k][1],
got_rows[k][1])
| 34.413793 | 79 | 0.566132 | 492 | 3,992 | 4.428862 | 0.168699 | 0.041303 | 0.086737 | 0.045893 | 0.888022 | 0.842129 | 0.832492 | 0.832492 | 0.764571 | 0.764571 | 0 | 0.015907 | 0.291333 | 3,992 | 115 | 80 | 34.713043 | 0.75433 | 0.026303 | 0 | 0.641026 | 0 | 0 | 0.041258 | 0 | 0 | 0 | 0 | 0 | 0.064103 | 1 | 0.051282 | false | 0 | 0.064103 | 0 | 0.115385 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
3402af00baeaa7e9710644341719ce1b097668c4 | 174 | py | Python | vectorflow/optimizer/__init__.py | dongrenguang/VectorFlow | 1e08a0cff6e0a282b03356d58cf4bab66339f922 | [
"MIT"
] | null | null | null | vectorflow/optimizer/__init__.py | dongrenguang/VectorFlow | 1e08a0cff6e0a282b03356d58cf4bab66339f922 | [
"MIT"
] | null | null | null | vectorflow/optimizer/__init__.py | dongrenguang/VectorFlow | 1e08a0cff6e0a282b03356d58cf4bab66339f922 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from .optimizer import *
from .gradient_descent import *
from .momentum import *
from .ada_grad import *
from .rms_prop import *
from .adam import *
| 19.333333 | 31 | 0.706897 | 24 | 174 | 5 | 0.583333 | 0.416667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006944 | 0.172414 | 174 | 8 | 32 | 21.75 | 0.826389 | 0.12069 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
34304874e21d7622d049973311acd8a004a3f84a | 225 | py | Python | airship_convert/render/__init__.py | eliasah/airship-convert | 80b414513f7268dcb98e30902cbb992055ce5ccc | [
"MIT"
] | 2 | 2016-06-19T20:36:38.000Z | 2016-06-20T06:08:19.000Z | airship_convert/render/__init__.py | eliasah/airship-convert | 80b414513f7268dcb98e30902cbb992055ce5ccc | [
"MIT"
] | 2 | 2016-08-05T10:01:10.000Z | 2016-08-10T14:00:40.000Z | airship_convert/render/__init__.py | eliasah/airship-convert | 80b414513f7268dcb98e30902cbb992055ce5ccc | [
"MIT"
] | 1 | 2016-08-05T09:05:12.000Z | 2016-08-05T09:05:12.000Z | #! /usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
from airship_convert.render.document import render_doc
from airship_convert.render.source import render_source
| 28.125 | 64 | 0.804444 | 31 | 225 | 5.516129 | 0.645161 | 0.128655 | 0.210526 | 0.280702 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004975 | 0.106667 | 225 | 7 | 65 | 32.142857 | 0.845771 | 0.191111 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0.333333 | 0 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
cab23d2229009e9fe58bce3fb6811ef644a220ed | 40,578 | py | Python | tests/test_bibliometric.py | robertatakenaka/processing | 138389b9d44df92daddeb1107fd78ae7849c0b66 | [
"BSD-2-Clause"
] | 2 | 2016-08-10T13:33:53.000Z | 2019-03-16T04:31:35.000Z | tests/test_bibliometric.py | DalavanCloud/processing | 629b50b45ba7a176651cd3bfcdb441dab6fddfcc | [
"BSD-2-Clause"
] | 18 | 2015-05-25T14:15:18.000Z | 2021-12-13T19:50:55.000Z | tests/test_bibliometric.py | DalavanCloud/processing | 629b50b45ba7a176651cd3bfcdb441dab6fddfcc | [
"BSD-2-Clause"
] | 5 | 2015-05-21T19:31:05.000Z | 2019-03-16T04:31:42.000Z | import unittest
from bibliometric import citedby_journal
class TestBibliometric(unittest.TestCase):
def test_compute_citations(self):
query_result = {
"took": 1857,
"hits": {
"max_score": 0.0,
"hits": [],
"total": 195
},
"aggregations": {
"publication_year": {
"sum_other_doc_count": 0,
"buckets": [
{
"reference_publication_year": {
"sum_other_doc_count": 0,
"buckets": [
{
"doc_count": 1,
"key": "2012"
},
{
"doc_count": 2,
"key": "2011"
},
{
"doc_count": 1,
"key": "2010"
},
{
"doc_count": 2,
"key": "2008"
},
{
"doc_count": 3,
"key": "2007"
},
{
"doc_count": 2,
"key": "2005"
},
{
"doc_count": 1,
"key": "2003"
},
{
"doc_count": 1,
"key": "2001"
},
{
"doc_count": 1,
"key": "1998"
},
{
"doc_count": 1,
"key": "1997"
},
{
"doc_count": 1,
"key": "1993"
},
{
"doc_count": 1,
"key": "1990"
},
{
"doc_count": 3,
"key": "1988"
},
{
"doc_count": 1,
"key": "1986"
},
{
"doc_count": 2,
"key": "1980"
},
{
"doc_count": 1,
"key": "1979"
},
{
"doc_count": 1,
"key": "1973"
}
],
"doc_count_error_upper_bound": 0
},
"doc_count": 25,
"key": "2012"
},
{
"reference_publication_year": {
"sum_other_doc_count": 0,
"buckets": [
{
"doc_count": 1,
"key": "2013"
},
{
"doc_count": 3,
"key": "2012"
},
{
"doc_count": 1,
"key": "2006"
},
{
"doc_count": 1,
"key": "2005"
},
{
"doc_count": 1,
"key": "2004"
},
{
"doc_count": 1,
"key": "2002"
},
{
"doc_count": 1,
"key": "1998"
},
{
"doc_count": 2,
"key": "1996"
},
{
"doc_count": 1,
"key": "1995"
},
{
"doc_count": 2,
"key": "1993"
},
{
"doc_count": 5,
"key": "1992"
},
{
"doc_count": 1,
"key": "1989"
},
{
"doc_count": 1,
"key": "1988"
},
{
"doc_count": 1,
"key": "1981"
},
{
"doc_count": 1,
"key": "1979"
}
],
"doc_count_error_upper_bound": 0
},
"doc_count": 23,
"key": "2015"
},
{
"reference_publication_year": {
"sum_other_doc_count": 0,
"buckets": [
{
"doc_count": 2,
"key": "2011"
},
{
"doc_count": 2,
"key": "2009"
},
{
"doc_count": 1,
"key": "2008"
},
{
"doc_count": 1,
"key": "2006"
},
{
"doc_count": 1,
"key": "2005"
},
{
"doc_count": 2,
"key": "2000"
},
{
"doc_count": 2,
"key": "1997"
},
{
"doc_count": 1,
"key": "1994"
},
{
"doc_count": 1,
"key": "1993"
},
{
"doc_count": 1,
"key": "1988"
},
{
"doc_count": 1,
"key": "1986"
},
{
"doc_count": 1,
"key": "1984"
},
{
"doc_count": 1,
"key": "1981"
},
{
"doc_count": 1,
"key": "1980"
},
{
"doc_count": 2,
"key": "1974"
}
],
"doc_count_error_upper_bound": 0
},
"doc_count": 20,
"key": "2013"
},
{
"reference_publication_year": {
"sum_other_doc_count": 0,
"buckets": [
{
"doc_count": 2,
"key": "2012"
},
{
"doc_count": 3,
"key": "2010"
},
{
"doc_count": 1,
"key": "2009"
},
{
"doc_count": 1,
"key": "2008"
},
{
"doc_count": 1,
"key": "2006"
},
{
"doc_count": 2,
"key": "2002"
},
{
"doc_count": 1,
"key": "1989"
},
{
"doc_count": 2,
"key": "1988"
},
{
"doc_count": 3,
"key": "1984"
},
{
"doc_count": 2,
"key": "1980"
},
{
"doc_count": 1,
"key": "1972"
}
],
"doc_count_error_upper_bound": 0
},
"doc_count": 19,
"key": "2014"
},
{
"reference_publication_year": {
"sum_other_doc_count": 0,
"buckets": [
{
"doc_count": 2,
"key": "2013"
},
{
"doc_count": 1,
"key": "2012"
},
{
"doc_count": 3,
"key": "2011"
},
{
"doc_count": 1,
"key": "2010"
},
{
"doc_count": 2,
"key": "2009"
},
{
"doc_count": 3,
"key": "2005"
},
{
"doc_count": 1,
"key": "2004"
},
{
"doc_count": 1,
"key": "2003"
},
{
"doc_count": 1,
"key": "1998"
},
{
"doc_count": 1,
"key": "1997"
},
{
"doc_count": 1,
"key": "1995"
},
{
"doc_count": 1,
"key": "1959"
}
],
"doc_count_error_upper_bound": 0
},
"doc_count": 18,
"key": "2016"
},
{
"reference_publication_year": {
"sum_other_doc_count": 0,
"buckets": [
{
"doc_count": 1,
"key": "1996"
},
{
"doc_count": 1,
"key": "1994"
},
{
"doc_count": 2,
"key": "1990"
},
{
"doc_count": 2,
"key": "1989"
},
{
"doc_count": 1,
"key": "1988"
},
{
"doc_count": 1,
"key": "1985"
},
{
"doc_count": 1,
"key": "1984"
},
{
"doc_count": 1,
"key": "1980"
},
{
"doc_count": 1,
"key": "1975"
},
{
"doc_count": 1,
"key": "1970"
},
{
"doc_count": 1,
"key": "1968"
}
],
"doc_count_error_upper_bound": 0
},
"doc_count": 13,
"key": "2007"
},
{
"reference_publication_year": {
"sum_other_doc_count": 0,
"buckets": [
{
"doc_count": 1,
"key": "2008"
},
{
"doc_count": 1,
"key": "2006"
},
{
"doc_count": 1,
"key": "2005"
},
{
"doc_count": 1,
"key": "2002"
},
{
"doc_count": 2,
"key": "2001"
},
{
"doc_count": 1,
"key": "1994"
},
{
"doc_count": 1,
"key": "1988"
},
{
"doc_count": 1,
"key": "1985"
},
{
"doc_count": 1,
"key": "1984"
},
{
"doc_count": 1,
"key": "1982"
},
{
"doc_count": 2,
"key": "1974"
}
],
"doc_count_error_upper_bound": 0
},
"doc_count": 13,
"key": "2011"
},
{
"reference_publication_year": {
"sum_other_doc_count": 0,
"buckets": [
{
"doc_count": 1,
"key": "2006"
},
{
"doc_count": 1,
"key": "2003"
},
{
"doc_count": 1,
"key": "2001"
},
{
"doc_count": 1,
"key": "2000"
},
{
"doc_count": 1,
"key": "1998"
},
{
"doc_count": 1,
"key": "1997"
},
{
"doc_count": 1,
"key": "1995"
},
{
"doc_count": 1,
"key": "1993"
},
{
"doc_count": 1,
"key": "1991"
},
{
"doc_count": 1,
"key": "1989"
},
{
"doc_count": 1,
"key": "1984"
},
{
"doc_count": 1,
"key": "1980"
}
],
"doc_count_error_upper_bound": 0
},
"doc_count": 12,
"key": "2010"
},
{
"reference_publication_year": {
"sum_other_doc_count": 0,
"buckets": [
{
"doc_count": 1,
"key": "2008"
},
{
"doc_count": 2,
"key": "2007"
},
{
"doc_count": 1,
"key": "2005"
},
{
"doc_count": 1,
"key": "2004"
},
{
"doc_count": 1,
"key": "2002"
},
{
"doc_count": 1,
"key": "1998"
},
{
"doc_count": 2,
"key": "1990"
},
{
"doc_count": 1,
"key": "1989"
},
{
"doc_count": 1,
"key": "1968"
}
],
"doc_count_error_upper_bound": 0
},
"doc_count": 11,
"key": "2009"
},
{
"reference_publication_year": {
"sum_other_doc_count": 0,
"buckets": [
{
"doc_count": 1,
"key": "1989"
},
{
"doc_count": 1,
"key": "1988"
},
{
"doc_count": 2,
"key": "1986"
},
{
"doc_count": 2,
"key": "1975"
},
{
"doc_count": 2,
"key": "1974"
}
],
"doc_count_error_upper_bound": 0
},
"doc_count": 8,
"key": "1999"
},
{
"reference_publication_year": {
"sum_other_doc_count": 0,
"buckets": [
{
"doc_count": 1,
"key": "1994"
},
{
"doc_count": 2,
"key": "1991"
},
{
"doc_count": 1,
"key": "1989"
},
{
"doc_count": 1,
"key": "1986"
},
{
"doc_count": 1,
"key": "1979"
}
],
"doc_count_error_upper_bound": 0
},
"doc_count": 6,
"key": "1998"
},
{
"reference_publication_year": {
"sum_other_doc_count": 0,
"buckets": [
{
"doc_count": 1,
"key": "2003"
},
{
"doc_count": 1,
"key": "1996"
},
{
"doc_count": 1,
"key": "1995"
},
{
"doc_count": 1,
"key": "1994"
},
{
"doc_count": 1,
"key": "1991"
}
],
"doc_count_error_upper_bound": 0
},
"doc_count": 6,
"key": "2006"
},
{
"reference_publication_year": {
"sum_other_doc_count": 0,
"buckets": [
{
"doc_count": 1,
"key": "1999"
},
{
"doc_count": 1,
"key": "1990"
},
{
"doc_count": 1,
"key": "1986"
},
{
"doc_count": 1,
"key": "1974"
}
],
"doc_count_error_upper_bound": 0
},
"doc_count": 5,
"key": "2000"
},
{
"reference_publication_year": {
"sum_other_doc_count": 0,
"buckets": [
{
"doc_count": 1,
"key": "2004"
},
{
"doc_count": 1,
"key": "2003"
},
{
"doc_count": 1,
"key": "1994"
},
{
"doc_count": 1,
"key": "1981"
},
{
"doc_count": 1,
"key": "1964"
}
],
"doc_count_error_upper_bound": 0
},
"doc_count": 5,
"key": "2008"
},
{
"reference_publication_year": {
"sum_other_doc_count": 0,
"buckets": [
{
"doc_count": 1,
"key": "1995"
},
{
"doc_count": 1,
"key": "1992"
},
{
"doc_count": 1,
"key": "1983"
},
{
"doc_count": 1,
"key": "1971"
}
],
"doc_count_error_upper_bound": 0
},
"doc_count": 4,
"key": "2004"
},
{
"reference_publication_year": {
"sum_other_doc_count": 0,
"buckets": [
{
"doc_count": 1,
"key": "1989"
},
{
"doc_count": 1,
"key": "1979"
}
],
"doc_count_error_upper_bound": 0
},
"doc_count": 2,
"key": "2001"
},
{
"reference_publication_year": {
"sum_other_doc_count": 0,
"buckets": [
{
"doc_count": 1,
"key": "1996"
},
{
"doc_count": 1,
"key": "1989"
}
],
"doc_count_error_upper_bound": 0
},
"doc_count": 2,
"key": "2005"
},
{
"reference_publication_year": {
"sum_other_doc_count": 0,
"buckets": [
{
"doc_count": 1,
"key": "1994"
}
],
"doc_count_error_upper_bound": 0
},
"doc_count": 1,
"key": "1997"
},
{
"reference_publication_year": {
"sum_other_doc_count": 0,
"buckets": [
{
"doc_count": 1,
"key": "1972"
}
],
"doc_count_error_upper_bound": 0
},
"doc_count": 1,
"key": "2002"
},
{
"reference_publication_year": {
"sum_other_doc_count": 0,
"buckets": [
{
"doc_count": 1,
"key": "1992"
}
],
"doc_count_error_upper_bound": 0
},
"doc_count": 1,
"key": "2003"
}
],
"doc_count_error_upper_bound": 0
}
},
"timed_out": False,
"_shards": {
"failed": 0,
"total": 5,
"successful": 5
}
}
result = citedby_journal.compute_citations(query_result)
self.assertEqual([('2012', ('2012', 1)), ('2012', ('2011', 2)), ('2012', ('2010', 1)), ('2012', ('2008', 2)), ('2012', ('2007', 3)), ('2012', ('2005', 2)), ('2012', ('2003', 1)), ('2012', ('2001', 1)), ('2012', ('1998', 1)), ('2012', ('1997', 1)), ('2012', ('1993', 1)), ('2012', ('1990', 1)), ('2012', ('1988', 3)), ('2012', ('1986', 1)), ('2012', ('1980', 2)), ('2012', ('1979', 1)), ('2012', ('1973', 1)), ('2015', ('2013', 1)), ('2015', ('2012', 3)), ('2015', ('2006', 1)), ('2015', ('2005', 1)), ('2015', ('2004', 1)), ('2015', ('2002', 1)), ('2015', ('1998', 1)), ('2015', ('1996', 2)), ('2015', ('1995', 1)), ('2015', ('1993', 2)), ('2015', ('1992', 5)), ('2015', ('1989', 1)), ('2015', ('1988', 1)), ('2015', ('1981', 1)), ('2015', ('1979', 1)), ('2013', ('2011', 2)), ('2013', ('2009', 2)), ('2013', ('2008', 1)), ('2013', ('2006', 1)), ('2013', ('2005', 1)), ('2013', ('2000', 2)), ('2013', ('1997', 2)), ('2013', ('1994', 1)), ('2013', ('1993', 1)), ('2013', ('1988', 1)), ('2013', ('1986', 1)), ('2013', ('1984', 1)), ('2013', ('1981', 1)), ('2013', ('1980', 1)), ('2013', ('1974', 2)), ('2014', ('2012', 2)), ('2014', ('2010', 3)), ('2014', ('2009', 1)), ('2014', ('2008', 1)), ('2014', ('2006', 1)), ('2014', ('2002', 2)), ('2014', ('1989', 1)), ('2014', ('1988', 2)), ('2014', ('1984', 3)), ('2014', ('1980', 2)), ('2014', ('1972', 1)), ('2016', ('2013', 2)), ('2016', ('2012', 1)), ('2016', ('2011', 3)), ('2016', ('2010', 1)), ('2016', ('2009', 2)), ('2016', ('2005', 3)), ('2016', ('2004', 1)), ('2016', ('2003', 1)), ('2016', ('1998', 1)), ('2016', ('1997', 1)), ('2016', ('1995', 1)), ('2016', ('1959', 1)), ('2007', ('1996', 1)), ('2007', ('1994', 1)), ('2007', ('1990', 2)), ('2007', ('1989', 2)), ('2007', ('1988', 1)), ('2007', ('1985', 1)), ('2007', ('1984', 1)), ('2007', ('1980', 1)), ('2007', ('1975', 1)), ('2007', ('1970', 1)), ('2007', ('1968', 1)), ('2011', ('2008', 1)), ('2011', ('2006', 1)), ('2011', ('2005', 1)), ('2011', ('2002', 1)), ('2011', ('2001', 2)), 
('2011', ('1994', 1)), ('2011', ('1988', 1)), ('2011', ('1985', 1)), ('2011', ('1984', 1)), ('2011', ('1982', 1)), ('2011', ('1974', 2)), ('2010', ('2006', 1)), ('2010', ('2003', 1)), ('2010', ('2001', 1)), ('2010', ('2000', 1)), ('2010', ('1998', 1)), ('2010', ('1997', 1)), ('2010', ('1995', 1)), ('2010', ('1993', 1)), ('2010', ('1991', 1)), ('2010', ('1989', 1)), ('2010', ('1984', 1)), ('2010', ('1980', 1)), ('2009', ('2008', 1)), ('2009', ('2007', 2)), ('2009', ('2005', 1)), ('2009', ('2004', 1)), ('2009', ('2002', 1)), ('2009', ('1998', 1)), ('2009', ('1990', 2)), ('2009', ('1989', 1)), ('2009', ('1968', 1)), ('1999', ('1989', 1)), ('1999', ('1988', 1)), ('1999', ('1986', 2)), ('1999', ('1975', 2)), ('1999', ('1974', 2)), ('1998', ('1994', 1)), ('1998', ('1991', 2)), ('1998', ('1989', 1)), ('1998', ('1986', 1)), ('1998', ('1979', 1)), ('2006', ('2003', 1)), ('2006', ('1996', 1)), ('2006', ('1995', 1)), ('2006', ('1994', 1)), ('2006', ('1991', 1)), ('2000', ('1999', 1)), ('2000', ('1990', 1)), ('2000', ('1986', 1)), ('2000', ('1974', 1)), ('2008', ('2004', 1)), ('2008', ('2003', 1)), ('2008', ('1994', 1)), ('2008', ('1981', 1)), ('2008', ('1964', 1)), ('2004', ('1995', 1)), ('2004', ('1992', 1)), ('2004', ('1983', 1)), ('2004', ('1971', 1)), ('2001', ('1989', 1)), ('2001', ('1979', 1)), ('2005', ('1996', 1)), ('2005', ('1989', 1)), ('1997', ('1994', 1)), ('2002', ('1972', 1)), ('2003', ('1992', 1))], result)
| 49.007246 | 3,438 | 0.152299 | 1,647 | 40,578 | 3.530055 | 0.063752 | 0.288958 | 0.179567 | 0.239422 | 0.670279 | 0.665807 | 0.649467 | 0.616099 | 0.588751 | 0.514792 | 0 | 0.217624 | 0.746907 | 40,578 | 827 | 3,439 | 49.066505 | 0.348491 | 0 | 0 | 0.513415 | 0 | 0 | 0.137907 | 0.026788 | 0 | 0 | 0 | 0 | 0.00122 | 1 | 0.00122 | false | 0 | 0.002439 | 0 | 0.004878 | 0 | 0 | 0 | 0 | null | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
1b1ee8e8bc13330b11c81e9955cb1e677fea76be | 50,896 | py | Python | pirates/leveleditor/worldData/port_royal_area_jungle_a_1.py | Willy5s/Pirates-Online-Rewritten | 7434cf98d9b7c837d57c181e5dabd02ddf98acb7 | [
"BSD-3-Clause"
] | 81 | 2018-04-08T18:14:24.000Z | 2022-01-11T07:22:15.000Z | pirates/leveleditor/worldData/port_royal_area_jungle_a_1.py | Willy5s/Pirates-Online-Rewritten | 7434cf98d9b7c837d57c181e5dabd02ddf98acb7 | [
"BSD-3-Clause"
] | 4 | 2018-09-13T20:41:22.000Z | 2022-01-08T06:57:00.000Z | pirates/leveleditor/worldData/port_royal_area_jungle_a_1.py | Willy5s/Pirates-Online-Rewritten | 7434cf98d9b7c837d57c181e5dabd02ddf98acb7 | [
"BSD-3-Clause"
] | 26 | 2018-05-26T12:49:27.000Z | 2021-09-11T09:11:59.000Z | from pandac.PandaModules import Point3, VBase3, Vec4, Vec3
objectStruct = {'Interact Links': [['1176150400.0dxschafe', '1176151296.0dxschafe', 'Bi-directional'], ['1176149760.0dxschafe', '1176151040.0dxschafe0', 'Bi-directional'], ['1186437632.0dxschafe0', '1176151168.0dxschafe0', 'Bi-directional'], ['1190848640.0dxschafe', '1176150912.0dxschafe', 'Bi-directional'], ['1170568896.56sdnaik', '1176151040.0dxschafe1', 'Bi-directional'], ['1176151424.0dxschafe', '1189818368.0dxschafe1', 'Bi-directional'], ['1176151040.0dxschafe', '1189818496.0dxschafe', 'Bi-directional']],'Objects': {'1169592956.59sdnaik': {'Type': 'Island Game Area','Name': 'port_royal_area_jungle_a_1','File': '','AdditionalData': ['JungleAreaA'],'Environment': 'Jungle','Footstep Sound': 'Sand','Instanced': True,'Minimap': True,'Minimap Prefix': 'minimap','Objects': {'1165004689.08sdnaik': {'Type': 'Locator Node','Name': 'portal_interior_1','Hpr': VBase3(-81.0, 0.0, 0.0),'Pos': Point3(36.719, 255.714, 7.06),'Scale': VBase3(1.0, 1.0, 1.0)},'1165004689.08sdnaik0': {'Type': 'Locator Node','Name': 'portal_interior_2','Hpr': VBase3(142.379, 0.0, 0.0),'Pos': Point3(837.183, 5.167, 52.393),'Scale': VBase3(1.0, 1.0, 1.0)},'1165004689.09sdnaik': {'Type': 'Locator Node','Name': 'portal_interior_3','Hpr': VBase3(-79.736, 0.0, 0.0),'Pos': Point3(380.725, 407.485, 61.219),'Scale': VBase3(1.0, 1.0, 1.0)},'1169864696.7sdnaik': {'Type': 'Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Min Population': '3','Pause Chance': 100,'Pause Duration': 30,'Pos': Point3(733.749, 1.651, 50.962),'Scale': VBase3(1.0, 1.0, 1.0),'Spawnables': 'Low Skeleton','Team': '1','Visual': {'Color': (0, 0, 0.65, 1),'Model': 'models/misc/smiley'}},'1169864772.36sdnaik': {'Type': 'Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pause Chance': 100,'Pause Duration': 30,'Pos': Point3(620.991, 29.674, 52.211),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1169864776.48sdnaik': {'Type': 'Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pause Chance': 100,'Pause 
Duration': 30,'Pos': Point3(597.214, 91.4, 53.221),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1170568896.56sdnaik': {'Type': 'Spawn Node','Aggro Radius': '23.7952','AnimSet': 'default','Hpr': VBase3(-103.415, 0.0, 0.0),'Min Population': '1','Patrol Radius': '12.0000','Pause Chance': 100,'Pause Duration': 30,'Pos': Point3(31.895, 92.81, 4.309),'PoseAnim': '','PoseFrame': '','PropLeft': 'None','PropRight': 'None','Scale': VBase3(1.0, 1.0, 1.0),'Spawnables': 'Gator T2','Start State': 'Patrol','StartFrame': '0','Team': '1','TrailFX': 'None','TrailLeft': 'None','TrailRight': 'None','VisSize': '','Visual': {'Color': (0, 0, 0.65, 1),'Model': 'models/misc/smiley'}},'1170568957.41sdnaik': {'Type': 'Spawn Node','Aggro Radius': '43.3735','AnimSet': 'default','Hpr': Point3(0.0, 0.0, 0.0),'Min Population': '1','Patrol Radius': '12.0000','Pause Chance': '100','Pause Duration': '30','Pos': Point3(-6.53, 41.535, -3.0),'PoseAnim': '','PoseFrame': '','PropLeft': 'None','PropRight': 'None','Scale': VBase3(1.0, 1.0, 1.0),'Spawnables': 'Gator T2','Start State': 'Idle','StartFrame': '0','Team': '1','TrailFX': 'None','TrailLeft': 'None','TrailRight': 'None','VisSize': '','Visual': {'Color': (0.0, 0.0, 0.65, 1.0),'Model': 'models/misc/smiley'}},'1176148480.0dxschafe0': {'Type': 'Cemetary','DisableCollision': True,'Holiday': '','Hpr': VBase3(154.974, 1.297, 0.15),'Pos': Point3(589.824, -56.073, 57.693),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Model': 'models/props/crypt1'}},'1176148864.0dxschafe': {'Type': 'Cemetary','DisableCollision': False,'Hpr': VBase3(78.046, 1.755, 3.812),'Pos': Point3(493.344, -44.369, 68.356),'Scale': VBase3(1.283, 1.283, 1.283),'Visual': {'Color': (0.45, 0.5600000023841858, 0.5, 1.0),'Model': 'models/props/crypt1'}},'1176148864.0dxschafe0': {'Type': 'Cemetary','DisableCollision': False,'Hpr': VBase3(160.357, 1.624, -5.263),'Pos': Point3(543.854, -28.695, 63.444),'Scale': VBase3(1.0, 1.0, 
1.0),'Visual': {'Color': (0.68, 0.78, 0.699999988079071, 1.0),'Model': 'models/props/crypt2'}},'1176148992.0dxschafe': {'Type': 'Cemetary','DisableCollision': False,'Hpr': VBase3(75.365, 4.04, 1.779),'Pos': Point3(503.033, 1.363, 67.429),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.84, 0.84, 0.8899999856948853, 1.0),'Model': 'models/props/crypt1'}},'1176148992.0dxschafe0': {'Type': 'Cemetary','DisableCollision': False,'Hpr': VBase3(77.998, -0.824, -1.069),'Pos': Point3(499.149, -21.763, 68.374),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.68, 0.78, 0.699999988079071, 1.0),'Model': 'models/props/crypt2'}},'1176149120.0dxschafe': {'Type': 'Cemetary','DisableCollision': False,'Hpr': VBase3(0.0, 0.0, 0.829),'Pos': Point3(525.913, -44.581, 64.968),'Scale': VBase3(0.859, 0.859, 0.859),'Visual': {'Color': (0.54, 0.61, 0.54, 1.0),'Model': 'models/props/crypt2'}},'1176149248.0dxschafe': {'Type': 'Cemetary','DisableCollision': True,'Holiday': '','Hpr': VBase3(3.103, 11.818, 1.32),'Pos': Point3(575.616, -47.032, 59.663),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Model': 'models/props/pir_m_prp_cem_headstones_a'}},'1176149248.0dxschafe1': {'Type': 'Cemetary','DisableCollision': False,'Hpr': VBase3(-2.017, 17.936, 6.525),'Pos': Point3(532.038, -23.401, 64.861),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/pir_m_prp_cem_headstones_c'}},'1176149248.0dxschafe2': {'Type': 'Cemetary','DisableCollision': False,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(481.68, 20.009, 68.876),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/pir_m_prp_cem_headstones_a'}},'1176149248.0dxschafe3': {'Type': 'Cemetary','DisableCollision': False,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(475.017, 30.925, 68.573),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/pir_m_prp_cem_headstones_a'}},'1176149248.0dxschafe4': {'Type': 'Cemetary','DisableCollision': False,'Hpr': VBase3(0.0, 0.0, 0.0),'Pos': Point3(484.846, 43.778, 
68.439),'Scale': VBase3(1.353, 0.829, 1.353),'Visual': {'Model': 'models/props/pir_m_prp_cem_headstones_c'}},'1176149248.0dxschafe5': {'Type': 'Cemetary','DisableCollision': False,'Hpr': VBase3(30.81, -0.178, 4.357),'Pos': Point3(514.043, -52.091, 66.93),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/pir_m_prp_cem_headstones_d'}},'1176149376.0dxschafe': {'Type': 'Cemetary','DisableCollision': False,'Holiday': '','Hpr': VBase3(-2.674, 8.699, 5.439),'Pos': Point3(559.009, -37.012, 63.264),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Model': 'models/props/pir_m_prp_cem_headstones_d'}},'1176149376.0dxschafe0': {'Type': 'Cemetary','DisableCollision': False,'Hpr': VBase3(179.925, 2.392, -3.647),'Pos': Point3(492.727, 31.86, 68.929),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/pir_m_prp_cem_headstones_d'}},'1176149376.0dxschafe1': {'Type': 'Cemetary','DisableCollision': False,'Hpr': VBase3(-1.972, -2.512, 3.565),'Pos': Point3(483.233, 0.806, 68.502),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/pir_m_prp_cem_headstones_d'}},'1176149376.0dxschafe2': {'Type': 'Cemetary','DisableCollision': False,'Holiday': '','Hpr': VBase3(179.925, 2.392, -3.647),'Pos': Point3(578.936, -31.555, 59.109),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Model': 'models/props/pir_m_prp_cem_headstones_d'}},'1176149376.0dxschafe3': {'Type': 'Cemetary','DisableCollision': False,'Holiday': '','Hpr': VBase3(179.925, 2.392, -3.647),'Pos': Point3(607.105, -40.696, 55.137),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Model': 'models/props/pir_m_prp_cem_headstones_b'}},'1176149376.0dxschafe4': {'Type': 'Cemetary','DisableCollision': False,'Hpr': VBase3(168.194, 3.084, -3.084),'Pos': Point3(503.077, 18.34, 67.33),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/pir_m_prp_cem_headstones_b'}},'1176149760.0dxschafe': {'Type': 'Spawn Node','Aggro Radius': '12.0000','AnimSet': 'barrel_hide','Hpr': 
VBase3(-24.419, 0.0, 0.0),'Min Population': '1','Patrol Radius': '12.0000','Pause Chance': '100','Pause Duration': '30','Pos': Point3(501.158, -29.033, 68.153),'PoseAnim': '','PoseFrame': '','PropLeft': 'None','PropRight': 'None','Scale': VBase3(1.0, 1.0, 1.0),'Spawnables': 'Skel T2','Start State': 'Idle','StartFrame': '0','Team': '1','TrailFX': 'None','TrailLeft': 'None','TrailRight': 'None','VisSize': '','Visual': {'Color': (0, 0, 0.65, 1),'Model': 'models/misc/smiley'}},'1176149760.0dxschafe1': {'Type': 'Spawn Node','Aggro Radius': '5.1205','AnimSet': 'attack_sword_thrust','Hpr': VBase3(-103.082, 0.0, 0.0),'Min Population': '1','Patrol Radius': '12.0000','Pause Chance': '100','Pause Duration': '30','Pos': Point3(558.375, -5.971, 61.411),'PoseAnim': '','PoseFrame': '','PropLeft': 'None','PropRight': 'None','Scale': VBase3(1.0, 1.0, 1.0),'Spawnables': 'Skel T2','Start State': 'Idle','StartFrame': '0','Team': '1','TrailFX': 'None','TrailLeft': 'None','TrailRight': 'None','VisSize': '','Visual': {'Color': (0, 0, 0.65, 1),'Model': 'models/misc/smiley'}},'1176150400.0dxschafe': {'Type': 'Spawn Node','Aggro Radius': '28.6145','AnimSet': 'default','Hpr': VBase3(88.48, 0.0, 0.0),'Min Population': '1','Patrol Radius': '12.0000','Pause Chance': '100','Pause Duration': '30','Pos': Point3(289.982, 128.808, 51.564),'PoseAnim': '','PoseFrame': '','PropLeft': 'None','PropRight': 'None','Scale': VBase3(1.0, 1.0, 1.0),'Spawnables': 'Skel T1','Start State': 'Ambush','StartFrame': '0','Team': '1','TrailFX': 'None','TrailLeft': 'None','TrailRight': 'None','VisSize': '','Visual': {'Color': (0, 0, 0.65, 1),'Model': 'models/misc/smiley'}},'1176150912.0dxschafe': {'Type': 'Object Spawn Node','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(637.963, 183.674, 51.985),'Priority': '1','Scale': VBase3(1.0, 1.0, 1.0),'SpawnDelay': '20','Spawnables': 'Buried Treasure','VisSize': '','Visual': {'Color': (0.8, 0.2, 0.65, 1),'Model': 'models/misc/smiley'},'startingDepth': '12'},'1176151040.0dxschafe': 
{'Type': 'Object Spawn Node','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(737.016, -59.593, 52.442),'Priority': '1','Scale': VBase3(1.0, 1.0, 1.0),'SpawnDelay': '20','Spawnables': 'Buried Treasure','VisSize': '','Visual': {'Color': (0.8, 0.2, 0.65, 1),'Model': 'models/misc/smiley'},'startingDepth': '12'},'1176151040.0dxschafe0': {'Type': 'Object Spawn Node','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(470.416, -12.157, 67.315),'Priority': '1','Scale': VBase3(1.0, 1.0, 1.0),'SpawnDelay': '20','Spawnables': 'Buried Treasure','VisSize': '','Visual': {'Color': (0.8, 0.2, 0.65, 1),'Model': 'models/misc/smiley'},'startingDepth': '12'},'1176151040.0dxschafe1': {'Type': 'Object Spawn Node','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(237.601, 21.204, 47.292),'Priority': '1','Scale': VBase3(1.0, 1.0, 1.0),'SpawnDelay': '20','Spawnables': 'Buried Treasure','VisSize': '','Visual': {'Color': (0.8, 0.2, 0.65, 1),'Model': 'models/misc/smiley'},'startingDepth': '12'},'1176151168.0dxschafe0': {'Type': 'Object Spawn Node','Hpr': VBase3(0.0, 0.0, 348.69),'Pos': Point3(133.933, -13.197, 24.6),'Priority': '1','Scale': VBase3(1.0, 1.0, 1.0),'SpawnDelay': '20','Spawnables': 'Buried Treasure','VisSize': '','Visual': {'Color': (0.8, 0.2, 0.65, 1),'Model': 'models/misc/smiley'},'startingDepth': '12'},'1176151296.0dxschafe': {'Type': 'Object Spawn Node','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(221.094, 105.586, 45.707),'Priority': '1','Scale': VBase3(1.0, 1.0, 1.0),'SpawnDelay': '20','Spawnables': 'Buried Treasure','VisSize': '','Visual': {'Color': (0.8, 0.2, 0.65, 1),'Model': 'models/misc/smiley'},'startingDepth': '12'},'1176151424.0dxschafe': {'Type': 'Object Spawn Node','Hpr': VBase3(0.0, 0.0, 344.745),'Pos': Point3(130.778, 222.77, 23.3),'Priority': '1','Scale': VBase3(1.0, 1.0, 1.0),'SpawnDelay': '20','Spawnables': 'Buried Treasure','VisSize': '','Visual': {'Color': (0.8, 0.2, 0.65, 1),'Model': 'models/misc/smiley'},'startingDepth': '12'},'1176152576.0dxschafe': {'Type': 'Player Spawn 
Node','Hpr': VBase3(-110.794, 0.0, 0.0),'Index': -1,'Pos': Point3(67.109, 188.629, 7.04),'Scale': VBase3(1.0, 1.0, 1.0),'Spawnables': 'All','Visual': {'Color': (0.5, 0.5, 0.5, 1),'Model': 'models/misc/smiley'}},'1176152576.0dxschafe0': {'Type': 'Player Spawn Node','Hpr': VBase3(-35.243, 0.0, 0.0),'Index': -1,'Pos': Point3(161.021, 57.254, 31.595),'Scale': VBase3(1.0, 1.0, 1.0),'Spawnables': 'All','Visual': {'Color': (0.5, 0.5, 0.5, 1),'Model': 'models/misc/smiley'}},'1176152576.0dxschafe1': {'Type': 'Player Spawn Node','Hpr': VBase3(47.992, 0.0, 0.0),'Index': -1,'Pos': Point3(160.3, 117.521, 31.287),'Scale': VBase3(1.0, 1.0, 1.0),'Spawnables': 'All','Visual': {'Color': (0.5, 0.5, 0.5, 1),'Model': 'models/misc/smiley'}},'1176152576.0dxschafe2': {'Type': 'Player Spawn Node','Hpr': VBase3(132.865, 0.0, 0.0),'Index': -1,'Pos': Point3(282.828, 239.854, 50.727),'Scale': VBase3(1.0, 1.0, 1.0),'Spawnables': 'All','Visual': {'Color': (0.5, 0.5, 0.5, 1),'Model': 'models/misc/smiley'}},'1176152576.0dxschafe3': {'Type': 'Player Spawn Node','Hpr': VBase3(124.091, 0.0, 0.0),'Index': -1,'Pos': Point3(366.997, 307.201, 57.806),'Scale': VBase3(1.0, 1.0, 1.0),'Spawnables': 'All','Visual': {'Color': (0.5, 0.5, 0.5, 1),'Model': 'models/misc/smiley'}},'1176152576.0dxschafe4': {'Type': 'Player Spawn Node','Hpr': VBase3(-143.415, 0.0, 0.0),'Index': -1,'Pos': Point3(427.239, 273.645, 63.037),'Scale': VBase3(1.0, 1.0, 1.0),'Spawnables': 'All','Visual': {'Color': (0.5, 0.5, 0.5, 1),'Model': 'models/misc/smiley'}},'1176152576.0dxschafe5': {'Type': 'Player Spawn Node','Hpr': VBase3(117.526, 0.0, 0.0),'Index': -1,'Pos': Point3(754.507, 148.526, 52.011),'Scale': VBase3(1.0, 1.0, 1.0),'Spawnables': 'All','Visual': {'Color': (0.5, 0.5, 0.5, 1),'Model': 'models/misc/smiley'}},'1176152576.0dxschafe6': {'Type': 'Player Spawn Node','Hpr': VBase3(121.304, 0.0, 0.0),'Index': -1,'Pos': Point3(817.012, 115.24, 52.055),'Scale': VBase3(1.0, 1.0, 1.0),'Spawnables': 'All','Visual': {'Color': (0.5, 0.5, 0.5, 
1),'Model': 'models/misc/smiley'}},'1176152576.0dxschafe7': {'Type': 'Player Spawn Node','Hpr': VBase3(100.061, 0.0, 0.0),'Index': -1,'Pos': Point3(768.328, 54.363, 52.198),'Scale': VBase3(1.0, 1.0, 1.0),'Spawnables': 'All','Visual': {'Color': (0.5, 0.5, 0.5, 1),'Model': 'models/misc/smiley'}},'1176152576.0dxschafe8': {'Type': 'Player Spawn Node','Hpr': VBase3(177.285, 0.0, 0.0),'Index': -1,'Pos': Point3(566.416, 110.113, 60.237),'Scale': VBase3(1.0, 1.0, 1.0),'Spawnables': 'All','Visual': {'Color': (0.5, 0.5, 0.5, 1),'Model': 'models/misc/smiley'}},'1176152704.0dxschafe': {'Type': 'Player Spawn Node','Hpr': VBase3(-105.179, 0.0, 0.0),'Index': -1,'Pos': Point3(471.942, 99.084, 67.221),'Scale': VBase3(1.0, 1.0, 1.0),'Spawnables': 'All','Visual': {'Color': (0.5, 0.5, 0.5, 1),'Model': 'models/misc/smiley'}},'1176152704.0dxschafe0': {'Type': 'Player Spawn Node','Hpr': VBase3(-176.719, 0.0, 0.0),'Index': -1,'Pos': Point3(549.958, 169.569, 66.193),'Scale': VBase3(1.0, 1.0, 1.0),'Spawnables': 'All','Visual': {'Color': (0.5, 0.5, 0.5, 1),'Model': 'models/misc/smiley'}},'1178565602.31kmuller': {'Type': 'Bush','DisableCollision': False,'Hpr': VBase3(46.96, 41.05, -22.42),'Pos': Point3(16.33, 254.797, 46.86),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.6000000238418579, 0.6000000238418579, 0.6000000238418579, 1.0),'Model': 'models/vegetation/bush_i'}},'1178565658.95kmuller': {'Type': 'Bush','DisableCollision': False,'Hpr': VBase3(0.0, 51.894, 0.0),'Pos': Point3(24.752, 255.026, 49.259),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.4000000059604645, 0.4000000059604645, 0.4000000059604645, 1.0),'Model': 'models/vegetation/bush_i'}},'1178565697.08kmuller': {'Type': 'Bush','DisableCollision': False,'Hpr': VBase3(6.536, 0.0, 12.729),'Pos': Point3(2.531, 252.514, 53.183),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/vegetation/bush_a'}},'1178565746.51kmuller': {'Type': 'Rock','DisableCollision': False,'Hpr': VBase3(79.77, -65.612, 90.0),'Pos': 
Point3(24.414, 251.111, 30.358),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.5, 0.5, 0.5, 1.0),'Model': 'models/props/rock_caveA_floor'}},'1178662352.26kmuller': {'Type': 'Rock','DisableCollision': False,'Hpr': VBase3(95.332, -2.39, -172.01),'Pos': Point3(25.641, 258.083, 58.994),'Scale': VBase3(1.146, 1.146, 1.146),'Visual': {'Color': (0.699999988079071, 0.699999988079071, 0.699999988079071, 1.0),'Model': 'models/props/rock_caveB_sphere'}},'1185924312.49kmuller': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': VBase3(170.071, 0.0, 0.0),'Pos': Point3(485.019, -35.0, 68.032),'Scale': VBase3(1.0, 1.0, 1.995),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1185924346.47kmuller': {'Type': 'Bush','DisableCollision': False,'Hpr': VBase3(71.495, 0.0, 0.0),'Pos': Point3(485.137, -37.819, 68.37),'Scale': VBase3(0.637, 0.637, 0.764),'Visual': {'Model': 'models/vegetation/bush_b'}},'1186183680.0dxschafe0': {'Type': 'Bush','DisableCollision': True,'Hpr': VBase3(-79.745, 0.0, 0.0),'Pos': Point3(-65.715, 103.347, 11.261),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/vegetation/bush_a'}},'1186183808.0dxschafe': {'Type': 'Bush','DisableCollision': True,'Hpr': VBase3(57.869, 0.0, 0.0),'Pos': Point3(-73.734, 92.182, 11.466),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/vegetation/bush_a'}},'1186437632.0dxschafe0': {'Type': 'Spawn Node','Aggro Radius': '23.4940','AnimSet': 'default','Hpr': Point3(0.0, 0.0, 0.0),'Min Population': '1','Patrol Radius': '12.0000','Pause Chance': '100','Pause Duration': '30','Pos': Point3(89.153, 16.511, 4.66),'PoseAnim': '','PoseFrame': '','PropLeft': 'None','PropRight': 'None','Scale': VBase3(1.0, 1.0, 1.0),'Spawnables': 'Gator T2','Start State': 'Patrol','StartFrame': '0','Team': '1','TrailFX': 'None','TrailLeft': 'None','TrailRight': 'None','VisSize': '','Visual': {'Color': (0, 0, 0.65, 1),'Model': 'models/misc/smiley'}},'1187140736.0dchiappe': {'Type': 'Spawn Node','Aggro Radius': 
'12.0000','AnimSet': 'attack_bayonetA','Hpr': VBase3(76.799, 0.0, 0.0),'Min Population': '1','Patrol Radius': '12.0000','Pause Chance': '100','Pause Duration': '30','Pos': Point3(570.376, -9.138, 60.014),'PoseAnim': '','PoseFrame': '','PropLeft': 'None','PropRight': 'None','Scale': VBase3(1.0, 1.0, 1.0),'Spawnables': 'Navy T2','Start State': 'Idle','StartFrame': '0','Team': 'default','TrailFX': 'None','TrailLeft': 'None','TrailRight': 'None','VisSize': '','Visual': {'Color': (0, 0, 0.65, 1),'Model': 'models/misc/smiley'}},'1187140864.0dchiappe': {'Type': 'Effect Node','EffectName': 'steam_effect','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(571.207, -6.76, 59.911),'Scale': VBase3(0.642, 0.642, 0.642),'Visual': {'Color': (0, 0, 0.65, 1),'Model': 'models/misc/smiley'}},'1187140864.0dchiappe0': {'Type': 'Spawn Node','Aggro Radius': '8.1325','AnimSet': 'attack_bayonetB','Hpr': VBase3(-162.984, 0.0, 0.0),'Min Population': '1','Patrol Radius': '1.0000','Pause Chance': '100','Pause Duration': '30','Pos': Point3(572.465, -11.501, 59.773),'PoseAnim': '','PoseFrame': '','PropLeft': 'None','PropRight': 'None','Scale': VBase3(1.0, 1.0, 1.0),'Spawnables': 'Navy T1','Start State': 'Idle','StartFrame': '0','Team': 'default','TrailFX': 'None','TrailLeft': 'None','TrailRight': 'None','VisSize': '','Visual': {'Color': (0, 0, 0.65, 1),'Model': 'models/misc/smiley'}},'1187140992.0dchiappe': {'Type': 'Spawn Node','Aggro Radius': '6.3253','AnimSet': 'attack_sword_slash','Hpr': VBase3(23.445, 0.0, 0.0),'Min Population': '1','Patrol Radius': '12.0000','Pause Chance': '100','Pause Duration': '30','Pos': Point3(576.834, -18.302, 59.276),'PoseAnim': '','PoseFrame': '','PropLeft': 'None','PropRight': 'None','Scale': VBase3(1.0, 1.0, 1.0),'Spawnables': 'Skel T2','Start State': 'Idle','StartFrame': '0','Team': 'default','TrailFX': 'None','TrailLeft': 'None','TrailRight': 'None','VisSize': '','Visual': {'Color': (0, 0, 0.65, 1),'Model': 'models/misc/smiley'}},'1187141504.0dchiappe': {'Type': 
'Light - Dynamic','Attenuation': '0.005','ConeAngle': '120.0000','DropOff': '5.4217','FlickRate': '0.5843','Flickering': False,'Hpr': VBase3(0.948, -63.625, 0.0),'Intensity': '2.0000','LightType': 'SPOT','Pos': Point3(571.772, -16.611, 93.696),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (1.0, 0.48, 0.09, 1.0),'Model': 'models/props/light_tool_bulb'}},'1187142016.0dchiappe': {'Type': 'Light_Fixtures','DisableCollision': False,'Hpr': VBase3(111.577, 0.0, 0.0),'Pos': Point3(439.986, 89.297, 64.502),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/torch'}},'1187142016.0dchiappe0': {'Type': 'Light_Fixtures','DisableCollision': False,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(436.88, 61.088, 64.292),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/torch'}},'1187142784.0dchiappe': {'Type': 'Spawn Node','Aggro Radius': '12.0000','AnimSet': 'barrel_hide','Hpr': Point3(0.0, 0.0, 0.0),'Min Population': '1','Patrol Radius': '1.9277','Pause Chance': '100','Pause Duration': '30','Pos': Point3(492.648, 42.563, 69.005),'PoseAnim': '','PoseFrame': '','PropLeft': 'None','PropRight': 'None','Scale': VBase3(1.0, 1.0, 1.0),'Spawnables': 'Skel T2','Start State': 'Idle','StartFrame': '0','Team': 'default','TrailFX': 'None','TrailLeft': 'None','TrailRight': 'None','VisSize': '','Visual': {'Color': (0, 0, 0.65, 1),'Model': 'models/misc/smiley'}},'1187143040.0dchiappe': {'Type': 'Spawn Node','Aggro Radius': '12.0000','AnimSet': 'barrel_hide','Hpr': VBase3(-65.423, 0.0, 0.0),'Min Population': '1','Patrol Radius': '12.0000','Pause Chance': '100','Pause Duration': '30','Pos': Point3(654.219, 121.787, 52.104),'PoseAnim': '','PoseFrame': '','PropLeft': 'None','PropRight': 'None','Scale': VBase3(1.0, 1.0, 1.0),'Spawnables': 'Skel T2','Start State': 'Idle','StartFrame': '0','Team': 'default','TrailFX': 'None','TrailLeft': 'None','TrailRight': 'None','VisSize': '','Visual': {'Color': (0, 0, 0.65, 1),'Model': 'models/misc/smiley'}},'1187143552.0dchiappe': 
{'Type': 'Spawn Node','Aggro Radius': '12.0000','AnimSet': 'barrel_hide','Hpr': VBase3(93.057, 0.0, 0.0),'Min Population': '1','Patrol Radius': '12.0000','Pause Chance': '100','Pause Duration': '30','Pos': Point3(632.377, 121.03, 52.495),'PoseAnim': '','PoseFrame': '','PropLeft': 'None','PropRight': 'None','Scale': VBase3(1.0, 1.0, 1.0),'Spawnables': 'Skel T2','Start State': 'Idle','StartFrame': '0','Team': 'default','TrailFX': 'None','TrailLeft': 'None','TrailRight': 'None','VisSize': '','Visual': {'Color': (0, 0, 0.65, 1),'Model': 'models/misc/smiley'}},'1187143552.0dchiappe0': {'Type': 'Spawn Node','Aggro Radius': '12.0000','AnimSet': 'default','Hpr': VBase3(-178.849, 0.0, 0.0),'Min Population': '1','Patrol Radius': '12.0000','Pause Chance': '100','Pause Duration': '30','Pos': Point3(643.189, 120.535, 52.111),'PoseAnim': '','PoseFrame': '','PropLeft': 'None','PropRight': 'None','Scale': VBase3(1.0, 1.0, 1.0),'Spawnables': 'Skel T2','Start State': 'Ambush','StartFrame': '0','Team': 'default','TrailFX': 'None','TrailLeft': 'None','TrailRight': 'None','VisSize': '','Visual': {'Color': (0, 0, 0.65, 1),'Model': 'models/misc/smiley'}},'1187207168.0dchiappe0': {'Type': 'Grass','DisableCollision': False,'Hpr': VBase3(167.1, 0.0, 0.0),'Pos': Point3(571.213, 13.062, 59.871),'Scale': VBase3(0.569, 0.569, 0.569),'Visual': {'Model': 'models/vegetation/grass_18feet'}},'1187207296.0dchiappe': {'Type': 'Light_Fixtures','DisableCollision': False,'Hpr': VBase3(-114.294, 0.0, 0.0),'Pos': Point3(343.019, 357.975, 55.649),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/torch'}},'1187207296.0dchiappe0': {'Type': 'Light_Fixtures','DisableCollision': False,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(407.098, 358.443, 61.14),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/torch'}},'1187207424.0dchiappe': {'Type': 'Spawn Node','Aggro Radius': '12.0000','AnimSet': 'attack_sword_lunge','Hpr': VBase3(160.365, 0.0, 0.0),'Min Population': '1','Patrol Radius': 
'12.0000','Pause Chance': 100,'Pause Duration': 30,'Pos': Point3(467.678, 136.283, 66.78),'PoseAnim': '','PoseFrame': '','PropLeft': 'None','PropRight': 'None','Scale': VBase3(1.0, 1.0, 1.0),'Spawnables': 'Skel T1','Start State': 'Idle','StartFrame': '0','Team': 'default','TrailFX': 'None','TrailLeft': 'None','TrailRight': 'None','VisSize': '','Visual': {'Color': (0, 0, 0.65, 1),'Model': 'models/misc/smiley'}},'1187207424.0dchiappe0': {'Type': 'Spawn Node','Aggro Radius': '12.0000','AnimSet': 'attack_sword_slash','Hpr': VBase3(-46.212, 0.0, 0.0),'Min Population': '1','Patrol Radius': '12.0000','Pause Chance': 100,'Pause Duration': 30,'Pos': Point3(463.942, 126.53, 66.48),'PoseAnim': '','PoseFrame': '','PropLeft': 'None','PropRight': 'None','Scale': VBase3(1.0, 1.0, 1.0),'Spawnables': 'Navy T3','Start State': 'Idle','StartFrame': '0','Team': 'default','TrailFX': 'None','TrailLeft': 'None','TrailRight': 'None','VisSize': '','Visual': {'Color': (0, 0, 0.65, 1),'Model': 'models/misc/smiley'}},'1187207424.0dchiappe1': {'Type': 'Spawn Node','Aggro Radius': '12.0000','AnimSet': 'attack_sword_thrust','Hpr': VBase3(101.575, 0.0, 0.0),'Min Population': '1','Patrol Radius': '12.0000','Pause Chance': 100,'Pause Duration': 30,'Pos': Point3(471.03, 130.706, 67.079),'PoseAnim': '','PoseFrame': '','PropLeft': 'None','PropRight': 'None','Scale': VBase3(1.0, 1.0, 1.0),'Spawnables': 'Skel T1','Start State': 'Idle','StartFrame': '0','Team': 'default','TrailFX': 'None','TrailLeft': 'None','TrailRight': 'None','VisSize': '','Visual': {'Color': (0, 0, 0.65, 1),'Model': 'models/misc/smiley'}},'1187212800.0dchiappe1': {'Type': 'Light_Fixtures','DisableCollision': False,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(30.123, 189.272, 8.177),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/torch'}},'1187212800.0dchiappe2': {'Type': 'Light_Fixtures','DisableCollision': False,'Hpr': VBase3(87.209, 0.0, 0.0),'Pos': Point3(84.42, 192.4, 11.291),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': 
{'Model': 'models/props/torch'}},'1187640576.0dchiappe': {'Type': 'Spawn Node','Aggro Radius': '4.8193','AnimSet': 'attack_sword_lunge','Hpr': VBase3(93.492, 0.0, 0.0),'Min Population': '1','Patrol Radius': '12.0000','Pause Chance': 100,'Pause Duration': 30,'Pos': Point3(420.426, 226.245, 62.549),'PoseAnim': '','PoseFrame': '','PropLeft': 'None','PropRight': 'None','Scale': VBase3(1.0, 1.0, 1.0),'Spawnables': 'Skel T1','Start State': 'Idle','StartFrame': '0','Team': 'default','TrailFX': 'None','TrailLeft': 'None','TrailRight': 'None','VisSize': '','Visual': {'Color': (0, 0, 0.65, 1),'Model': 'models/misc/smiley'}},'1187640704.0dchiappe': {'Type': 'Spawn Node','Aggro Radius': '5.4217','AnimSet': 'attack_bayonetA','Hpr': VBase3(-87.385, 0.0, 0.0),'Min Population': '1','Patrol Radius': '12.0000','Pause Chance': 100,'Pause Duration': 30,'Pos': Point3(408.791, 224.918, 61.554),'PoseAnim': '','PoseFrame': '','PropLeft': 'None','PropRight': 'None','Scale': VBase3(1.0, 1.0, 1.0),'Spawnables': 'Navy T2','Start State': 'Idle','StartFrame': '0','Team': 'default','TrailFX': 'None','TrailLeft': 'None','TrailRight': 'None','VisSize': '','Visual': {'Color': (0, 0, 0.65, 1),'Model': 'models/misc/smiley'}},'1187641088.0dchiappe': {'Type': 'Cart','DisableCollision': False,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(32.745, 195.377, 8.087),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/pir_m_prp_mkt_wheelbarrow'}},'1187641088.0dchiappe0': {'Type': 'Rock','DisableCollision': False,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(32.427, 192.242, 7.722),'Scale': VBase3(0.213, 0.213, 0.213),'Visual': {'Model': 'models/props/rockpile_cave_stone'}},'1187641088.0dchiappe1': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': VBase3(0.0, 0.0, 0.0),'Pos': Point3(32.82, 194.445, 8.086),'Scale': VBase3(0.803, 0.803, 0.803),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_sphere'}},'1189818368.0dxschafe': {'Type': 'Spawn Node','Aggro Radius': '16.5663','AnimSet': 
'gp_chant_a','Hpr': VBase3(30.29, 0.0, 0.0),'Min Population': '1','Patrol Radius': '15.8795','Pause Chance': '100','Pause Duration': '30','Pos': Point3(412.625, 310.428, 61.711),'PoseAnim': '','PoseFrame': '','PropLeft': 'None','PropRight': 'None','Scale': VBase3(1.0, 1.0, 1.0),'Spawnables': 'Skel T1','Start State': 'Idle','StartFrame': '0','Team': 'default','TrailFX': 'None','TrailLeft': 'None','TrailRight': 'None','VisSize': '','Visual': {'Color': (0, 0, 0.65, 1),'Model': 'models/misc/smiley'}},'1189818368.0dxschafe0': {'Type': 'Spawn Node','Aggro Radius': '4.8193','AnimSet': 'default','Hpr': VBase3(-39.282, 0.0, 0.0),'Min Population': '1','Patrol Radius': '12.0000','Pause Chance': 100,'Pause Duration': 30,'Pos': Point3(228.014, 215.324, 46.078),'PoseAnim': '','PoseFrame': '','PropLeft': 'None','PropRight': 'None','Scale': VBase3(1.0, 1.0, 1.0),'Spawnables': 'Skel T1','Start State': 'Idle','StartFrame': '0','Team': 'default','TrailFX': 'None','TrailLeft': 'None','TrailRight': 'None','VisSize': '','Visual': {'Color': (0, 0, 0.65, 1),'Model': 'models/misc/smiley'}},'1189818368.0dxschafe1': {'Type': 'Spawn Node','Aggro Radius': '50.0000','AnimSet': 'default','Hpr': VBase3(46.48, 0.0, 0.0),'Min Population': '1','Patrol Radius': '12.0000','Pause Chance': '100','Pause Duration': '30','Pos': Point3(106.511, 141.197, 17.169),'PoseAnim': '','PoseFrame': '','PropLeft': 'None','PropRight': 'None','Scale': VBase3(1.0, 1.0, 1.0),'Spawnables': 'Skel T2','Start State': 'Idle','StartFrame': '0','Team': 'default','TrailFX': 'None','TrailLeft': 'None','TrailRight': 'None','VisSize': '','Visual': {'Color': (0, 0, 0.65, 1),'Model': 'models/misc/smiley'}},'1189818496.0dxschafe': {'Type': 'Spawn Node','Aggro Radius': '12.0000','AnimSet': 'default','Hpr': VBase3(-96.308, 0.0, 0.0),'Min Population': '1','Patrol Radius': '5.0060','Pause Chance': '100','Pause Duration': '30','Pos': Point3(733.364, 19.069, 52.283),'PoseAnim': '','PoseFrame': '','PropLeft': 'None','PropRight': 
'None','Scale': VBase3(1.0, 1.0, 1.0),'Spawnables': 'Skel T2','Start State': 'Patrol','StartFrame': '0','Team': 'default','TrailFX': 'None','TrailLeft': 'None','TrailRight': 'None','VisSize': '','Visual': {'Color': (0, 0, 0.65, 1),'Model': 'models/misc/smiley'}},'1189818496.0dxschafe0': {'Type': 'Spawn Node','Aggro Radius': '42.1687','AnimSet': 'default','Hpr': VBase3(-147.559, 0.0, 0.0),'Min Population': '1','Patrol Radius': '5.6928','Pause Chance': '100','Pause Duration': '5','Pos': Point3(772.363, 89.309, 52.137),'PoseAnim': '','PoseFrame': '','PropLeft': 'None','PropRight': 'None','Scale': VBase3(1.0, 1.0, 1.0),'Spawnables': 'Skel T2','Start State': 'Idle','StartFrame': '0','Team': 'default','TrailFX': 'None','TrailLeft': 'None','TrailRight': 'None','VisSize': '','Visual': {'Color': (0, 0, 0.65, 1),'Model': 'models/misc/smiley'}},'1190848640.0dxschafe': {'Type': 'Spawn Node','AnimSet': 'default','Hpr': Point3(0.0, 0.0, 0.0),'Min Population': '1','Patrol Radius': '12.0000','Pause Chance': 100,'Pause Duration': 30,'Pos': Point3(641.886, 168.292, 52.014),'PoseAnim': '','PoseFrame': '','PropLeft': 'None','PropRight': 'None','Scale': VBase3(1.0, 1.0, 1.0),'Spawnables': 'Skel T2','Start State': 'Ambush','StartFrame': '0','Team': 'default','TrailFX': 'None','TrailLeft': 'None','TrailRight': 'None','VisSize': '','Visual': {'Color': (0, 0, 0.65, 1),'Model': 'models/misc/smiley'}},'1192580224.0dxschafe': {'Type': 'Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pause Chance': '0','Pause Duration': '5','Pos': Point3(88.211, 82.731, 12.519),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1192580224.0dxschafe0': {'Type': 'Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pause Chance': '0','Pause Duration': '5','Pos': Point3(15.268, 125.271, 8.734),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1192644992.0dxschafe': {'Type': 'Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pause 
Chance': '0','Pause Duration': '5','Pos': Point3(-57.868, 78.26, 2.587),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1192645120.0dxschafe': {'Type': 'Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pause Chance': '0','Pause Duration': '5','Pos': Point3(-31.06, 181.146, 10.074),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1192645248.0dxschafe': {'Type': 'Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pause Chance': '0','Pause Duration': '5','Pos': Point3(141.053, 22.063, 26.451),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1192645248.0dxschafe0': {'Type': 'Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pause Chance': '0','Pause Duration': '5','Pos': Point3(226.135, 205.253, 45.938),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1193084928.0dxschafe': {'Type': 'Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pause Chance': '0','Pause Duration': '5','Pos': Point3(572.667, 87.001, 59.551),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1193084928.0dxschafe0': {'Type': 'Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pause Chance': '0','Pause Duration': '5','Pos': Point3(509.968, -28.143, 67.121),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1193085056.0dxschafe': {'Type': 'Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pause Chance': '0','Pause Duration': '5','Pos': Point3(324.389, 89.664, 55.419),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1193085056.0dxschafe0': {'Type': 'Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pause Chance': '0','Pause Duration': '5','Pos': Point3(543.677, -13.187, 63.146),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.65, 0, 0, 1),'Model': 
'models/misc/smiley'}},'1193085056.0dxschafe1': {'Type': 'Spawn Node','Aggro Radius': '16.5663','AnimSet': 'gp_chant_a','Hpr': VBase3(30.29, 0.0, 0.0),'Min Population': '1','Patrol Radius': '15.8795','Pause Chance': '100','Pause Duration': '30','Pos': Point3(501.256, 187.634, 67.705),'PoseAnim': '','PoseFrame': '','PropLeft': 'None','PropRight': 'None','Scale': VBase3(1.0, 1.0, 1.0),'Spawnables': 'Skel T1','Start State': 'Idle','StartFrame': '0','Team': 'default','TrailFX': 'None','TrailLeft': 'None','TrailRight': 'None','VisSize': '','Visual': {'Color': (0, 0, 0.65, 1),'Model': 'models/misc/smiley'}},'1193085056.0dxschafe2': {'Type': 'Spawn Node','Aggro Radius': '16.5663','AnimSet': 'gp_chant_a','Hpr': VBase3(-7.304, 0.0, 0.0),'Min Population': '1','Patrol Radius': '15.8795','Pause Chance': '100','Pause Duration': '30','Pos': Point3(508.346, -46.097, 67.256),'PoseAnim': '','PoseFrame': '','PropLeft': 'None','PropRight': 'None','Scale': VBase3(1.0, 1.0, 1.0),'Spawnables': 'Skel T2','Start State': 'Idle','StartFrame': '0','Team': 'default','TrailFX': 'None','TrailLeft': 'None','TrailRight': 'None','VisSize': '','Visual': {'Color': (0, 0, 0.65, 1),'Model': 'models/misc/smiley'}},'1193085184.0dxschafe': {'Type': 'Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pause Chance': '0','Pause Duration': '5','Pos': Point3(518.104, 65.355, 66.067),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1193085312.0dxschafe': {'Type': 'Spawn Node','Aggro Radius': '12.0000','AnimSet': 'default','Hpr': VBase3(157.792, 0.0, 0.0),'Min Population': '1','Patrol Radius': '12.0000','Pause Chance': '100','Pause Duration': '30','Pos': Point3(640.191, 125.478, 52.102),'PoseAnim': '','PoseFrame': '','PropLeft': 'None','PropRight': 'None','Scale': VBase3(1.0, 1.0, 1.0),'Spawnables': 'Skel T2','Start State': 'Ambush','StartFrame': '0','Team': 'default','TrailFX': 'None','TrailLeft': 'None','TrailRight': 'None','VisSize': '','Visual': {'Color': (0, 0, 
0.65, 1),'Model': 'models/misc/smiley'}},'1193085312.0dxschafe0': {'Type': 'Movement Node','Hpr': VBase3(-17.857, 0.0, 0.0),'Pause Chance': '0','Pause Duration': '5','Pos': Point3(591.75, -51.137, 59.631),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1193085440.0dxschafe': {'Type': 'Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pause Chance': '0','Pause Duration': '5','Pos': Point3(513.053, -0.977, 66.705),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1193085440.0dxschafe0': {'Type': 'Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pause Chance': '0','Pause Duration': '5','Pos': Point3(689.906, 29.589, 52.278),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1193085568.0dxschafe': {'Type': 'Movement Node','Hpr': Point3(0.0, 0.0, 0.0),'Pause Chance': '0','Pause Duration': '5','Pos': Point3(492.643, 49.319, 68.992),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.65, 0, 0, 1),'Model': 'models/misc/smiley'}},'1230925964.91kmuller': {'Type': 'Collision Barrier','DisableCollision': False,'Holiday': '','Hpr': VBase3(163.887, 0.0, 0.0),'Pos': Point3(598.102, -48.937, 54.544),'Scale': VBase3(3.927, 2.563, 3.586),'VisSize': '','Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1230926058.2kmuller': {'Type': 'Collision Barrier','DisableCollision': False,'Holiday': '','Hpr': VBase3(-2.415, 0.0, 0.0),'Pos': Point3(572.306, -48.934, 58.883),'Scale': VBase3(2.898, 2.405, 2.583),'VisSize': '','Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_cube'}}},'Visibility': 'Grid','Visual': {'Model': 'models/jungles/jungle_a_zero'}}},'TodSettings': {'AmbientColors': {0: Vec4(0.45, 0.53, 0.65, 1),2: Vec4(1, 1, 1, 1),4: Vec4(0.4, 0.45, 0.5, 1),6: Vec4(0.44, 0.45, 0.56, 1),8: Vec4(0.39, 0.42, 0.54, 1),12: Vec4(0.34, 0.28, 0.41, 1),13: Vec4(0.34, 0.28, 0.41, 1),14: Vec4(0.66, 0.76, 0.41, 1),15: Vec4(0.66, 0.76, 
0.41, 1),16: Vec4(0.25, 0.25, 0.25, 1),17: Vec4(0.66, 0.76, 0.41, 1)},'DirectionalColors': {0: Vec4(0.55, 0.46, 0.35, 1),2: Vec4(1, 1, 1, 1),4: Vec4(0.6, 0.34, 0.1, 1),6: Vec4(0.46, 0.48, 0.45, 1),8: Vec4(0.42, 0.42, 0.4, 1),12: Vec4(0.66, 0.76, 0.05, 1),13: Vec4(0.66, 0.76, 0.05, 1),14: Vec4(0.3, 0.2, 0.53, 1),15: Vec4(0.3, 0.2, 0.53, 1),16: Vec4(0, 0, 0, 1),17: Vec4(0.3, 0.2, 0.53, 1)},'FogColors': {0: Vec4(0.3, 0.2, 0.15, 0),2: Vec4(0.6, 0.694118, 0.894118, 1),4: Vec4(0.3, 0.18, 0.15, 0),6: Vec4(0.15, 0.2, 0.35, 0),8: Vec4(0.05, 0.06, 0.17, 0),12: Vec4(0.1, 0.12, 0.03, 0),13: Vec4(0.1, 0.12, 0.03, 0),14: Vec4(0.1, 0.12, 0.03, 0),15: Vec4(0.1, 0.12, 0.03, 0),16: Vec4(0.25, 0.25, 0.25, 1),17: Vec4(0.1, 0.12, 0.03, 0)},'FogRanges': {0: 0.0001,2: 9.999999747378752e-05,4: 0.0001,6: 0.0001,8: 0.0002,12: 0.00025,13: 0.00025,14: 0.00025,15: 0.00025,16: 0.0001,17: 0.005},'LinearFogRanges': {0: (0.0, 100.0),2: (0.0, 100.0),4: (0.0, 100.0),6: (0.0, 100.0),8: (0.0, 100.0),12: (0.0, 100.0),13: (0.0, 100.0),14: (0.0, 100.0),15: (0.0, 100.0),16: (0.0, 100.0),17: (0.0, 100.0)}},'Node Links': [['1169864776.48sdnaik', '1169864772.36sdnaik', 'Bi-directional'], ['1192580224.0dxschafe', '1186437632.0dxschafe0', 'Bi-directional'], ['1192580224.0dxschafe0', '1192580224.0dxschafe', 'Bi-directional'], ['1192644992.0dxschafe', '1192580224.0dxschafe0', 'Bi-directional'], ['1192645120.0dxschafe', '1176150400.0dxschafe', 'Bi-directional'], ['1192645248.0dxschafe', '1192645248.0dxschafe0', 'Bi-directional'], ['1189818368.0dxschafe1', '1192645248.0dxschafe', 'Bi-directional'], ['1189818368.0dxschafe1', '1192645248.0dxschafe0', 'Bi-directional'], ['1193084928.0dxschafe', '1193084928.0dxschafe0', 'Bi-directional'], ['1189818368.0dxschafe', '1193084928.0dxschafe', 'Bi-directional'], ['1193085056.0dxschafe2', '1193085184.0dxschafe', 'Bi-directional'], ['1193085184.0dxschafe', '1193085056.0dxschafe', 'Bi-directional'], ['1193085056.0dxschafe0', '1193085056.0dxschafe1', 'Bi-directional'], 
['1193085312.0dxschafe0', '1193085312.0dxschafe', 'Bi-directional'], ['1193085440.0dxschafe0', '1189818496.0dxschafe0', 'Bi-directional'], ['1193085440.0dxschafe0', '1193085440.0dxschafe', 'Bi-directional'], ['1189818496.0dxschafe', '1193085568.0dxschafe', 'Bi-directional']],'Layers': {},'ObjectIds': {'1165004689.08sdnaik': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1165004689.08sdnaik"]','1165004689.08sdnaik0': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1165004689.08sdnaik0"]','1165004689.09sdnaik': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1165004689.09sdnaik"]','1169592956.59sdnaik': '["Objects"]["1169592956.59sdnaik"]','1169864696.7sdnaik': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1169864696.7sdnaik"]','1169864772.36sdnaik': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1169864772.36sdnaik"]','1169864776.48sdnaik': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1169864776.48sdnaik"]','1170568896.56sdnaik': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1170568896.56sdnaik"]','1170568957.41sdnaik': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1170568957.41sdnaik"]','1176148480.0dxschafe0': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1176148480.0dxschafe0"]','1176148864.0dxschafe': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1176148864.0dxschafe"]','1176148864.0dxschafe0': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1176148864.0dxschafe0"]','1176148992.0dxschafe': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1176148992.0dxschafe"]','1176148992.0dxschafe0': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1176148992.0dxschafe0"]','1176149120.0dxschafe': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1176149120.0dxschafe"]','1176149248.0dxschafe': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1176149248.0dxschafe"]','1176149248.0dxschafe1': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1176149248.0dxschafe1"]','1176149248.0dxschafe2': 
'["Objects"]["1169592956.59sdnaik"]["Objects"]["1176149248.0dxschafe2"]','1176149248.0dxschafe3': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1176149248.0dxschafe3"]','1176149248.0dxschafe4': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1176149248.0dxschafe4"]','1176149248.0dxschafe5': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1176149248.0dxschafe5"]','1176149376.0dxschafe': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1176149376.0dxschafe"]','1176149376.0dxschafe0': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1176149376.0dxschafe0"]','1176149376.0dxschafe1': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1176149376.0dxschafe1"]','1176149376.0dxschafe2': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1176149376.0dxschafe2"]','1176149376.0dxschafe3': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1176149376.0dxschafe3"]','1176149376.0dxschafe4': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1176149376.0dxschafe4"]','1176149760.0dxschafe': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1176149760.0dxschafe"]','1176149760.0dxschafe1': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1176149760.0dxschafe1"]','1176150400.0dxschafe': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1176150400.0dxschafe"]','1176150912.0dxschafe': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1176150912.0dxschafe"]','1176151040.0dxschafe': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1176151040.0dxschafe"]','1176151040.0dxschafe0': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1176151040.0dxschafe0"]','1176151040.0dxschafe1': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1176151040.0dxschafe1"]','1176151168.0dxschafe0': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1176151168.0dxschafe0"]','1176151296.0dxschafe': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1176151296.0dxschafe"]','1176151424.0dxschafe': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1176151424.0dxschafe"]','1176152576.0dxschafe': 
'["Objects"]["1169592956.59sdnaik"]["Objects"]["1176152576.0dxschafe"]','1176152576.0dxschafe0': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1176152576.0dxschafe0"]','1176152576.0dxschafe1': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1176152576.0dxschafe1"]','1176152576.0dxschafe2': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1176152576.0dxschafe2"]','1176152576.0dxschafe3': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1176152576.0dxschafe3"]','1176152576.0dxschafe4': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1176152576.0dxschafe4"]','1176152576.0dxschafe5': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1176152576.0dxschafe5"]','1176152576.0dxschafe6': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1176152576.0dxschafe6"]','1176152576.0dxschafe7': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1176152576.0dxschafe7"]','1176152576.0dxschafe8': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1176152576.0dxschafe8"]','1176152704.0dxschafe': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1176152704.0dxschafe"]','1176152704.0dxschafe0': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1176152704.0dxschafe0"]','1178565602.31kmuller': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1178565602.31kmuller"]','1178565658.95kmuller': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1178565658.95kmuller"]','1178565697.08kmuller': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1178565697.08kmuller"]','1178565746.51kmuller': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1178565746.51kmuller"]','1178662352.26kmuller': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1178662352.26kmuller"]','1185924312.49kmuller': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1185924312.49kmuller"]','1185924346.47kmuller': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1185924346.47kmuller"]','1186183680.0dxschafe0': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1186183680.0dxschafe0"]','1186183808.0dxschafe': 
'["Objects"]["1169592956.59sdnaik"]["Objects"]["1186183808.0dxschafe"]','1186437632.0dxschafe0': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1186437632.0dxschafe0"]','1187140736.0dchiappe': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1187140736.0dchiappe"]','1187140864.0dchiappe': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1187140864.0dchiappe"]','1187140864.0dchiappe0': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1187140864.0dchiappe0"]','1187140992.0dchiappe': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1187140992.0dchiappe"]','1187141504.0dchiappe': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1187141504.0dchiappe"]','1187142016.0dchiappe': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1187142016.0dchiappe"]','1187142016.0dchiappe0': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1187142016.0dchiappe0"]','1187142784.0dchiappe': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1187142784.0dchiappe"]','1187143040.0dchiappe': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1187143040.0dchiappe"]','1187143552.0dchiappe': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1187143552.0dchiappe"]','1187143552.0dchiappe0': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1187143552.0dchiappe0"]','1187207168.0dchiappe0': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1187207168.0dchiappe0"]','1187207296.0dchiappe': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1187207296.0dchiappe"]','1187207296.0dchiappe0': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1187207296.0dchiappe0"]','1187207424.0dchiappe': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1187207424.0dchiappe"]','1187207424.0dchiappe0': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1187207424.0dchiappe0"]','1187207424.0dchiappe1': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1187207424.0dchiappe1"]','1187212800.0dchiappe1': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1187212800.0dchiappe1"]','1187212800.0dchiappe2': 
'["Objects"]["1169592956.59sdnaik"]["Objects"]["1187212800.0dchiappe2"]','1187640576.0dchiappe': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1187640576.0dchiappe"]','1187640704.0dchiappe': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1187640704.0dchiappe"]','1187641088.0dchiappe': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1187641088.0dchiappe"]','1187641088.0dchiappe0': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1187641088.0dchiappe0"]','1187641088.0dchiappe1': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1187641088.0dchiappe1"]','1189818368.0dxschafe': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1189818368.0dxschafe"]','1189818368.0dxschafe0': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1189818368.0dxschafe0"]','1189818368.0dxschafe1': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1189818368.0dxschafe1"]','1189818496.0dxschafe': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1189818496.0dxschafe"]','1189818496.0dxschafe0': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1189818496.0dxschafe0"]','1190848640.0dxschafe': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1190848640.0dxschafe"]','1192580224.0dxschafe': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1192580224.0dxschafe"]','1192580224.0dxschafe0': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1192580224.0dxschafe0"]','1192644992.0dxschafe': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1192644992.0dxschafe"]','1192645120.0dxschafe': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1192645120.0dxschafe"]','1192645248.0dxschafe': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1192645248.0dxschafe"]','1192645248.0dxschafe0': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1192645248.0dxschafe0"]','1193084928.0dxschafe': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1193084928.0dxschafe"]','1193084928.0dxschafe0': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1193084928.0dxschafe0"]','1193085056.0dxschafe': 
'["Objects"]["1169592956.59sdnaik"]["Objects"]["1193085056.0dxschafe"]','1193085056.0dxschafe0': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1193085056.0dxschafe0"]','1193085056.0dxschafe1': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1193085056.0dxschafe1"]','1193085056.0dxschafe2': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1193085056.0dxschafe2"]','1193085184.0dxschafe': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1193085184.0dxschafe"]','1193085312.0dxschafe': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1193085312.0dxschafe"]','1193085312.0dxschafe0': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1193085312.0dxschafe0"]','1193085440.0dxschafe': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1193085440.0dxschafe"]','1193085440.0dxschafe0': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1193085440.0dxschafe0"]','1193085568.0dxschafe': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1193085568.0dxschafe"]','1230925964.91kmuller': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1230925964.91kmuller"]','1230926058.2kmuller': '["Objects"]["1169592956.59sdnaik"]["Objects"]["1230926058.2kmuller"]'}}
extraInfo = {'camPos': Point3(127.069, 1289.08, 1242.35),'camHpr': VBase3(-174.428, -44.7051, 0),'focalLength': 1.39999997616,'skyState': 2,'fog': 0} | 16,965.333333 | 50,687 | 0.653018 | 7,255 | 50,896 | 4.561268 | 0.111509 | 0.02629 | 0.025656 | 0.023329 | 0.713617 | 0.564034 | 0.521304 | 0.489575 | 0.466306 | 0.457392 | 0 | 0.236091 | 0.06026 | 50,896 | 3 | 50,688 | 16,965.333333 | 0.45579 | 0 | 0 | 0 | 0 | 0 | 0.570427 | 0.218304 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.333333 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
1b40e7ccef4ac996be60c1cf953ddaab56b2673f | 709 | py | Python | cmsplugin_blocks/choices_helpers.py | emencia/cmsplugin-blocks | 7ec99afd542948aef5d9069bd001729f5c14bded | [
"MIT"
] | 1 | 2019-04-14T01:30:37.000Z | 2019-04-14T01:30:37.000Z | cmsplugin_blocks/choices_helpers.py | emencia/cmsplugin-blocks | 7ec99afd542948aef5d9069bd001729f5c14bded | [
"MIT"
] | 16 | 2018-02-19T11:13:15.000Z | 2022-02-05T00:10:41.000Z | cmsplugin_blocks/choices_helpers.py | emencia/cmsplugin-blocks | 7ec99afd542948aef5d9069bd001729f5c14bded | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from django.conf import settings
def get_album_template_choices():
    """Return the album template choices configured in Django settings."""
    template_choices = settings.BLOCKS_ALBUM_TEMPLATES
    return template_choices
def get_album_default_template():
    """Return the key of the first configured album template choice."""
    first_choice = settings.BLOCKS_ALBUM_TEMPLATES[0]
    return first_choice[0]
def get_card_template_choices():
    """Return the card template choices configured in Django settings."""
    template_choices = settings.BLOCKS_CARD_TEMPLATES
    return template_choices
def get_card_default_template():
    """Return the key of the first configured card template choice."""
    first_choice = settings.BLOCKS_CARD_TEMPLATES[0]
    return first_choice[0]
def get_hero_template_choices():
    """Return the hero template choices configured in Django settings."""
    template_choices = settings.BLOCKS_HERO_TEMPLATES
    return template_choices
def get_hero_default_template():
    """Return the key of the first configured hero template choice."""
    first_choice = settings.BLOCKS_HERO_TEMPLATES[0]
    return first_choice[0]
def get_slider_template_choices():
    """Return the slider template choices configured in Django settings."""
    template_choices = settings.BLOCKS_SLIDER_TEMPLATES
    return template_choices
def get_slider_default_template():
    """Return the key of the first configured slider template choice."""
    first_choice = settings.BLOCKS_SLIDER_TEMPLATES[0]
    return first_choice[0]
| 20.257143 | 49 | 0.792666 | 96 | 709 | 5.4375 | 0.21875 | 0.091954 | 0.306513 | 0.222222 | 0.793103 | 0 | 0 | 0 | 0 | 0 | 0 | 0.014516 | 0.125529 | 709 | 34 | 50 | 20.852941 | 0.827419 | 0.029619 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.470588 | true | 0 | 0.058824 | 0.470588 | 1 | 0 | 0 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 1 | 1 | 0 | 0 | 6 |
1b976e670a7d59129349210de9ea135817e1f053 | 6,660 | py | Python | tests/test_single_recipe_retrival.py | PatrickCmd/Yummy-Recipe-RestAPI | 8911678be501d233e39f1b5c5a46aa3e82e5c844 | [
"MIT"
] | null | null | null | tests/test_single_recipe_retrival.py | PatrickCmd/Yummy-Recipe-RestAPI | 8911678be501d233e39f1b5c5a46aa3e82e5c844 | [
"MIT"
] | 41 | 2017-11-07T00:39:02.000Z | 2019-10-21T15:09:58.000Z | tests/test_single_recipe_retrival.py | PatrickCmd/Yummy-Recipe-RestAPI | 8911678be501d233e39f1b5c5a46aa3e82e5c844 | [
"MIT"
] | 3 | 2017-11-18T16:03:34.000Z | 2017-12-20T19:49:59.000Z | # tests/test_single_recipe_retrival.py
import unittest
import json
import uuid
import time
from api import db
from api.models import User, RecipeCategory, Recipe
from tests.register_login import RegisterLogin
class TestRetriveSingleRecipeBlueprint(RegisterLogin):
    """Tests for retrieving/deleting a single recipe within a category.

    The original tests repeated the same register/login/seed boilerplate
    verbatim in every method; it is factored into the private helpers
    below.  Each test method keeps its original name and assertions.
    """

    def _auth_headers(self):
        """Register the default user, log in, and return auth headers."""
        self.register_user(
            "Patrick", "Walukagga",
            "pwalukagga@gmail.com", "telnetcmd123"
        )
        rep_login = self.login_user("pwalukagga@gmail.com", "telnetcmd123")
        token = json.loads(rep_login.data.decode())['auth_token']
        return dict(Authorization='Bearer ' + token)

    def _seed_recipes(self, headers, recipe_name, recipe_cat_id):
        """Seed the test database.

        Creates one category directly via the model, one through the API,
        one recipe directly via the model (named *recipe_name* in category
        *recipe_cat_id*), and one recipe through the API.

        Returns the API response of the category creation so callers can
        assert on it (the not-logged-in test checks the 401 it produces).
        """
        RecipeCategory(
            name="Breakfast",
            description="How to make breakfast",
            user_id=1
        ).save()
        response = self.create_category("LunchBuffe",
                                        "How to make lunch buffe",
                                        headers)
        Recipe(
            name=recipe_name,
            cat_id=recipe_cat_id,
            user_id=1,
            ingredients="oil, Onions, Tomatoes",
            description="How to make breakfast rolex"
        ).save()
        self.create_recipe_in_category(2,
                                       "Chicken Lunch Buffe",
                                       "oil, Onions,Tomatoes",
                                       "Fresh chicken",
                                       "Mix and boil",
                                       headers)
        return response

    def test_get_single_recipe_in_category(self):
        """
        Test for getting single recipe in category
        """
        headers = self._auth_headers()
        self._seed_recipes(headers, "Rolex for Lunch", 2)
        response = self.client.get('/recipe_category/2/recipes/1',
                                   headers=headers)
        self.assertEqual(response.status_code, 200)
        self.assertIn('Rolex for Lunch', str(response.data))
        self.assertNotIn('Mix and boil', str(response.data))
        # Recipe id not present in the database.
        response = self.client.get('/recipe_category/2/recipes/4',
                                   headers=headers)
        self.assertEqual(response.status_code, 404)
        self.assertIn('Recipe not found', str(response.data))
        # Category id not present in the database.
        response = self.client.get('/recipe_category/3/recipes/1',
                                   headers=headers)
        self.assertEqual(response.status_code, 404)
        self.assertIn('Category not found in database',
                      str(response.data))

    def test_get_single_recipe_in_category_catid_not_number(self):
        """
        Test for getting single recipe in category cat_id and
        recipe_id not number
        """
        headers = self._auth_headers()
        self._seed_recipes(headers, "Rolex for Lunch", 2)
        response = self.client.get('/recipe_category/a/recipes/1',
                                   headers=headers)
        self.assertEqual(response.status_code, 400)
        self.assertIn('Category ID must be an integer', str(response.data))
        self.assertIn('fail', str(response.data))
        # Recipe id is not a number.
        response = self.client.get('/recipe_category/2/recipes/a',
                                   headers=headers)
        self.assertEqual(response.status_code, 400)
        self.assertIn('Recipe ID must be an integer', str(response.data))
        self.assertIn('fail', str(response.data))

    def test_recipe_crud_when_not_logged_in(self):
        """
        Test for recipe crud when not logged in
        """
        self.register_user(
            "Patrick", "Walukagga",
            "pwalukagga@gmail.com", "telnetcmd123"
        )
        # Empty bearer token: every API call must be rejected.
        headers = dict(Authorization='Bearer ')
        response = self._seed_recipes(headers, "Rolex for breakfast", 1)
        self.assertEqual(response.status_code, 401)
        self.assertIn('Token is missing', str(response.data))
        # Deleting existing and missing recipes alike requires a token.
        for url in ('/recipe_category/2/recipes/2',
                    '/recipe_category/2/recipes/4',
                    '/recipe_category/3/recipes/1'):
            response = self.client.delete(url, headers=headers)
            self.assertEqual(response.status_code, 401)
            self.assertIn('Token is missing', str(response.data))
# Allow running this test module directly with ``python`` as well as via a runner.
if __name__ == '__main__':
    unittest.main()
1bb7421f83c6ca8271b910e80ff1d08d59669bf0 | 182 | py | Python | mypy_fails/wrong_signature_forward_ref.py | brentyi/overrides | 59af886a60236a9a71b60c982bf41dfc6419231c | [
"Apache-2.0"
] | 197 | 2015-05-23T13:51:47.000Z | 2022-03-25T07:14:00.000Z | mypy_fails/wrong_signature_forward_ref.py | brentyi/overrides | 59af886a60236a9a71b60c982bf41dfc6419231c | [
"Apache-2.0"
] | 78 | 2015-05-25T20:00:22.000Z | 2022-03-21T21:50:24.000Z | mypy_fails/wrong_signature_forward_ref.py | brentyi/overrides | 59af886a60236a9a71b60c982bf41dfc6419231c | [
"Apache-2.0"
] | 33 | 2015-05-28T14:14:38.000Z | 2021-04-29T08:01:45.000Z | from overrides import overrides
class Parent:
    """Base class: ``metoda`` is declared to return ``None``."""

    def metoda(self) -> None:
        """Do nothing; defines the signature subclasses should keep."""
        pass
class Child(Parent):
    """Override with a deliberately incompatible return annotation.

    ``'Child'`` (a forward reference) does not match ``Parent.metoda``'s
    ``None`` return type — this module appears to be a negative example
    (it lives under ``mypy_fails``) that the type checker should reject.
    Do not "fix" the signature.
    """

    @overrides
    def metoda(self) -> 'Child':
        return self
| 14 | 32 | 0.620879 | 21 | 182 | 5.380952 | 0.571429 | 0.159292 | 0.230089 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.285714 | 182 | 12 | 33 | 15.166667 | 0.869231 | 0 | 0 | 0 | 0 | 0 | 0.027473 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.25 | false | 0.125 | 0.125 | 0.125 | 0.75 | 0 | 1 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | 0 | 6 |
9417edc36eeecdfeca76e1e0a3159145ea7ca4d8 | 203 | py | Python | src/onqg/dataset/__init__.py | WING-NUS/RL-for-Question-Generation | 745b8f823df9bcf4cc422c97a83ce096ac9c5e35 | [
"MIT"
] | 1 | 2021-01-05T05:30:00.000Z | 2021-01-05T05:30:00.000Z | src/onqg/dataset/__init__.py | MrSchnappi/RL-for-Question-Generation | d1966a47ef28c076902189469508194f659c5270 | [
"MIT"
] | null | null | null | src/onqg/dataset/__init__.py | MrSchnappi/RL-for-Question-Generation | d1966a47ef28c076902189469508194f659c5270 | [
"MIT"
] | 1 | 2021-03-23T16:34:06.000Z | 2021-03-23T16:34:06.000Z | import onqg.dataset.Constants as Constants
from onqg.dataset.Dataset import Dataset
from onqg.dataset.Vocab import Vocab
from onqg.dataset.data_processor import preprocess_batch, preprocess_rl_batch | 40.6 | 77 | 0.852217 | 29 | 203 | 5.827586 | 0.413793 | 0.260355 | 0.266272 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.103448 | 203 | 5 | 77 | 40.6 | 0.928571 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
946ee4dd07e6728e6862a56a239dd374528ce685 | 106 | py | Python | office365/directory/device.py | andrewcchoi/Office365-REST-Python-Client | 43db12ae532c804c75a3a34f7b0d7d79e30fdac3 | [
"MIT"
] | null | null | null | office365/directory/device.py | andrewcchoi/Office365-REST-Python-Client | 43db12ae532c804c75a3a34f7b0d7d79e30fdac3 | [
"MIT"
] | null | null | null | office365/directory/device.py | andrewcchoi/Office365-REST-Python-Client | 43db12ae532c804c75a3a34f7b0d7d79e30fdac3 | [
"MIT"
] | null | null | null | from office365.directory.directoryObject import DirectoryObject
class Device(DirectoryObject):
    """Directory ``device`` entity.

    Marker subclass: inherits all behaviour from ``DirectoryObject`` and
    adds no members of its own.
    """
    pass
| 17.666667 | 63 | 0.830189 | 10 | 106 | 8.8 | 0.8 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.032258 | 0.122642 | 106 | 5 | 64 | 21.2 | 0.913978 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0.333333 | 0.333333 | 0 | 0.666667 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 6 |
94751860482cd133c9a15b1e7297409aa5febcea | 56 | py | Python | rdf_io/protocols/__init__.py | GlauberMC/django-rdf-io | 5deaec40264407574351dd20f758b17b89b495a9 | [
"CC0-1.0"
] | null | null | null | rdf_io/protocols/__init__.py | GlauberMC/django-rdf-io | 5deaec40264407574351dd20f758b17b89b495a9 | [
"CC0-1.0"
] | null | null | null | rdf_io/protocols/__init__.py | GlauberMC/django-rdf-io | 5deaec40264407574351dd20f758b17b89b495a9 | [
"CC0-1.0"
] | null | null | null | from api import *
from rdf4j import *
from ldp import *
| 14 | 19 | 0.732143 | 9 | 56 | 4.555556 | 0.555556 | 0.487805 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.022727 | 0.214286 | 56 | 3 | 20 | 18.666667 | 0.909091 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
847697dcb34d1eac713f0cf5327111b9d11705f5 | 29 | py | Python | quantifiedcode/plugins/example/backend/models/__init__.py | marcinguy/quantifiedcode | cafc8b99d56a5e51820421af5d77be8b736ab03d | [
"BSD-3-Clause"
] | 138 | 2022-02-02T15:38:29.000Z | 2022-03-30T21:23:33.000Z | quantifiedcode/plugins/example/backend/models/__init__.py | bbbfkl/scanmycode-ce | 786ae9a83a0839b70ac773a673a3ac69a0484ee4 | [
"BSD-3-Clause"
] | 14 | 2016-12-21T11:26:48.000Z | 2022-03-02T10:32:24.000Z | quantifiedcode/plugins/example/backend/models/__init__.py | bbbfkl/scanmycode-ce | 786ae9a83a0839b70ac773a673a3ac69a0484ee4 | [
"BSD-3-Clause"
] | 26 | 2017-08-01T10:00:16.000Z | 2022-02-06T15:31:55.000Z | from .example import Example
| 14.5 | 28 | 0.827586 | 4 | 29 | 6 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.137931 | 29 | 1 | 29 | 29 | 0.96 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
ca2b1b4b90c259d14c7721bc59b97bdb2d0ae7a1 | 92 | py | Python | webmap/graph/__init__.py | rtruxal/webmap-webapp | 4f068dc7d1dde72c2e19151a37194dc2cbc52b1a | [
"MIT"
] | 1 | 2019-04-28T21:17:16.000Z | 2019-04-28T21:17:16.000Z | webmap/graph/__init__.py | rtruxal/webmap-webapp | 4f068dc7d1dde72c2e19151a37194dc2cbc52b1a | [
"MIT"
] | 1 | 2019-04-30T00:49:56.000Z | 2019-04-30T00:49:56.000Z | webmap/graph/__init__.py | rtruxal/webmap-webapp | 4f068dc7d1dde72c2e19151a37194dc2cbc52b1a | [
"MIT"
] | null | null | null | from .flask_interface import URLNode, IPNode, POINTSAT
from .graphql_interface import schema | 46 | 54 | 0.858696 | 12 | 92 | 6.416667 | 0.75 | 0.38961 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.097826 | 92 | 2 | 55 | 46 | 0.927711 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
ca33149657c414ae6bea5e610d29e642d3299526 | 31 | py | Python | pavlidis/__init__.py | chunglabmit/pavlidis | 987302f28dd9101c1d74c31cea8cee31b5d39771 | [
"MIT"
] | 3 | 2019-01-12T13:13:33.000Z | 2020-05-29T06:49:51.000Z | pavlidis/__init__.py | chunglabmit/pavlidis | 987302f28dd9101c1d74c31cea8cee31b5d39771 | [
"MIT"
] | 1 | 2018-04-20T17:40:28.000Z | 2018-04-20T18:13:10.000Z | pavlidis/__init__.py | chunglabmit/pavlidis | 987302f28dd9101c1d74c31cea8cee31b5d39771 | [
"MIT"
] | null | null | null | from .pavlidis import pavlidis
| 15.5 | 30 | 0.83871 | 4 | 31 | 6.5 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.129032 | 31 | 1 | 31 | 31 | 0.962963 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
ca39c14aa6087ed49829ad2df0bb647fff1716dc | 34 | py | Python | custom/__init__.py | 274869388/debug_dataloader | 6785250509189bf3ff2562a7cffcc9215d0bbe6f | [
"Apache-2.0"
] | null | null | null | custom/__init__.py | 274869388/debug_dataloader | 6785250509189bf3ff2562a7cffcc9215d0bbe6f | [
"Apache-2.0"
] | null | null | null | custom/__init__.py | 274869388/debug_dataloader | 6785250509189bf3ff2562a7cffcc9215d0bbe6f | [
"Apache-2.0"
] | null | null | null | from .aws_client import AWSBackend | 34 | 34 | 0.882353 | 5 | 34 | 5.8 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.088235 | 34 | 1 | 34 | 34 | 0.935484 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
ca70e1caf721c375676f04ea88c23196c4ccdc50 | 24 | py | Python | project/__init__.py | morganzwest/bugtracker | 073a81fbfb4acc8a46a1942df51a4617adec3d58 | [
"MIT"
] | null | null | null | project/__init__.py | morganzwest/bugtracker | 073a81fbfb4acc8a46a1942df51a4617adec3d58 | [
"MIT"
] | null | null | null | project/__init__.py | morganzwest/bugtracker | 073a81fbfb4acc8a46a1942df51a4617adec3d58 | [
"MIT"
] | null | null | null | from connection import * | 24 | 24 | 0.833333 | 3 | 24 | 6.666667 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.125 | 24 | 1 | 24 | 24 | 0.952381 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
ca995a61e08a6505c1561ac249dd7e0ee0004f44 | 217 | py | Python | code/hiking/devops/__init__.py | david-liu/hiking | a031ba66472809d2a01201fea9bdd5f12fcc19de | [
"Apache-2.0"
] | null | null | null | code/hiking/devops/__init__.py | david-liu/hiking | a031ba66472809d2a01201fea9bdd5f12fcc19de | [
"Apache-2.0"
] | 1 | 2018-11-07T08:33:17.000Z | 2018-11-07T08:33:17.000Z | code/hiking/devops/__init__.py | david-liu/hiking | a031ba66472809d2a01201fea9bdd5f12fcc19de | [
"Apache-2.0"
] | null | null | null | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from hiking.devops.cmdline import *
from hiking.devops import command_parser as command_parameters_parser
| 27.125 | 69 | 0.870968 | 29 | 217 | 5.931034 | 0.482759 | 0.174419 | 0.27907 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.110599 | 217 | 7 | 70 | 31 | 0.891192 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0.2 | 1 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
04936401df545155b827a0b9b46c1ed413724c7d | 28,952 | py | Python | scripts/Objects.py | AlfonsoXIII/chess_manager | bb0800c4992604a9c69c32ac91e65e97507ea1b0 | [
"MIT"
] | 1 | 2021-09-25T22:48:09.000Z | 2021-09-25T22:48:09.000Z | scripts/Objects.py | AlfonsoXIII/chess_manager | bb0800c4992604a9c69c32ac91e65e97507ea1b0 | [
"MIT"
] | null | null | null | scripts/Objects.py | AlfonsoXIII/chess_manager | bb0800c4992604a9c69c32ac91e65e97507ea1b0 | [
"MIT"
] | null | null | null | #Llibreries importades
import pygame
from PIL import Image
from copy import deepcopy
import numpy as np
#Class implementing the Pawn piece
class Pawn(pygame.sprite.Sprite):
    """Sprite and move generator for a chess pawn.

    Bug fixes relative to the original:
      * the forward-row guard only checked ``<= 7``, so a row of ``-1``
        silently wrapped to the last row via Python negative indexing;
      * the left-capture guard tested ``pos[1]-1 <= 7`` (always true)
        instead of ``>= 0``, so a pawn on column 0 "captured" onto
        column 7 through index wraparound.
    """

    def __init__(self, sprite, colour, pos, fliped, size):
        """Build the pawn sprite.

        sprite -- PIL image for the piece
        colour -- colour flag (0 or 1); 0 moves towards row 0 unless flipped
        pos    -- (row, col) board position
        fliped -- True when the board is drawn flipped (inverts direction)
        size   -- (width, height) to scale the sprite image to
        """
        super().__init__()  # inherit pygame Sprite behaviour
        self.colour = colour
        # Scale the PIL image and convert it into a pygame surface.
        self.image = sprite
        self.image = self.image.resize(size, resample=Image.BILINEAR, box=None)
        self.image = pygame.image.fromstring(self.image.tobytes(), self.image.size, self.image.mode)
        self.fliped = fliped
        self.pos = pos
        self.id = "P"  # piece identifier on the board
        # Collider rectangle derived from the sprite image dimensions.
        self.rect = self.image.get_rect()

    def Movement(self, board):
        """Return the list of (row, col) squares this pawn may move to.

        ``board`` is a row-major grid of piece ids; ``""`` marks an empty
        square, and opposing sides use opposite letter case.
        """
        mv = []
        # Forward direction: colour 0 moves towards row 0, otherwise
        # towards row 7; a flipped board inverts the direction.
        k = -1 if self.colour == 0 else 1
        if self.fliped:
            k = -k
        row, col = self.pos
        ahead = row + k
        if 0 <= ahead <= 7:  # fixed: also reject ahead == -1 (no wraparound)
            if board[ahead][col] == "":
                mv.append((ahead, col))
                # Double step from the starting rank when both squares are free.
                if row == (1 if k == 1 else 6) and board[row + 2 * k][col] == "":
                    mv.append((row + 2 * k, col))
            # Diagonal captures: target occupied by an opposite-case (enemy) piece.
            if col + 1 <= 7 and board[ahead][col + 1] != "" and board[ahead][col + 1].isupper() != board[row][col].isupper():
                mv.append((ahead, col + 1))
            # fixed: original tested ``col-1 <= 7`` (always true); -1 wrapped to column 7
            if col - 1 >= 0 and board[ahead][col - 1] != "" and board[ahead][col - 1].isupper() != board[row][col].isupper():
                mv.append((ahead, col - 1))
        return mv
#Class used to build the Queen piece
class Queen(pygame.sprite.Sprite):
    """Queen piece sprite: slides along diagonals, ranks and files."""
    def __init__(self, sprite, colour, pos, size):
        super().__init__()  # inherit the attributes of pygame's Sprite class
        self.colour = colour  # piece colour flag (0 or 1)
        # Scale the PIL image and convert it into a pygame surface for the sprite.
        scaled = sprite.resize(size, resample=Image.BILINEAR, box=None)
        self.image = pygame.image.fromstring(scaled.tobytes(), scaled.size, scaled.mode)
        self.pos = pos  # (row, col) position on the board matrix
        self.id = "Q"  # identifier used in the board matrix
        self.rect = self.image.get_rect()  # sprite collider sized from the image
    def Movement(self, board):
        """Return the list of (row, col) squares this queen can move to.

        Directions are scanned in the same order as before (four diagonals,
        then down, up, right, left) so the move list keeps its ordering.
        """
        row, col = self.pos[0], self.pos[1]
        me_upper = board[row][col].isupper()
        mv = []
        for dr, dc in ((1, 1), (1, -1), (-1, -1), (-1, 1),
                       (1, 0), (-1, 0), (0, 1), (0, -1)):
            r, c = row + dr, col + dc
            while 0 <= r <= 7 and 0 <= c <= 7:
                square = board[r][c]
                if square == "":
                    mv.append((r, c))  # empty square: keep sliding
                elif square.isupper() != me_upper:
                    mv.append((r, c))  # capture, then the ray is blocked
                    break
                else:
                    break  # friendly piece blocks the ray
                r += dr
                c += dc
        return mv
#Class used to build the Knight piece
class Knight(pygame.sprite.Sprite):
    """Knight piece sprite: jumps in the eight L-shaped offsets."""
    def __init__(self, sprite, colour, pos, size):
        super().__init__()  # inherit the attributes of pygame's Sprite class
        self.colour = colour  # piece colour flag (0 or 1)
        # Scale the PIL image and convert it into a pygame surface for the sprite.
        scaled = sprite.resize(size, resample=Image.BILINEAR, box=None)
        self.image = pygame.image.fromstring(scaled.tobytes(), scaled.size, scaled.mode)
        self.pos = pos  # (row, col) position on the board matrix
        self.id = "N"  # identifier used in the board matrix
        self.rect = self.image.get_rect()  # sprite collider sized from the image
    def Movement(self, board):
        """Return the list of (row, col) squares this knight can jump to.

        Offsets are tried in the same order as the original if-chain so the
        resulting move list keeps its ordering.
        """
        row, col = self.pos[0], self.pos[1]
        me_upper = board[row][col].isupper()
        mv = []
        for dr, dc in ((2, 1), (2, -1), (-2, 1), (-2, -1),
                       (1, 2), (1, -2), (-1, 2), (-1, -2)):
            r, c = row + dr, col + dc
            if 0 <= r <= 7 and 0 <= c <= 7:
                target = board[r][c]
                # Playable when empty or occupied by an opposing piece.
                if target == "" or target.isupper() != me_upper:
                    mv.append((r, c))
        return mv
#Class used to build the Bishop piece
class Bishop(pygame.sprite.Sprite):
    """Bishop piece sprite: slides along the four diagonals."""
    def __init__(self, sprite, colour, pos, size):
        super().__init__()  # inherit the attributes of pygame's Sprite class
        self.colour = colour  # piece colour flag (0 or 1)
        # Scale the PIL image and convert it into a pygame surface for the sprite.
        scaled = sprite.resize(size, resample=Image.BILINEAR, box=None)
        self.image = pygame.image.fromstring(scaled.tobytes(), scaled.size, scaled.mode)
        self.pos = pos  # (row, col) position on the board matrix
        self.id = "B"  # identifier used in the board matrix
        self.rect = self.image.get_rect()  # sprite collider sized from the image
    def Movement(self, board):
        """Return the list of (row, col) squares this bishop can move to.

        Diagonals are scanned in the original order so the move list keeps
        its ordering.
        """
        row, col = self.pos[0], self.pos[1]
        me_upper = board[row][col].isupper()
        mv = []
        for dr, dc in ((1, 1), (1, -1), (-1, -1), (-1, 1)):
            r, c = row + dr, col + dc
            while 0 <= r <= 7 and 0 <= c <= 7:
                square = board[r][c]
                if square == "":
                    mv.append((r, c))  # empty square: keep sliding
                elif square.isupper() != me_upper:
                    mv.append((r, c))  # capture, then the ray is blocked
                    break
                else:
                    break  # friendly piece blocks the ray
                r += dr
                c += dc
        return mv
#Class used to build the Rook piece (class is named "Rock")
class Rock(pygame.sprite.Sprite):
    """Rook piece sprite (class name "Rock" kept for compatibility): slides along ranks and files."""
    def __init__(self, sprite, colour, pos, size):
        super().__init__()  # inherit the attributes of pygame's Sprite class
        self.colour = colour  # piece colour flag (0 or 1)
        # Scale the PIL image and convert it into a pygame surface for the sprite.
        scaled = sprite.resize(size, resample=Image.BILINEAR, box=None)
        self.image = pygame.image.fromstring(scaled.tobytes(), scaled.size, scaled.mode)
        self.pos = pos  # (row, col) position on the board matrix
        self.id = "R"  # identifier used in the board matrix
        self.h_moved = False  # stores whether the piece has already moved
        self.rect = self.image.get_rect()  # sprite collider sized from the image
    def Movement(self, board):
        """Return the list of (row, col) squares this rook can move to.

        Directions are scanned in the original order (down, up, right, left)
        so the move list keeps its ordering.
        """
        row, col = self.pos[0], self.pos[1]
        me_upper = board[row][col].isupper()
        mv = []
        for dr, dc in ((1, 0), (-1, 0), (0, 1), (0, -1)):
            r, c = row + dr, col + dc
            while 0 <= r <= 7 and 0 <= c <= 7:
                square = board[r][c]
                if square == "":
                    mv.append((r, c))  # empty square: keep sliding
                elif square.isupper() != me_upper:
                    mv.append((r, c))  # capture, then the ray is blocked
                    break
                else:
                    break  # friendly piece blocks the ray
                r += dr
                c += dc
        return mv
#Class used to build the King piece
class King(pygame.sprite.Sprite):
    """King piece sprite: one-square moves, castling, and check detection.

    ``Check`` is a square-safety test used both for check detection and by
    ``Castling`` to validate the squares the king passes through.
    """
    def __init__(self, sprite, colour, pos, fliped, size):
        super().__init__() #inherit the attributes of pygame's Sprite class
        #Class attributes
        self.colour = colour #piece colour flag (0 or 1)
        #Select and scale the image that composes the sprite
        self.image = sprite.resize(size, resample=Image.BILINEAR, box=None)
        self.image = pygame.image.fromstring(self.image.tobytes(), self.image.size, self.image.mode)
        self.pos = pos #(row, col) position on the board matrix
        self.id = "K" #identifier used in the board matrix
        self.h_moved = False #stores whether the piece has already moved
        self.fliped = fliped #stores whether the board is rendered rotated
        self.rect = self.image.get_rect() #sprite collider sized from the image
    def Check(self, board, pos):
        """Return True when the square *pos* is NOT attacked on *board*, False otherwise.

        Works by walking rays outward from *pos* (diagonals then straight
        lines) and returning False as soon as an attacking enemy piece is
        reached.  Knights are handled with a counting heuristic: knights met
        while walking the rays are tallied in ``agressiveKnight`` and compared
        against every enemy knight within a 5x5 box around *pos*; any
        unaccounted-for nearby knight is treated as giving check.
        NOTE(review): this knight heuristic and the ray pruning below look
        approximate (e.g. a ``break`` skips the remaining directions at the
        current distance) — verify against a known-good check detector.
        """
        local_id = board[pos[0]][pos[1]]  # letter of the piece on the tested square (case encodes colour)
        modificadores_diagonales = [(-1 , -1), (1, 1), (-1, 1), (1, -1)]  # diagonal ray directions still alive
        modificadores_lineales = [(1, 0), (0, 1), (-1, 0), (0, -1)]  # straight ray directions still alive
        agressiveKnight = 0  # nearby enemy knights already "explained" by the ray walk
        for x in range(1, 8):  # x = distance travelled along each surviving ray
            temp_vectorlist_1 = deepcopy(modificadores_diagonales)  # rays surviving after this distance
            for y in range(0, len(modificadores_diagonales)):
                temp_vect = np.array(pos)+(x*np.array(modificadores_diagonales[y]))
                # Only look at on-board squares that are empty or hold an enemy piece.
                if 0 <= temp_vect[0] <= 7 and 0 <= temp_vect[1] <= 7 and (board[temp_vect[0]][temp_vect[1]].isupper() != board[pos[0]][pos[1]].isupper() or board[temp_vect[0]][temp_vect[1]] == ""):
                    if board[temp_vect[0]][temp_vect[1]] == ("p" if local_id.isupper() else "P"):
                        # Enemy pawn: attacks only diagonally at distance 1, and only
                        # from the two directions matching its advance direction.
                        if x == 1 and (modificadores_diagonales[y] == ((-1, -1) if local_id.islower() else (-1, 1)) or modificadores_diagonales[y] == ((1, 1) if local_id.islower() else (1, -1))):
                            #print("Gate: 1 (DIAGONAL_1)")
                            return False
                        else:
                            temp_vectorlist_1.remove(modificadores_diagonales[y])  # harmless pawn blocks this ray
                            break
                    if (0 < x <= 2) and board[temp_vect[0]][temp_vect[1]] == ("n" if local_id.isupper() else "N"):
                        agressiveKnight += 1  # knight sitting on a diagonal at distance <= 2 cannot attack; tally it
                        temp_vectorlist_1.remove(modificadores_diagonales[y])
                        break
                    if x == 1 and board[temp_vect[0]][temp_vect[1]] == ("k" if local_id.isupper() else "K"):
                        #print("Gate: 1 (DIAGONAL_2)")
                        return False
                    if board[temp_vect[0]][temp_vect[1]] == ("q" if local_id.isupper() else "Q") or board[temp_vect[0]][temp_vect[1]] == ("b" if local_id.isupper() else "B"):
                        #print("Gate: 1 (DIAGONAL)")
                        return False
                else:
                    temp_vectorlist_1.remove(modificadores_diagonales[y])  # off-board or friendly piece: ray is dead
            modificadores_diagonales = deepcopy(temp_vectorlist_1)
            temp_vectorlist_2 = deepcopy(modificadores_lineales)
            for y in range(0, len(modificadores_lineales)):
                temp_vect = np.array(pos)+(x*np.array(modificadores_lineales[y]))
                if 0 <= temp_vect[0] <= 7 and 0 <= temp_vect[1] <= 7 and (board[temp_vect[0]][temp_vect[1]].isupper() != board[pos[0]][pos[1]].isupper() or board[temp_vect[0]][temp_vect[1]] == ""):
                    if board[temp_vect[0]][temp_vect[1]] == ("p" if local_id.isupper() else "P"):
                        # Adjacent enemy pawn on a straight line: peek one square
                        # further along the same ray to see if a knight hides behind it.
                        if x == 1 and 0 <= temp_vect[0]+(modificadores_lineales[y])[0] <= 7 and 0 <= temp_vect[1]+(modificadores_lineales[y])[1] <= 7:
                            if board[temp_vect[0]+(modificadores_lineales[y])[0]][temp_vect[1]+(modificadores_lineales[y])[1]] == ("n" if local_id.isupper() else "N"):
                                agressiveKnight += 1
                                temp_vectorlist_2.remove(modificadores_lineales[y])
                                break
                            else:
                                temp_vectorlist_2.remove(modificadores_lineales[y])  # pawn blocks the ray
                                break
                    if (x != 0 and x <= 2) and board[temp_vect[0]][temp_vect[1]] == ("n" if local_id.isupper() else "N"):
                        agressiveKnight += 1  # knight on a straight line at distance <= 2 cannot attack; tally it
                        temp_vectorlist_2.remove(modificadores_lineales[y])
                        break
                    if x == 1 and board[temp_vect[0]][temp_vect[1]] == ("k" if local_id.isupper() else "K"):
                        #print("Gate: 2 (LINEAL_1)")
                        return False
                    if board[temp_vect[0]][temp_vect[1]] == ("q" if local_id.isupper() else "Q") or board[temp_vect[0]][temp_vect[1]] == ("r" if local_id.isupper() else "R"):
                        #print("Gate: 2 (LINEAL)")
                        return False
                else:
                    temp_vectorlist_2.remove(modificadores_lineales[y])  # off-board or friendly piece: ray is dead
            modificadores_lineales = deepcopy(temp_vectorlist_2)
        # Count every enemy knight within Chebyshev distance 2 of *pos*; any
        # knight not accounted for by the ray walk is assumed to give check.
        local_knights = 0
        for a in range(0, 8):
            for b in range(0, 8):
                if board[b][a] == ("n" if local_id.isupper() else "N") and pos[0]-2 <= b <= pos[0]+2 and pos[1]-2 <= a <= pos[1]+2:
                    local_knights += 1
        if local_knights != agressiveKnight:
            #print("Gate: 2 (HORSE)")
            return False
        #print("Gate: 3 (No check)")
        return True
    def Castling(self, board, h_moved):
        """Return a (kingside, queenside) pair of booleans for available castles.

        *h_moved* is the king's has-moved flag; each side additionally
        requires the crossed squares to be empty and safe (checked by
        simulating the king on each square with ``Check``) and the rook to
        still sit on its original square.
        NOTE(review): the rook's own h_moved flag is not consulted here —
        presumably handled by the caller; verify.
        """
        local_castling = (False, False)
        k = (1 if self.fliped == False else -1)  # file direction is mirrored on a rotated board
        if h_moved == False and 0 <= self.pos[1]+(2*k) <= 7:
            # Simulated boards with the king placed on each crossed square.
            f_board_1 = deepcopy(board)
            f_board_1[self.pos[0]][self.pos[1]+(1*k)] = "K" if self.colour == 0 else "k"
            f_board_1[self.pos[0]][self.pos[1]] = ""
            f_board_2 = deepcopy(board)
            f_board_2[self.pos[0]][self.pos[1]+(2*k)] = "K" if self.colour == 0 else "k"
            f_board_2[self.pos[0]][self.pos[1]] = ""
            # Both crossed squares empty and safe, and the rook on its home square.
            if (board[self.pos[0]][self.pos[1]+(1*k)] == "" and King.Check(self, f_board_1, (self.pos[0], self.pos[1]+(1*k)))) and (board[self.pos[0]][self.pos[1]+(2*k)] == "" and King.Check(self, f_board_2, (self.pos[0], self.pos[1]+(2*k)))) and board[self.pos[0]][self.pos[1]+(3*k)] == ("R" if board[self.pos[0]][self.pos[1]].isupper() else "r"):
                local_castling = (True, local_castling[1])
        if h_moved == False and 0 <= self.pos[1]+(-3*k) <= 7:
            # Queenside: three crossed squares are simulated and tested.
            f_board = deepcopy(board)
            f_board[self.pos[0]][self.pos[1]-1*k] = "K" if self.colour == 0 else "k"
            f_board[self.pos[0]][self.pos[1]] = ""
            f_board_1 = deepcopy(board)
            f_board_1[self.pos[0]][self.pos[1]-2*k] = ("K" if self.colour == 0 else "k")
            f_board_1[self.pos[0]][self.pos[1]] = ""
            f_board_2 = deepcopy(board)
            f_board_2[self.pos[0]][self.pos[1]-3*k] = "K" if self.colour == 0 else "k"
            f_board_2[self.pos[0]][self.pos[1]] = ""
            if (board[self.pos[0]][self.pos[1]-1*k] == "" and King.Check(self, f_board, (self.pos[0], self.pos[1]-1*k))) and (board[self.pos[0]][self.pos[1]-2*k] == "" and King.Check(self, f_board_1, (self.pos[0], self.pos[1]-2*k))) and (board[self.pos[0]][self.pos[1]-3*k] == "" and King.Check(self, f_board_2, (self.pos[0], self.pos[1]-3*k))) and board[self.pos[0]][self.pos[1]-4*k] == ("R" if board[self.pos[0]][self.pos[1]].isupper() else "r"):
                local_castling = (local_castling[0], True)
        return local_castling
    def Movement(self, board): #Return the squares available for this piece's movement
        """Return the eight one-square king moves that stay on the board and
        do not land on a friendly piece (safety is NOT checked here)."""
        mv = []
        k = 1 if self.colour == 1 else -1  # NOTE(review): k is computed but never used below
        if self.pos[0]+1 <= 7 and (board[self.pos[0]+1][self.pos[1]].isupper() != board[self.pos[0]][self.pos[1]].isupper() or board[self.pos[0]+1][self.pos[1]] == ""):
            mv.append((self.pos[0]+1, self.pos[1]))
        if self.pos[0]-1 >= 0 and (board[self.pos[0]-1][self.pos[1]].isupper() != board[self.pos[0]][self.pos[1]].isupper() or board[self.pos[0]-1][self.pos[1]] == ""):
            mv.append((self.pos[0]-1, self.pos[1]))
        if self.pos[1]+1 <= 7 and (board[self.pos[0]][self.pos[1]+1].isupper() != board[self.pos[0]][self.pos[1]].isupper() or board[self.pos[0]][self.pos[1]+1] == ""):
            mv.append((self.pos[0], self.pos[1]+1))
        if self.pos[1]-1 >= 0 and (board[self.pos[0]][self.pos[1]-1].isupper() != board[self.pos[0]][self.pos[1]].isupper() or board[self.pos[0]][self.pos[1]-1] == ""):
            mv.append((self.pos[0], self.pos[1]-1))
        if self.pos[0]+1 <= 7 and self.pos[1]+1 <= 7 and (board[self.pos[0]+1][self.pos[1]+1].isupper() != board[self.pos[0]][self.pos[1]].isupper() or board[self.pos[0]+1][self.pos[1]+1] == ""):
            mv.append((self.pos[0]+1, self.pos[1]+1))
        if self.pos[0]+1 <= 7 and self.pos[1]-1 >= 0 and (board[self.pos[0]+1][self.pos[1]-1].isupper() != board[self.pos[0]][self.pos[1]].isupper() or board[self.pos[0]+1][self.pos[1]-1] == ""):
            mv.append((self.pos[0]+1, self.pos[1]-1))
        if self.pos[0]-1 >= 0 and self.pos[1]+1 <= 7 and (board[self.pos[0]-1][self.pos[1]+1].isupper() != board[self.pos[0]][self.pos[1]].isupper() or board[self.pos[0]-1][self.pos[1]+1] == ""):
            mv.append((self.pos[0]-1, self.pos[1]+1))
        if self.pos[0]-1 >= 0 and self.pos[1]-1 >= 0 and (board[self.pos[0]-1][self.pos[1]-1].isupper() != board[self.pos[0]][self.pos[1]].isupper() or board[self.pos[0]-1][self.pos[1]-1] == ""):
            mv.append((self.pos[0]-1, self.pos[1]-1))
        return mv
class Menu(pygame.sprite.Sprite):
    """Two-state menu button sprite; Update() toggles between its two images."""
    def __init__(self):
        super().__init__()  # inherit the attributes of pygame's Sprite class
        sheet = Image.open("images/manu_pressed.png")  # sprite sheet with both button states
        def as_surface(box):
            # Crop one state out of the sheet, scale it, and convert to a pygame surface.
            cropped = sheet.crop(box).resize((70, 70), resample=Image.BILINEAR, box=None)
            return pygame.image.fromstring(cropped.tobytes(), cropped.size, cropped.mode)
        self.image1 = as_surface((0, 146.5, 150, 293))
        self.image2 = as_surface((0, 0, 150, 144))
        self.status = [self.image1, self.image2]  # the two button states
        self.im = 0  # index of the state currently displayed
        self.image = self.status[self.im]
        self.k = 2
        self.rect = self.image.get_rect()  # sprite collider sized from the image
    def Update(self):
        """Flip to the other button state image."""
        self.im = 1 - self.im
        self.image = self.status[self.im]
class Render_Image(pygame.sprite.Sprite):
    """Static image sprite loaded from *image_path* and scaled to *image_size*."""
    def __init__(self, image_path, image_size, k):
        super().__init__()  # inherit the attributes of pygame's Sprite class
        self.status = []  # single-entry state list, kept for interface parity with the button sprites
        self.im = 0  # current state index (always 0 here)
        # Open with PIL, scale, and convert into a pygame surface.
        source = Image.open(image_path)
        scaled = source.resize(image_size, resample=Image.BILINEAR, box=None)
        self.image1 = pygame.image.fromstring(scaled.tobytes(), scaled.size, scaled.mode)
        self.status.append(self.image1)
        self.image = self.image1  # displayed image
        self.id = k  # identifier of this sprite
        self.rect = self.image.get_rect()  # sprite collider sized from the image
#Class used to build a two-state button
class Button(pygame.sprite.Sprite):
    """Two-state button sprite cropped from a sheet; Update() toggles the state."""
    def __init__(self, image_object, image1_crop, image2_crop, image_size, k):
        super().__init__()  # inherit the attributes of pygame's Sprite class
        self.status = []  # the two button state images (off/on)
        self.im = 0  # index of the state currently displayed
        def build(crop_box):
            # Crop one state out of the sheet, scale it, and convert to a pygame surface.
            piece = image_object.crop(crop_box).resize(image_size, resample=Image.BILINEAR, box=None)
            return pygame.image.fromstring(piece.tobytes(), piece.size, piece.mode)
        self.image1 = build(image1_crop)
        self.status.append(self.image1)
        self.image2 = build(image2_crop)
        self.status.append(self.image2)
        self.image = self.status[self.im]  # displayed image follows the current state
        self.id = k  # identifier of this button
        self.rect = self.image.get_rect()  # sprite collider sized from the image
    def Update(self):
        """Flip to the other button state image."""
        self.im = 1 - self.im
        self.image = self.status[self.im]
class Arrow_Button(pygame.sprite.Sprite):
def __init__(self, sprite, image1_crop, image_size, k, proportion):
super().__init__() #Herència dels atributs de la classe Sprite de pygame
#Atributs de classe
self.status = {} #Llista per a recollir les dues imatges del botó (encés/apagat)
self.im = 0 #Posició (encés/apagat) del botó
image = sprite
temp_list = []
for a in range(0, 21):
#Selecció i escalatge de l'imatge 1
self.image1 = image.crop(image1_crop)
self.image1 = self.image1.resize(image_size, resample=Image.BILINEAR, box=None)
self.image1 = self.image1.rotate(a*9, expand=False)
self.image1 = pygame.image.fromstring(self.image1.tobytes(), self.image1.size, self.image1.mode)
temp_list.append(self.image1)
self.status["0"] = temp_list
self.image = (self.status["0"])[self.im] #Determinem la imatge en funció del seu estatus o posició
self.id = k #ID del botó
self.rect = self.image.get_rect() #Posició del collider del botó
#self.rect.center = (pos[0]+(image_size[0]/2), pos[1]+(image_size[1]/2))
def Update(self, direction): #Funció per a actualitzar la posició de la imatge (encés/apagat)
if 0 <= self.im <= 20:
self.im += (direction)
self.image = self.status["0"][self.im] | 49.575342 | 448 | 0.544971 | 4,442 | 28,952 | 3.492121 | 0.044575 | 0.184116 | 0.104177 | 0.090511 | 0.90575 | 0.891632 | 0.88409 | 0.863847 | 0.848182 | 0.831227 | 0 | 0.044599 | 0.282088 | 28,952 | 584 | 449 | 49.575342 | 0.701708 | 0.129525 | 0 | 0.655779 | 0 | 0 | 0.002788 | 0.000916 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052764 | false | 0 | 0.01005 | 0 | 0.123116 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
049c052d7ab09e1a334a611e68038b1db9c3d7c9 | 136 | py | Python | venv/src/home/views.py | AkashSDas/Bloare | 20d0f56252346b4891c3cb62acaf8d20e0d3a7b7 | [
"MIT"
] | null | null | null | venv/src/home/views.py | AkashSDas/Bloare | 20d0f56252346b4891c3cb62acaf8d20e0d3a7b7 | [
"MIT"
] | 15 | 2021-04-08T19:53:30.000Z | 2022-03-12T00:50:04.000Z | venv/src/home/views.py | AkashSDas/Bloare | 20d0f56252346b4891c3cb62acaf8d20e0d3a7b7 | [
"MIT"
] | null | null | null | from django.shortcuts import render
def home_view(request, *args, **kwargs):
    """Render the site's landing page."""
    template_name = 'home/landing-page.html'
    return render(request, template_name, {})
| 22.666667 | 56 | 0.720588 | 18 | 136 | 5.388889 | 0.833333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.132353 | 136 | 5 | 57 | 27.2 | 0.822034 | 0 | 0 | 0 | 0 | 0 | 0.161765 | 0.161765 | 0 | 0 | 0 | 0 | 0 | 1 | 0.333333 | false | 0 | 0.333333 | 0.333333 | 1 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 1 | 1 | 0 | 0 | 6 |
b6c4525f028a6ad78a4a2f7cb88388bb9ea7d9f3 | 8,317 | py | Python | tests/adapters/repository/sqlalchemy_repo/sqlite/test_transactions.py | nadirhamid/protean | d31bc634e05c9221e82136bf18c2ceaa0982c1c8 | [
"BSD-3-Clause"
] | null | null | null | tests/adapters/repository/sqlalchemy_repo/sqlite/test_transactions.py | nadirhamid/protean | d31bc634e05c9221e82136bf18c2ceaa0982c1c8 | [
"BSD-3-Clause"
] | null | null | null | tests/adapters/repository/sqlalchemy_repo/sqlite/test_transactions.py | nadirhamid/protean | d31bc634e05c9221e82136bf18c2ceaa0982c1c8 | [
"BSD-3-Clause"
] | null | null | null | # Standard Library Imports
import random
import string
# Protean
import pytest
from protean.core.exceptions import ObjectNotFoundError
from protean.core.unit_of_work import UnitOfWork
# Local/Relative Imports
from .elements import Person, PersonRepository
@pytest.mark.sqlite
class TestTransactions:
@pytest.fixture(autouse=True)
def register_elements(self, test_domain):
test_domain.register(Person)
test_domain.register(PersonRepository, aggregate_cls=Person)
def random_name(self):
return "".join(random.choices(string.ascii_uppercase + string.digits, k=15))
def persisted_person(self, test_domain):
repo = test_domain.repository_for(Person)
person = Person(first_name=self.random_name(), last_name=self.random_name())
repo.add(person)
return person
def test_new_objects_are_committed_as_part_of_one_transaction(self, test_domain):
# Add a Person the database
repo = test_domain.repository_for(Person)
repo.add(self.persisted_person(test_domain))
person_dao = test_domain.get_dao(Person)
# Initiate a UnitOfWork Session
with UnitOfWork():
repo = test_domain.repository_for(Person)
person2 = Person(first_name="Jane", last_name="Doe")
repo.add(person2)
# Test that the underlying database is untouched
assert len(person_dao.outside_uow().query.all().items) == 1
assert len(person_dao.query.all().items) == 2
def test_updated_objects_are_committed_as_part_of_one_transaction(
self, test_domain
):
# Add a Person the database
repo = test_domain.repository_for(Person)
person = Person(first_name="John", last_name="Doe")
repo.add(person)
person_dao = test_domain.get_dao(Person)
# Initiate a UnitOfWork Session
with UnitOfWork():
repo = test_domain.repository_for(Person)
persisted_person = repo.get(person.id)
persisted_person.last_name = "Dane"
repo.add(persisted_person)
# Test that the underlying database is untouched
assert person_dao.outside_uow().find_by(id=person.id).last_name == "Doe"
assert person_dao.get(person.id).last_name == "Dane"
def test_deleted_objects_are_committed_as_part_of_one_transaction(
self, test_domain
):
# Add a Person the database
repo = test_domain.repository_for(Person)
person_to_be_added = self.persisted_person(test_domain)
repo.add(person_to_be_added)
person_dao = test_domain.get_dao(Person)
# Initiate a UnitOfWork Session
with UnitOfWork():
repo = test_domain.repository_for(Person)
persisted_person = repo.get(person_to_be_added.id)
repo.remove(persisted_person)
# Test that the underlying database is untouched
assert len(person_dao.outside_uow().query.all().items) == 1
assert len(person_dao.query.all().items) == 0
def test_changed_objects_are_committed_as_part_of_one_transaction(
self, test_domain
):
# Add a Person the database
repo = test_domain.repository_for(Person)
person_to_be_updated = self.persisted_person(test_domain)
person_to_be_deleted = self.persisted_person(test_domain)
repo.add(person_to_be_updated)
repo.add(person_to_be_deleted)
person_dao = test_domain.get_dao(Person)
# Initiate a UnitOfWork Session
with UnitOfWork():
repo_with_uow = test_domain.repository_for(Person)
# Create a new person object to be added
person_to_be_added = Person(first_name="John", last_name="Doe")
repo_with_uow.add(person_to_be_added)
# Update an existing Person record
person_to_be_updated.last_name = "FooBar"
repo_with_uow.add(person_to_be_updated)
# Remove an existing Person record
repo_with_uow.remove(person_to_be_deleted)
# Test that the underlying database is untouched
assert len(person_dao.query.all().items) == 2
assert (
person_dao.outside_uow().get(person_to_be_updated.id).last_name
!= "FooBar"
)
assert person_dao.get(person_to_be_deleted.id) is not None
assert len(person_dao.query.all().items) == 2
assert person_dao.get(person_to_be_added.id) is not None
assert person_dao.get(person_to_be_updated.id).last_name == "FooBar"
with pytest.raises(ObjectNotFoundError):
person_dao.get(person_to_be_deleted.id)
def test_changed_objects_are_committed_as_part_of_one_transaction_on_explict_commit(
self, test_domain
):
# Add a Person the database
repo = test_domain.repository_for(Person)
person_to_be_updated = self.persisted_person(test_domain)
person_to_be_deleted = self.persisted_person(test_domain)
repo.add(person_to_be_updated)
repo.add(person_to_be_deleted)
person_dao = test_domain.get_dao(Person)
# Initiate a UnitOfWork Session
uow = UnitOfWork()
uow.start()
repo_with_uow = test_domain.repository_for(Person)
# Create a new person object to be added
person_to_be_added = Person(first_name="John", last_name="Doe")
repo_with_uow.add(person_to_be_added)
# Update an existing Person record
person_to_be_updated.last_name = "FooBar"
repo_with_uow.add(person_to_be_updated)
# Remove an existing Person record
repo_with_uow.remove(person_to_be_deleted)
# Test that the underlying database is untouched
assert len(person_dao.query.all().items) == 2
assert (
person_dao.outside_uow().get(person_to_be_updated.id).last_name != "FooBar"
)
assert person_dao.get(person_to_be_deleted.id) is not None
uow.commit()
assert uow.in_progress is False
assert len(person_dao.query.all().items) == 2
assert person_dao.get(person_to_be_added.id) is not None
assert person_dao.get(person_to_be_updated.id).last_name == "FooBar"
with pytest.raises(ObjectNotFoundError):
person_dao.get(person_to_be_deleted.id)
def test_all_changes_are_discarded_on_rollback(self, test_domain):
repo = test_domain.repository_for(Person)
person_to_be_updated = self.persisted_person(test_domain)
person_to_be_deleted = self.persisted_person(test_domain)
repo.add(person_to_be_updated)
repo.add(person_to_be_deleted)
person_dao = test_domain.get_dao(Person)
# Initiate a UnitOfWork Session
uow = UnitOfWork()
uow.start()
repo_with_uow = test_domain.repository_for(Person)
# Create a new person object to be added
person_to_be_added = Person(first_name="John", last_name="Doe")
repo_with_uow.add(person_to_be_added)
# Update an existing Person record
person_to_be_updated.last_name = "FooBar"
repo_with_uow.add(person_to_be_updated)
# Remove an existing Person record
repo_with_uow.remove(person_to_be_deleted)
# Test that the underlying database is untouched
assert len(person_dao.query.all().items) == 2
assert (
person_dao.outside_uow().get(person_to_be_updated.id).last_name != "FooBar"
)
assert person_dao.get(person_to_be_deleted.id) is not None
uow.rollback()
assert uow.in_progress is False
assert len(person_dao.query.all().items) == 2
assert person_dao.get(person_to_be_updated.id).last_name != "FooBar"
assert person_dao.get(person_to_be_deleted.id) is not None
def test_session_is_destroyed_after_commit(self, test_domain):
uow = UnitOfWork()
uow.start()
uow.commit()
assert uow._sessions == {}
assert uow.in_progress is False
def test_session_is_destroyed_after_rollback(self, test_domain):
    """Rolling back a UnitOfWork should empty its session registry and
    mark the UoW as no longer in progress — same cleanup as commit.
    """
    uow = UnitOfWork()
    uow.start()
    uow.rollback()
    # Internal sessions map is cleared and the in-progress flag reset.
    assert uow._sessions == {}
    assert uow.in_progress is False
| 35.241525 | 88 | 0.675243 | 1,101 | 8,317 | 4.765668 | 0.108084 | 0.03583 | 0.083857 | 0.058319 | 0.846388 | 0.833238 | 0.81418 | 0.81418 | 0.795883 | 0.779493 | 0 | 0.002231 | 0.245521 | 8,317 | 235 | 89 | 35.391489 | 0.833944 | 0.115667 | 0 | 0.697987 | 0 | 0 | 0.013652 | 0 | 0 | 0 | 0 | 0 | 0.201342 | 1 | 0.073826 | false | 0 | 0.040268 | 0.006711 | 0.134228 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
8e1a7eb441b19ea7cddb216bffd406ce0cb7897e | 44 | py | Python | aoc/common/__init__.py | klittlepage/aoc2020 | 7135ac08263480a8cc9d6536d7caeb26bf85ae4f | [
"MIT"
] | null | null | null | aoc/common/__init__.py | klittlepage/aoc2020 | 7135ac08263480a8cc9d6536d7caeb26bf85ae4f | [
"MIT"
] | null | null | null | aoc/common/__init__.py | klittlepage/aoc2020 | 7135ac08263480a8cc9d6536d7caeb26bf85ae4f | [
"MIT"
] | null | null | null | from aoc.common.helpers import read_chunked
| 22 | 43 | 0.863636 | 7 | 44 | 5.285714 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.090909 | 44 | 1 | 44 | 44 | 0.925 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
8e3a86898740a1d8310e9eae1d382962a5bda3f8 | 173 | py | Python | custom-outset/pkgroot/usr/local/outset/boot-every/boot-every_example.py | flammable/outset | c1b21b7c9814b6c0cf868c09c1dbcc743e2d1f7d | [
"Apache-2.0"
] | 533 | 2015-01-02T05:41:55.000Z | 2022-03-30T22:34:57.000Z | custom-outset/pkgroot/usr/local/outset/boot-every/boot-every_example.py | flammable/outset | c1b21b7c9814b6c0cf868c09c1dbcc743e2d1f7d | [
"Apache-2.0"
] | 80 | 2015-02-16T11:52:31.000Z | 2022-01-21T01:52:46.000Z | custom-outset/pkgroot/usr/local/outset/boot-every/boot-every_example.py | flammable/outset | c1b21b7c9814b6c0cf868c09c1dbcc743e2d1f7d | [
"Apache-2.0"
] | 95 | 2015-02-10T21:12:39.000Z | 2022-03-25T10:00:34.000Z | #!/usr/bin/python
# Replace this script with your scripts, profiles, and/or packages
# which you want to run at every boot.
# Placeholder payload: simply announces itself when run at boot time.
print("These scripts will run at every boot.")
| 24.714286 | 66 | 0.739884 | 29 | 173 | 4.413793 | 0.827586 | 0.078125 | 0.15625 | 0.21875 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.16763 | 173 | 6 | 67 | 28.833333 | 0.888889 | 0.682081 | 0 | 0 | 0 | 0 | 0.711538 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 6 |
6d617e44072a8df7827e8485dba93ffcfedb28b6 | 148 | py | Python | bittrex_signalr/__init__.py | r3bers/custom-bittrex-signalr | ffc5fcd56ac9ead4db305036f01a50b192bd9003 | [
"MIT"
] | null | null | null | bittrex_signalr/__init__.py | r3bers/custom-bittrex-signalr | ffc5fcd56ac9ead4db305036f01a50b192bd9003 | [
"MIT"
] | null | null | null | bittrex_signalr/__init__.py | r3bers/custom-bittrex-signalr | ffc5fcd56ac9ead4db305036f01a50b192bd9003 | [
"MIT"
] | null | null | null | from bittrex_signalr import _logger
from bittrex_signalr.websocket_client import BittrexSocket
from bittrex_signalr.constants import BittrexMethods
| 37 | 58 | 0.905405 | 18 | 148 | 7.166667 | 0.555556 | 0.255814 | 0.418605 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.081081 | 148 | 3 | 59 | 49.333333 | 0.948529 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
6d665d9abaae97abcc886109fc28f27bd3c0947a | 335 | py | Python | allure-nose2/test/with_mp/test_pm.py | bhumikapaharia/allure-python | b571b9bfc80af6f0431062ee83425e62d90163e4 | [
"Apache-2.0"
] | 558 | 2015-03-14T18:26:56.000Z | 2022-02-21T00:09:49.000Z | allure-nose2/test/with_mp/test_pm.py | bhumikapaharia/allure-python | b571b9bfc80af6f0431062ee83425e62d90163e4 | [
"Apache-2.0"
] | 448 | 2015-01-09T10:00:47.000Z | 2022-03-24T15:25:02.000Z | allure-nose2/test/with_mp/test_pm.py | bhumikapaharia/allure-python | b571b9bfc80af6f0431062ee83425e62d90163e4 | [
"Apache-2.0"
] | 244 | 2015-01-26T08:03:11.000Z | 2022-03-07T17:06:30.000Z | # Todo test mp
from test.example_runner import run_docstring_example
def test_func_fullname():
    # NOTE: the docstring below is executable test content —
    # run_docstring_example() runs the example test functions defined in it
    # (presumably via doctest extraction — confirm against example_runner).
    # It must therefore stay byte-for-byte unchanged; it is not documentation.
    """
    >>> def test_func_fullname_example1():
    ...     pass
    >>> def test_func_fullname_example2():
    ...     pass
    >>> def test_func_fullname_example3():
    ...     pass
    """
    allure_report = run_docstring_example()
    # NOTE(review): allure_report is unused in this excerpt; the original
    # file likely continues with assertions on it — verify against the
    # full source before relying on this test.
ed9947e4aeb3fbe9bb216b5e139b1076aff08109 | 118 | py | Python | deepnade/buml/NADE/__init__.py | vlimant/NADE | e2446c73250a99979c8710a8acbb14823a54bce0 | [
"BSD-3-Clause"
] | 43 | 2017-06-19T21:19:55.000Z | 2022-02-06T01:21:48.000Z | deepnade/buml/NADE/__init__.py | vlimant/NADE | e2446c73250a99979c8710a8acbb14823a54bce0 | [
"BSD-3-Clause"
] | 1 | 2017-08-29T14:09:49.000Z | 2017-09-08T12:34:19.000Z | deepnade/buml/NADE/__init__.py | vlimant/NADE | e2446c73250a99979c8710a8acbb14823a54bce0 | [
"BSD-3-Clause"
] | 12 | 2017-09-12T07:56:13.000Z | 2021-09-19T19:11:41.000Z | from BernoulliNADE import *
from MoGNADE import *
from OrderlessBernoulliNADE import *
from OrderlessMoGNADE import *
| 23.6 | 36 | 0.830508 | 12 | 118 | 8.166667 | 0.5 | 0.306122 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.135593 | 118 | 4 | 37 | 29.5 | 0.960784 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
edc59e7ffcdd87f295764d347850d04b2af3d79b | 797 | py | Python | src/Card.py | codavex/Cards | 7dba1c219c7188f176f152c2671096e16accbc66 | [
"MIT"
] | null | null | null | src/Card.py | codavex/Cards | 7dba1c219c7188f176f152c2671096e16accbc66 | [
"MIT"
] | null | null | null | src/Card.py | codavex/Cards | 7dba1c219c7188f176f152c2671096e16accbc66 | [
"MIT"
] | null | null | null | class Card:
def __init__(self, rank, suit):
self._rank = rank
self._suit = suit
def __repr__(self):
return "%s%s" % (repr(self._rank), self._suit.name)
def __str__(self):
return "%s of %s" % (str(self._rank), self._suit.value)
def __eq__(self, other):
return self._rank == other._rank
def __ne__(self, other):
return self._rank != other._rank
def __lt__(self, other):
return self._rank < other._rank
def __le__(self, other):
return self._rank <= other._rank
def __gt__(self, other):
return self._rank > other._rank
def __ge__(self, other):
return self._rank >= other._rank
def getRank(self):
    # Plain accessor for the card's rank, kept in getter form for API
    # compatibility (a @property would be more Pythonic, but callers
    # use getRank()).
    return self._rank
def getSuit(self):
    # Plain accessor for the card's suit, kept in getter form for API
    # compatibility with getRank().
    return self._suit
| 22.771429 | 63 | 0.593476 | 104 | 797 | 4.009615 | 0.221154 | 0.211031 | 0.235012 | 0.273381 | 0.503597 | 0.503597 | 0.503597 | 0.503597 | 0 | 0 | 0 | 0 | 0.286073 | 797 | 34 | 64 | 23.441176 | 0.732865 | 0 | 0 | 0 | 0 | 0 | 0.015056 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.458333 | false | 0 | 0 | 0.416667 | 0.916667 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 6 |
eddf9244d02922ceac3322d9a53346f7976639a1 | 7,218 | py | Python | neurora/ctrdm_corr.py | ZitongLu1996/NeuroRA | 4e72f5b37ff308a4a068107b35f7555df6b7df0d | [
"MIT"
] | 110 | 2019-04-30T03:52:48.000Z | 2022-03-19T08:23:38.000Z | neurora/ctrdm_corr.py | ZitongLu1996/NeuroRA | 4e72f5b37ff308a4a068107b35f7555df6b7df0d | [
"MIT"
] | 2 | 2020-07-23T14:31:30.000Z | 2022-01-14T08:30:00.000Z | neurora/ctrdm_corr.py | ZitongLu1996/NeuroRA | 4e72f5b37ff308a4a068107b35f7555df6b7df0d | [
"MIT"
] | 20 | 2020-03-02T11:58:30.000Z | 2021-12-31T08:29:53.000Z | # -*- coding: utf-8 -*-
' a module for calculating the Similarity/Correlation Coefficient between two Cross-temporal RDMs '
__author__ = 'Zitong Lu'
import numpy as np
from scipy.stats import spearmanr
from scipy.stats import pearsonr
from scipy.stats import kendalltau
from neurora.stuff import permutation_corr
' a function for calculating the Spearman correlation coefficient between two CTRDMs '
def ctrdm_correlation_spearman(CTRDM1, CTRDM2):
"""
Calculate the similarity based on Spearman Correlation Coefficient between two CTRDMs
Parameters
----------
CTRDM1 : array [n_conditions, n_conditions]
The CTRDM 1.
The shape of CTRDM1 must be [n_cons, n_cons]. n_cons represent the number of conidtions.
CTRDM2 : array [n_conditions, n_conditions]
The CTRDM 2.
The shape of CTRDM2 must be [n_cons, n_cons]. n_cons represent the number of conidtions.
Returns
-------
corr : array [r, p].
The Spearman Correlation result.
The shape of corr is [2], including a r-value and a p-value.
"""
# get number of conditions
n_cons = np.shape(CTRDM1)[0]
# calculate the number of value above the diagonal in RDM
n = n_cons * (n_cons - 1)
# initialize two vectors to store the values above the diagnal of two RDMs
v1 = np.zeros([n], dtype=np.float64)
v2 = np.zeros([n], dtype=np.float64)
# assignment
nn = 0
for i in range(n_cons):
for j in range(n_cons):
if i != j:
v1[nn] = CTRDM1[i, j]
v2[nn] = CTRDM2[i, j]
nn = nn + 1
# calculate the Spearman Correlation
corr = np.array(spearmanr(v1, v2))
return corr
' a function for calculating the similarity based on Pearson Correlation Coefficient between two CTRDMs '
def ctrdm_correlation_pearson(CTRDM1, CTRDM2):
"""
Calculate the similarity based on Pearson Correlation Coefficient between two CTRDMs
Parameters
----------
CTRDM1 : array [n_conditions, n_conditions]
The CTRDM 1.
The shape of CTRDM1 must be [n_cons, n_cons]. n_cons represent the number of conidtions.
CTRDM2 : array [n_conditions, n_conditions]
The CTRDM 2.
The shape of CTRDM2 must be [n_cons, n_cons]. n_cons represent the number of conidtions.
Returns
-------
corr : array [r, p].
The Pearson Correlation result.
The shape of corr is [2], including a r-value and a p-value.
"""
# get number of conditions
n_cons = np.shape(CTRDM1)[0]
# calculate the number of value above the diagonal in RDM
n = n_cons * (n_cons - 1)
# initialize two vectors to store the values above the diagnal of two RDMs
v1 = np.zeros([n], dtype=np.float64)
v2 = np.zeros([n], dtype=np.float64)
# assignment
nn = 0
for i in range(n_cons):
for j in range(n_cons):
if i != j:
v1[nn] = CTRDM1[i, j]
v2[nn] = CTRDM2[i, j]
nn = nn + 1
# calculate the Pearson Correlation
corr = np.array(pearsonr(v1, v2))
return corr
' a function for calculating the similarity based on Kendalls tau Correlation Coefficient between two CTRDMs '
def ctrdm_correlation_kendall(CTRDM1, CTRDM2):
"""
Calculate the similarity based on Kendalls tau Correlation Coefficient between two CTRDMs
Parameters
----------
CTRDM1 : array [n_conditions, n_conditions]
The CTRDM 1.
The shape of CTRDM1 must be [n_cons, n_cons]. n_cons represent the number of conidtions.
CTRDM2 : array [n_conditions, n_conditions]
The CTRDM 2.
The shape of CTRDM2 must be [n_cons, n_cons]. n_cons represent the number of conidtions.
Returns
-------
corr : array [r, p].
The Kendalls tau Correlation result.
The shape of corr is [2], including a r-value and a p-value.
"""
# get number of conditions
n_cons = np.shape(CTRDM1)[0]
# calculate the number of value above the diagonal in RDM
n = n_cons * (n_cons - 1)
# initialize two vectors to store the values above the diagnal of two RDMs
v1 = np.zeros([n], dtype=np.float64)
v2 = np.zeros([n], dtype=np.float64)
# assignment
nn = 0
for i in range(n_cons):
for j in range(n_cons):
if i != j:
v1[nn] = CTRDM1[i, j]
v2[nn] = CTRDM2[i, j]
nn = nn + 1
# calculate the Kendalls tau Correlation
corr = np.array(kendalltau(v1, v2))
return corr
def ctrdm_similarity(CTRDM1, CTRDM2):
"""
Calculate the similarity based on Cosine Similarity between two CTRDMs
Parameters
----------
CTRDM1 : array [n_conditions, n_conditions]
The CTRDM 1.
The shape of CTRDM1 must be [n_cons, n_cons]. n_cons represent the number of conidtions.
CTRDM2 : array [n_conditions, n_conditions]
The CTRDM 2.
The shape of CTRDM2 must be [n_cons, n_cons]. n_cons represent the number of conidtions.
Returns
-------
similarity : float
The Cosine Similarity result.
"""
# get number of conditions
n_cons = np.shape(CTRDM1)[0]
# calculate the number of value above the diagonal in RDM
n = n_cons * (n_cons - 1)
# initialize two vectors to store the values above the diagnal of two RDMs
v1 = np.zeros([n], dtype=np.float64)
v2 = np.zeros([n], dtype=np.float64)
# assignment
nn = 0
for i in range(n_cons):
for j in range(n_cons):
if i != j:
v1[nn] = CTRDM1[i, j]
v2[nn] = CTRDM2[i, j]
nn = nn + 1
# calculate the Cosine Similarity
V1 = np.mat(v1)
V2 = np.mat(v2)
num = float(V1 * V2.T)
denom = np.linalg.norm(V1) * np.linalg.norm(V2)
cos = num / denom
similarity = 0.5 + 0.5 * cos
return similarity
' a function for calculating the similarity based on Euclidean Distance between two CTRDMs '
def ctrdm_distance(CTRDM1, CTRDM2):
"""
Calculate the similarity based on Euclidean Distance between two CTRDMs
Parameters
----------
CTRDM1 : array [n_conditions, n_conditions]
The CTRDM 1.
The shape of CTRDM1 must be [n_cons, n_cons]. n_cons represent the number of conidtions.
CTRDM2 : array [n_conditions, n_conditions]
The CTRDM 2.
The shape of CTRDM2 must be [n_cons, n_cons]. n_cons represent the number of conidtions.
Returns
-------
dist : float.
The Euclidean Distance result.
"""
# get number of conditions
n_cons = np.shape(CTRDM1)[0]
# calculate the number of value above the diagonal in RDM
n = n_cons * (n_cons - 1)
# initialize two vectors to store the values above the diagnal of two RDMs
v1 = np.zeros([n], dtype=np.float64)
v2 = np.zeros([n], dtype=np.float64)
# assignment
nn = 0
for i in range(n_cons):
for j in range(n_cons):
if i != j:
v1[nn] = CTRDM1[i, j]
v2[nn] = CTRDM2[i, j]
nn = nn + 1
# calculate the Euclidean Distance
dist = np.linalg.norm(v1 - v2)
return dist | 29.104839 | 110 | 0.622749 | 1,034 | 7,218 | 4.262089 | 0.102515 | 0.062401 | 0.034037 | 0.056728 | 0.841389 | 0.839573 | 0.829135 | 0.796233 | 0.769458 | 0.749036 | 0 | 0.027011 | 0.28706 | 7,218 | 248 | 111 | 29.104839 | 0.829382 | 0.517595 | 0 | 0.674419 | 0 | 0 | 0.152154 | 0.006817 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05814 | false | 0 | 0.05814 | 0 | 0.174419 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
edf937fa1fee8c19ee67f57144045b7f63a92424 | 44 | py | Python | dist/python/tests/__init__.py | natir/ssik | 873cdc5b2e3c6b3f03191e26506ccb5e6d4b4d89 | [
"MIT"
] | 1 | 2019-02-07T10:23:18.000Z | 2019-02-07T10:23:18.000Z | dist/python/tests/__init__.py | natir/pcon | d198a8d8e4469bc39b5bedde95e4b71a1f95ef81 | [
"MIT"
] | 1 | 2019-02-01T17:02:45.000Z | 2019-02-08T21:03:30.000Z | dist/python/tests/__init__.py | natir/ssik | 873cdc5b2e3c6b3f03191e26506ccb5e6d4b4d89 | [
"MIT"
] | 1 | 2019-11-04T09:17:59.000Z | 2019-11-04T09:17:59.000Z | import sys
# Drop the first sys.path entry (presumably the test directory itself) so
# imports resolve against the installed package rather than local sources —
# TODO(review): confirm this intent against the package layout.
sys.path.pop(0)
# Print the effective import path for debugging test discovery.
print(sys.path)
| 8.8 | 15 | 0.727273 | 9 | 44 | 3.555556 | 0.666667 | 0.4375 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.025641 | 0.113636 | 44 | 4 | 16 | 11 | 0.794872 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.333333 | 0 | 0.333333 | 0.333333 | 1 | 1 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
edfd85b132ad1d9089aec68bee79b6df772c2acd | 43 | py | Python | plexiglass/detectors/__init__.py | jkartzman/plexiglass | 257e3305e31f032c26300b0a9c78260ccd251cd6 | [
"MIT"
] | null | null | null | plexiglass/detectors/__init__.py | jkartzman/plexiglass | 257e3305e31f032c26300b0a9c78260ccd251cd6 | [
"MIT"
] | null | null | null | plexiglass/detectors/__init__.py | jkartzman/plexiglass | 257e3305e31f032c26300b0a9c78260ccd251cd6 | [
"MIT"
] | null | null | null | from .mesonet import MesoNet, MesoInception | 43 | 43 | 0.860465 | 5 | 43 | 7.4 | 0.8 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.093023 | 43 | 1 | 43 | 43 | 0.948718 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
b64177da058906a3550257b7db628aa55a074652 | 107,496 | py | Python | sdk/python/pulumi_spotinst/azure/_inputs.py | pulumi/pulumi-spotinst | 75592d6293d63f6cec703722f2e02ff1fb1cca44 | [
"ECL-2.0",
"Apache-2.0"
] | 4 | 2019-12-21T20:50:43.000Z | 2021-12-01T20:57:38.000Z | sdk/python/pulumi_spotinst/azure/_inputs.py | pulumi/pulumi-spotinst | 75592d6293d63f6cec703722f2e02ff1fb1cca44 | [
"ECL-2.0",
"Apache-2.0"
] | 103 | 2019-12-09T22:03:16.000Z | 2022-03-30T17:07:34.000Z | sdk/python/pulumi_spotinst/azure/_inputs.py | pulumi/pulumi-spotinst | 75592d6293d63f6cec703722f2e02ff1fb1cca44 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
# Public API of this generated module: every Pulumi input-type class below.
__all__ = [
    'ElastigroupHealthCheckArgs',
    'ElastigroupImageArgs',
    'ElastigroupImageCustomArgs',
    'ElastigroupImageMarketplaceArgs',
    'ElastigroupIntegrationKubernetesArgs',
    'ElastigroupIntegrationMultaiRuntimeArgs',
    'ElastigroupLoadBalancerArgs',
    'ElastigroupLoginArgs',
    'ElastigroupManagedServiceIdentityArgs',
    'ElastigroupNetworkArgs',
    'ElastigroupNetworkAdditionalIpConfigArgs',
    'ElastigroupScalingDownPolicyArgs',
    'ElastigroupScalingDownPolicyDimensionArgs',
    'ElastigroupScalingUpPolicyArgs',
    'ElastigroupScalingUpPolicyDimensionArgs',
    'ElastigroupScheduledTaskArgs',
    'ElastigroupStrategyArgs',
    'ElastigroupUpdatePolicyArgs',
    'ElastigroupUpdatePolicyRollConfigArgs',
    'OceanAutoscalerArgs',
    'OceanAutoscalerAutoscaleDownArgs',
    'OceanAutoscalerAutoscaleHeadroomArgs',
    'OceanAutoscalerAutoscaleHeadroomAutomaticArgs',
    'OceanAutoscalerResourceLimitsArgs',
    'OceanExtensionArgs',
    'OceanHealthArgs',
    'OceanImageArgs',
    'OceanImageMarketplaceArgs',
    'OceanLoadBalancerArgs',
    'OceanManagedServiceIdentityArgs',
    'OceanNetworkArgs',
    'OceanNetworkNetworkInterfaceArgs',
    'OceanNetworkNetworkInterfaceAdditionalIpConfigArgs',
    'OceanNetworkNetworkInterfaceSecurityGroupArgs',
    'OceanOsDiskArgs',
    'OceanStrategyArgs',
    'OceanTagArgs',
    'OceanVirtualNodeGroupAutoscaleArgs',
    'OceanVirtualNodeGroupAutoscaleAutoscaleHeadroomArgs',
    'OceanVirtualNodeGroupLabelArgs',
    'OceanVirtualNodeGroupLaunchSpecificationArgs',
    'OceanVirtualNodeGroupLaunchSpecificationOsDiskArgs',
    'OceanVirtualNodeGroupLaunchSpecificationTagArgs',
    'OceanVirtualNodeGroupResourceLimitArgs',
    'OceanVirtualNodeGroupTaintArgs',
    'OceanVmSizeArgs',
]
@pulumi.input_type
class ElastigroupHealthCheckArgs:
    """Health-check configuration for an Elastigroup.

    Auto-generated Pulumi input type (see file header); do not edit by hand.
    """
    def __init__(__self__, *,
                 health_check_type: pulumi.Input[str],
                 auto_healing: Optional[pulumi.Input[bool]] = None,
                 grace_period: Optional[pulumi.Input[int]] = None):
        """
        :param pulumi.Input[str] health_check_type: Sets the health check type to use. Valid values: `"INSTANCE_STATE"`, `"NONE"`.
        :param pulumi.Input[bool] auto_healing: Enable auto-healing of unhealthy VMs.
        :param pulumi.Input[int] grace_period: Sets the grace period for new instances to become healthy.
        """
        pulumi.set(__self__, "health_check_type", health_check_type)
        if auto_healing is not None:
            pulumi.set(__self__, "auto_healing", auto_healing)
        if grace_period is not None:
            pulumi.set(__self__, "grace_period", grace_period)

    @property
    @pulumi.getter(name="healthCheckType")
    def health_check_type(self) -> pulumi.Input[str]:
        """
        Sets the health check type to use. Valid values: `"INSTANCE_STATE"`, `"NONE"`.
        """
        return pulumi.get(self, "health_check_type")

    @health_check_type.setter
    def health_check_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "health_check_type", value)

    @property
    @pulumi.getter(name="autoHealing")
    def auto_healing(self) -> Optional[pulumi.Input[bool]]:
        """
        Enable auto-healing of unhealthy VMs.
        """
        return pulumi.get(self, "auto_healing")

    @auto_healing.setter
    def auto_healing(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "auto_healing", value)

    @property
    @pulumi.getter(name="gracePeriod")
    def grace_period(self) -> Optional[pulumi.Input[int]]:
        """
        Sets the grace period for new instances to become healthy.
        """
        return pulumi.get(self, "grace_period")

    @grace_period.setter
    def grace_period(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "grace_period", value)
@pulumi.input_type
class ElastigroupImageArgs:
    """Image source for an Elastigroup: custom images and/or marketplace images.

    Auto-generated Pulumi input type (see file header); do not edit by hand.
    """
    def __init__(__self__, *,
                 customs: Optional[pulumi.Input[Sequence[pulumi.Input['ElastigroupImageCustomArgs']]]] = None,
                 marketplaces: Optional[pulumi.Input[Sequence[pulumi.Input['ElastigroupImageMarketplaceArgs']]]] = None):
        """
        :param pulumi.Input[Sequence[pulumi.Input['ElastigroupImageCustomArgs']]] customs: Array of custom image objects.
        :param pulumi.Input[Sequence[pulumi.Input['ElastigroupImageMarketplaceArgs']]] marketplaces: Array of marketplace image objects.
        """
        if customs is not None:
            pulumi.set(__self__, "customs", customs)
        if marketplaces is not None:
            pulumi.set(__self__, "marketplaces", marketplaces)

    @property
    @pulumi.getter
    def customs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ElastigroupImageCustomArgs']]]]:
        """
        Array of custom image objects.
        """
        return pulumi.get(self, "customs")

    @customs.setter
    def customs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ElastigroupImageCustomArgs']]]]):
        pulumi.set(self, "customs", value)

    @property
    @pulumi.getter
    def marketplaces(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ElastigroupImageMarketplaceArgs']]]]:
        """
        Array of marketplace image objects.
        """
        return pulumi.get(self, "marketplaces")

    @marketplaces.setter
    def marketplaces(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ElastigroupImageMarketplaceArgs']]]]):
        pulumi.set(self, "marketplaces", value)
@pulumi.input_type
class ElastigroupImageCustomArgs:
    """Custom (user-provided) image reference for an Elastigroup.

    Auto-generated Pulumi input type (see file header); do not edit by hand.
    """
    def __init__(__self__, *,
                 image_name: pulumi.Input[str],
                 resource_group_name: pulumi.Input[str]):
        """
        :param pulumi.Input[str] image_name: Name of the custom image. Required if resource_group_name is specified.
        :param pulumi.Input[str] resource_group_name: Vnet Resource Group Name.
        """
        pulumi.set(__self__, "image_name", image_name)
        pulumi.set(__self__, "resource_group_name", resource_group_name)

    @property
    @pulumi.getter(name="imageName")
    def image_name(self) -> pulumi.Input[str]:
        """
        Name of the custom image. Required if resource_group_name is specified.
        """
        return pulumi.get(self, "image_name")

    @image_name.setter
    def image_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "image_name", value)

    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Input[str]:
        """
        Vnet Resource Group Name.
        """
        return pulumi.get(self, "resource_group_name")

    @resource_group_name.setter
    def resource_group_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_group_name", value)
@pulumi.input_type
class ElastigroupImageMarketplaceArgs:
    """Azure Marketplace image reference (offer / publisher / SKU).

    Auto-generated Pulumi input type (see file header); do not edit by hand.
    """
    def __init__(__self__, *,
                 offer: pulumi.Input[str],
                 publisher: pulumi.Input[str],
                 sku: pulumi.Input[str]):
        """
        :param pulumi.Input[str] offer: Name of the image to use. Required if publisher is specified.
        :param pulumi.Input[str] publisher: Image publisher. Required if resource_group_name is not specified.
        :param pulumi.Input[str] sku: Image's Stock Keeping Unit, which is the specific version of the image. Required if publisher is specified.
        """
        pulumi.set(__self__, "offer", offer)
        pulumi.set(__self__, "publisher", publisher)
        pulumi.set(__self__, "sku", sku)

    @property
    @pulumi.getter
    def offer(self) -> pulumi.Input[str]:
        """
        Name of the image to use. Required if publisher is specified.
        """
        return pulumi.get(self, "offer")

    @offer.setter
    def offer(self, value: pulumi.Input[str]):
        pulumi.set(self, "offer", value)

    @property
    @pulumi.getter
    def publisher(self) -> pulumi.Input[str]:
        """
        Image publisher. Required if resource_group_name is not specified.
        """
        return pulumi.get(self, "publisher")

    @publisher.setter
    def publisher(self, value: pulumi.Input[str]):
        pulumi.set(self, "publisher", value)

    @property
    @pulumi.getter
    def sku(self) -> pulumi.Input[str]:
        """
        Image's Stock Keeping Unit, which is the specific version of the image. Required if publisher is specified.
        """
        return pulumi.get(self, "sku")

    @sku.setter
    def sku(self, value: pulumi.Input[str]):
        pulumi.set(self, "sku", value)
@pulumi.input_type
class ElastigroupIntegrationKubernetesArgs:
    """Kubernetes integration settings for an Elastigroup.

    Auto-generated Pulumi input type (see file header); do not edit by hand.
    """
    def __init__(__self__, *,
                 cluster_identifier: pulumi.Input[str]):
        """
        :param pulumi.Input[str] cluster_identifier: The cluster ID.
        """
        pulumi.set(__self__, "cluster_identifier", cluster_identifier)

    @property
    @pulumi.getter(name="clusterIdentifier")
    def cluster_identifier(self) -> pulumi.Input[str]:
        """
        The cluster ID.
        """
        return pulumi.get(self, "cluster_identifier")

    @cluster_identifier.setter
    def cluster_identifier(self, value: pulumi.Input[str]):
        pulumi.set(self, "cluster_identifier", value)
@pulumi.input_type
class ElastigroupIntegrationMultaiRuntimeArgs:
    """Multai runtime integration settings for an Elastigroup.

    Auto-generated Pulumi input type (see file header); do not edit by hand.
    """
    def __init__(__self__, *,
                 deployment_id: pulumi.Input[str]):
        """
        :param pulumi.Input[str] deployment_id: The deployment id you want to get
        """
        pulumi.set(__self__, "deployment_id", deployment_id)

    @property
    @pulumi.getter(name="deploymentId")
    def deployment_id(self) -> pulumi.Input[str]:
        """
        The deployment id you want to get
        """
        return pulumi.get(self, "deployment_id")

    @deployment_id.setter
    def deployment_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "deployment_id", value)
@pulumi.input_type
class ElastigroupLoadBalancerArgs:
    """Load balancer attachment for an Elastigroup.

    Auto-generated Pulumi input type (see file header); do not edit by hand.
    """
    def __init__(__self__, *,
                 type: pulumi.Input[str],
                 auto_weight: Optional[pulumi.Input[bool]] = None,
                 balancer_id: Optional[pulumi.Input[str]] = None,
                 target_set_id: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] type: The resource type. Valid values: CLASSIC, TARGET_GROUP, MULTAI_TARGET_SET.
        :param pulumi.Input[str] balancer_id: The balancer ID.
        :param pulumi.Input[str] target_set_id: The scale set ID associated with the load balancer.
        """
        pulumi.set(__self__, "type", type)
        if auto_weight is not None:
            pulumi.set(__self__, "auto_weight", auto_weight)
        if balancer_id is not None:
            pulumi.set(__self__, "balancer_id", balancer_id)
        if target_set_id is not None:
            pulumi.set(__self__, "target_set_id", target_set_id)

    @property
    @pulumi.getter
    def type(self) -> pulumi.Input[str]:
        """
        The resource type. Valid values: CLASSIC, TARGET_GROUP, MULTAI_TARGET_SET.
        """
        return pulumi.get(self, "type")

    @type.setter
    def type(self, value: pulumi.Input[str]):
        pulumi.set(self, "type", value)

    @property
    @pulumi.getter(name="autoWeight")
    def auto_weight(self) -> Optional[pulumi.Input[bool]]:
        # NOTE(review): undocumented in the generated schema; presumably
        # toggles automatic target weighting — confirm against the Spot API.
        return pulumi.get(self, "auto_weight")

    @auto_weight.setter
    def auto_weight(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "auto_weight", value)

    @property
    @pulumi.getter(name="balancerId")
    def balancer_id(self) -> Optional[pulumi.Input[str]]:
        """
        The balancer ID.
        """
        return pulumi.get(self, "balancer_id")

    @balancer_id.setter
    def balancer_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "balancer_id", value)

    @property
    @pulumi.getter(name="targetSetId")
    def target_set_id(self) -> Optional[pulumi.Input[str]]:
        """
        The scale set ID associated with the load balancer.
        """
        return pulumi.get(self, "target_set_id")

    @target_set_id.setter
    def target_set_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "target_set_id", value)
@pulumi.input_type
class ElastigroupLoginArgs:
    """Admin login credentials for Elastigroup VMs.

    Auto-generated Pulumi input type (see file header); do not edit by hand.
    """
    def __init__(__self__, *,
                 user_name: pulumi.Input[str],
                 password: Optional[pulumi.Input[str]] = None,
                 ssh_public_key: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] user_name: Set admin access for accessing your VMs.
        :param pulumi.Input[str] password: Password for admin access to Windows VMs. Required for Windows product types.
        :param pulumi.Input[str] ssh_public_key: SSH for admin access to Linux VMs. Required for Linux product types.
        """
        pulumi.set(__self__, "user_name", user_name)
        if password is not None:
            pulumi.set(__self__, "password", password)
        if ssh_public_key is not None:
            pulumi.set(__self__, "ssh_public_key", ssh_public_key)

    @property
    @pulumi.getter(name="userName")
    def user_name(self) -> pulumi.Input[str]:
        """
        Set admin access for accessing your VMs.
        """
        return pulumi.get(self, "user_name")

    @user_name.setter
    def user_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "user_name", value)

    @property
    @pulumi.getter
    def password(self) -> Optional[pulumi.Input[str]]:
        """
        Password for admin access to Windows VMs. Required for Windows product types.
        """
        return pulumi.get(self, "password")

    @password.setter
    def password(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "password", value)

    @property
    @pulumi.getter(name="sshPublicKey")
    def ssh_public_key(self) -> Optional[pulumi.Input[str]]:
        """
        SSH for admin access to Linux VMs. Required for Linux product types.
        """
        return pulumi.get(self, "ssh_public_key")

    @ssh_public_key.setter
    def ssh_public_key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "ssh_public_key", value)
@pulumi.input_type
class ElastigroupManagedServiceIdentityArgs:
    """Managed service identity (name + resource group) for an Elastigroup.

    Auto-generated Pulumi input type (see file header); do not edit by hand.
    """
    def __init__(__self__, *,
                 name: pulumi.Input[str],
                 resource_group_name: pulumi.Input[str]):
        """
        :param pulumi.Input[str] name: The dimension name.
        :param pulumi.Input[str] resource_group_name: Vnet Resource Group Name.
        """
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "resource_group_name", resource_group_name)

    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        """
        The dimension name.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: pulumi.Input[str]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Input[str]:
        """
        Vnet Resource Group Name.
        """
        return pulumi.get(self, "resource_group_name")

    @resource_group_name.setter
    def resource_group_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_group_name", value)
@pulumi.input_type
class ElastigroupNetworkArgs:
    """Virtual-network placement (VNet / subnet / IP configs) for an Elastigroup.

    Auto-generated Pulumi input type (see file header); do not edit by hand.
    """
    def __init__(__self__, *,
                 resource_group_name: pulumi.Input[str],
                 subnet_name: pulumi.Input[str],
                 virtual_network_name: pulumi.Input[str],
                 additional_ip_configs: Optional[pulumi.Input[Sequence[pulumi.Input['ElastigroupNetworkAdditionalIpConfigArgs']]]] = None,
                 assign_public_ip: Optional[pulumi.Input[bool]] = None):
        """
        :param pulumi.Input[str] resource_group_name: Vnet Resource Group Name.
        :param pulumi.Input[str] subnet_name: ID of subnet.
        :param pulumi.Input[str] virtual_network_name: Name of Vnet.
        :param pulumi.Input[Sequence[pulumi.Input['ElastigroupNetworkAdditionalIpConfigArgs']]] additional_ip_configs: Array of additional IP configuration objects.
        """
        pulumi.set(__self__, "resource_group_name", resource_group_name)
        pulumi.set(__self__, "subnet_name", subnet_name)
        pulumi.set(__self__, "virtual_network_name", virtual_network_name)
        if additional_ip_configs is not None:
            pulumi.set(__self__, "additional_ip_configs", additional_ip_configs)
        if assign_public_ip is not None:
            pulumi.set(__self__, "assign_public_ip", assign_public_ip)

    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Input[str]:
        """
        Vnet Resource Group Name.
        """
        return pulumi.get(self, "resource_group_name")

    @resource_group_name.setter
    def resource_group_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_group_name", value)

    @property
    @pulumi.getter(name="subnetName")
    def subnet_name(self) -> pulumi.Input[str]:
        """
        ID of subnet.
        """
        return pulumi.get(self, "subnet_name")

    @subnet_name.setter
    def subnet_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "subnet_name", value)

    @property
    @pulumi.getter(name="virtualNetworkName")
    def virtual_network_name(self) -> pulumi.Input[str]:
        """
        Name of Vnet.
        """
        return pulumi.get(self, "virtual_network_name")

    @virtual_network_name.setter
    def virtual_network_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "virtual_network_name", value)

    @property
    @pulumi.getter(name="additionalIpConfigs")
    def additional_ip_configs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ElastigroupNetworkAdditionalIpConfigArgs']]]]:
        """
        Array of additional IP configuration objects.
        """
        return pulumi.get(self, "additional_ip_configs")

    @additional_ip_configs.setter
    def additional_ip_configs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ElastigroupNetworkAdditionalIpConfigArgs']]]]):
        pulumi.set(self, "additional_ip_configs", value)

    @property
    @pulumi.getter(name="assignPublicIp")
    def assign_public_ip(self) -> Optional[pulumi.Input[bool]]:
        # NOTE(review): undocumented in the generated schema; presumably
        # controls whether VMs get a public IP — confirm against the Spot API.
        return pulumi.get(self, "assign_public_ip")

    @assign_public_ip.setter
    def assign_public_ip(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "assign_public_ip", value)
@pulumi.input_type
class ElastigroupNetworkAdditionalIpConfigArgs:
    def __init__(__self__, *,
                 name: pulumi.Input[str],
                 private_ip_version: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] name: The dimension name.
        :param pulumi.Input[str] private_ip_version: Available from Azure Api-Version 2017-03-30 onwards, it represents whether the specific ipconfiguration is IPv4 or IPv6. Valid values: `IPv4`, `IPv6`.
        """
        pulumi.set(__self__, "name", name)
        if private_ip_version is None:
            return
        pulumi.set(__self__, "private_ip_version", private_ip_version)

    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        """The dimension name."""
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: pulumi.Input[str]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="privateIpVersion")
    def private_ip_version(self) -> Optional[pulumi.Input[str]]:
        """
        Available from Azure Api-Version 2017-03-30 onwards, it represents whether the specific ipconfiguration is IPv4 or IPv6. Valid values: `IPv4`, `IPv6`.
        """
        return pulumi.get(self, "private_ip_version")

    @private_ip_version.setter
    def private_ip_version(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "private_ip_version", value)
@pulumi.input_type
class ElastigroupScalingDownPolicyArgs:
    def __init__(__self__, *,
                 metric_name: pulumi.Input[str],
                 namespace: pulumi.Input[str],
                 policy_name: pulumi.Input[str],
                 threshold: pulumi.Input[float],
                 action_type: Optional[pulumi.Input[str]] = None,
                 adjustment: Optional[pulumi.Input[str]] = None,
                 cooldown: Optional[pulumi.Input[int]] = None,
                 dimensions: Optional[pulumi.Input[Sequence[pulumi.Input['ElastigroupScalingDownPolicyDimensionArgs']]]] = None,
                 evaluation_periods: Optional[pulumi.Input[int]] = None,
                 max_target_capacity: Optional[pulumi.Input[str]] = None,
                 maximum: Optional[pulumi.Input[str]] = None,
                 min_target_capacity: Optional[pulumi.Input[str]] = None,
                 minimum: Optional[pulumi.Input[str]] = None,
                 operator: Optional[pulumi.Input[str]] = None,
                 period: Optional[pulumi.Input[int]] = None,
                 statistic: Optional[pulumi.Input[str]] = None,
                 target: Optional[pulumi.Input[str]] = None,
                 unit: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] metric_name: Metric to monitor by Azure metric display name.
        :param pulumi.Input[str] namespace: The namespace for the alarm's associated metric. Valid values:
        :param pulumi.Input[str] policy_name: The name of the policy.
        :param pulumi.Input[float] threshold: The value against which the specified statistic is compared.
        :param pulumi.Input[str] action_type: The type of action to perform for scaling. Valid values: `"adjustment"`, `"percentageAdjustment"`, `"setMaxTarget"`, `"setMinTarget"`, `"updateCapacity"`.
        :param pulumi.Input[str] adjustment: The number of instances to add/remove to/from the target capacity when scale is needed.
        :param pulumi.Input[int] cooldown: The amount of time, in seconds, after a scaling activity completes and before the next scaling activity can start. If this parameter is not specified, the default cooldown period for the group applies.
        :param pulumi.Input[Sequence[pulumi.Input['ElastigroupScalingDownPolicyDimensionArgs']]] dimensions: A list of dimensions describing qualities of the metric. Required when `namespace` is defined AND not `"Microsoft.Compute"`.
        :param pulumi.Input[int] evaluation_periods: The number of periods over which data is compared to the specified threshold.
        :param pulumi.Input[str] max_target_capacity: The number of the desired target (and maximum) capacity.
        :param pulumi.Input[str] maximum: The maximal number of instances to have in the group.
        :param pulumi.Input[str] min_target_capacity: The number of the desired target (and minimum) capacity.
        :param pulumi.Input[str] minimum: The minimal number of instances to have in the group.
        :param pulumi.Input[str] operator: The operator to use in order to determine if the scaling policy is applicable. Valid values: `"gt"`, `"gte"`, `"lt"`, `"lte"`.
        :param pulumi.Input[int] period: The granularity, in seconds, of the returned datapoints. Period must be at least 60 seconds and must be a multiple of 60.
        :param pulumi.Input[str] statistic: The metric statistics to return. Valid values: `average`.
        :param pulumi.Input[str] target: The target number of instances to have in the group.
        :param pulumi.Input[str] unit: The unit for the alarm's associated metric. Valid values: `"percent"`, `"seconds"`, `"microseconds"`, `"milliseconds"`, `"bytes"`, `"kilobytes"`, `"megabytes"`, `"gigabytes"`, `"terabytes"`, `"bits"`, `"kilobits"`, `"megabits"`, `"gigabits"`, `"terabits"`, `"count"`, `"bytes/second"`, `"kilobytes/second"`, `"megabytes/second"`, `"gigabytes/second"`, `"terabytes/second"`, `"bits/second"`, `"kilobits/second"`, `"megabits/second"`, `"gigabits/second"`, `"terabits/second"`, `"count/second"`, `"none"`.
        """
        pulumi.set(__self__, "metric_name", metric_name)
        pulumi.set(__self__, "namespace", namespace)
        pulumi.set(__self__, "policy_name", policy_name)
        pulumi.set(__self__, "threshold", threshold)
        if action_type is not None:
            pulumi.set(__self__, "action_type", action_type)
        if adjustment is not None:
            pulumi.set(__self__, "adjustment", adjustment)
        if cooldown is not None:
            pulumi.set(__self__, "cooldown", cooldown)
        if dimensions is not None:
            pulumi.set(__self__, "dimensions", dimensions)
        if evaluation_periods is not None:
            pulumi.set(__self__, "evaluation_periods", evaluation_periods)
        if max_target_capacity is not None:
            pulumi.set(__self__, "max_target_capacity", max_target_capacity)
        if maximum is not None:
            pulumi.set(__self__, "maximum", maximum)
        if min_target_capacity is not None:
            pulumi.set(__self__, "min_target_capacity", min_target_capacity)
        if minimum is not None:
            pulumi.set(__self__, "minimum", minimum)
        if operator is not None:
            pulumi.set(__self__, "operator", operator)
        if period is not None:
            pulumi.set(__self__, "period", period)
        if statistic is not None:
            pulumi.set(__self__, "statistic", statistic)
        if target is not None:
            pulumi.set(__self__, "target", target)
        if unit is not None:
            pulumi.set(__self__, "unit", unit)

    @property
    @pulumi.getter(name="metricName")
    def metric_name(self) -> pulumi.Input[str]:
        """
        Metric to monitor by Azure metric display name.
        """
        return pulumi.get(self, "metric_name")

    @metric_name.setter
    def metric_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "metric_name", value)

    @property
    @pulumi.getter
    def namespace(self) -> pulumi.Input[str]:
        """
        The namespace for the alarm's associated metric. Valid values:
        """
        return pulumi.get(self, "namespace")

    @namespace.setter
    def namespace(self, value: pulumi.Input[str]):
        pulumi.set(self, "namespace", value)

    @property
    @pulumi.getter(name="policyName")
    def policy_name(self) -> pulumi.Input[str]:
        """
        The name of the policy.
        """
        return pulumi.get(self, "policy_name")

    @policy_name.setter
    def policy_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "policy_name", value)

    @property
    @pulumi.getter
    def threshold(self) -> pulumi.Input[float]:
        """
        The value against which the specified statistic is compared.
        """
        return pulumi.get(self, "threshold")

    @threshold.setter
    def threshold(self, value: pulumi.Input[float]):
        pulumi.set(self, "threshold", value)

    @property
    @pulumi.getter(name="actionType")
    def action_type(self) -> Optional[pulumi.Input[str]]:
        """
        The type of action to perform for scaling. Valid values: `"adjustment"`, `"percentageAdjustment"`, `"setMaxTarget"`, `"setMinTarget"`, `"updateCapacity"`.
        """
        return pulumi.get(self, "action_type")

    @action_type.setter
    def action_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "action_type", value)

    @property
    @pulumi.getter
    def adjustment(self) -> Optional[pulumi.Input[str]]:
        """
        The number of instances to add/remove to/from the target capacity when scale is needed.
        """
        return pulumi.get(self, "adjustment")

    @adjustment.setter
    def adjustment(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "adjustment", value)

    @property
    @pulumi.getter
    def cooldown(self) -> Optional[pulumi.Input[int]]:
        """
        The amount of time, in seconds, after a scaling activity completes and before the next scaling activity can start. If this parameter is not specified, the default cooldown period for the group applies.
        """
        return pulumi.get(self, "cooldown")

    @cooldown.setter
    def cooldown(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "cooldown", value)

    @property
    @pulumi.getter
    def dimensions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ElastigroupScalingDownPolicyDimensionArgs']]]]:
        """
        A list of dimensions describing qualities of the metric. Required when `namespace` is defined AND not `"Microsoft.Compute"`.
        """
        return pulumi.get(self, "dimensions")

    @dimensions.setter
    def dimensions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ElastigroupScalingDownPolicyDimensionArgs']]]]):
        pulumi.set(self, "dimensions", value)

    @property
    @pulumi.getter(name="evaluationPeriods")
    def evaluation_periods(self) -> Optional[pulumi.Input[int]]:
        """
        The number of periods over which data is compared to the specified threshold.
        """
        return pulumi.get(self, "evaluation_periods")

    @evaluation_periods.setter
    def evaluation_periods(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "evaluation_periods", value)

    @property
    @pulumi.getter(name="maxTargetCapacity")
    def max_target_capacity(self) -> Optional[pulumi.Input[str]]:
        """
        The number of the desired target (and maximum) capacity.
        """
        return pulumi.get(self, "max_target_capacity")

    @max_target_capacity.setter
    def max_target_capacity(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "max_target_capacity", value)

    @property
    @pulumi.getter
    def maximum(self) -> Optional[pulumi.Input[str]]:
        """
        The maximal number of instances to have in the group.
        """
        return pulumi.get(self, "maximum")

    @maximum.setter
    def maximum(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "maximum", value)

    @property
    @pulumi.getter(name="minTargetCapacity")
    def min_target_capacity(self) -> Optional[pulumi.Input[str]]:
        """
        The number of the desired target (and minimum) capacity.
        """
        return pulumi.get(self, "min_target_capacity")

    @min_target_capacity.setter
    def min_target_capacity(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "min_target_capacity", value)

    @property
    @pulumi.getter
    def minimum(self) -> Optional[pulumi.Input[str]]:
        """
        The minimal number of instances to have in the group.
        """
        return pulumi.get(self, "minimum")

    @minimum.setter
    def minimum(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "minimum", value)

    @property
    @pulumi.getter
    def operator(self) -> Optional[pulumi.Input[str]]:
        """
        The operator to use in order to determine if the scaling policy is applicable. Valid values: `"gt"`, `"gte"`, `"lt"`, `"lte"`.
        """
        return pulumi.get(self, "operator")

    @operator.setter
    def operator(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "operator", value)

    @property
    @pulumi.getter
    def period(self) -> Optional[pulumi.Input[int]]:
        """
        The granularity, in seconds, of the returned datapoints. Period must be at least 60 seconds and must be a multiple of 60.
        """
        return pulumi.get(self, "period")

    @period.setter
    def period(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "period", value)

    @property
    @pulumi.getter
    def statistic(self) -> Optional[pulumi.Input[str]]:
        """
        The metric statistics to return. Valid values: `average`.
        """
        return pulumi.get(self, "statistic")

    @statistic.setter
    def statistic(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "statistic", value)

    @property
    @pulumi.getter
    def target(self) -> Optional[pulumi.Input[str]]:
        """
        The target number of instances to have in the group.
        """
        return pulumi.get(self, "target")

    @target.setter
    def target(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "target", value)

    @property
    @pulumi.getter
    def unit(self) -> Optional[pulumi.Input[str]]:
        """
        The unit for the alarm's associated metric. Valid values: `"percent"`, `"seconds"`, `"microseconds"`, `"milliseconds"`, `"bytes"`, `"kilobytes"`, `"megabytes"`, `"gigabytes"`, `"terabytes"`, `"bits"`, `"kilobits"`, `"megabits"`, `"gigabits"`, `"terabits"`, `"count"`, `"bytes/second"`, `"kilobytes/second"`, `"megabytes/second"`, `"gigabytes/second"`, `"terabytes/second"`, `"bits/second"`, `"kilobits/second"`, `"megabits/second"`, `"gigabits/second"`, `"terabits/second"`, `"count/second"`, `"none"`.
        """
        return pulumi.get(self, "unit")

    @unit.setter
    def unit(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "unit", value)
@pulumi.input_type
class ElastigroupScalingDownPolicyDimensionArgs:
    def __init__(__self__, *,
                 name: pulumi.Input[str],
                 value: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] name: The dimension name.
        :param pulumi.Input[str] value: The dimension value.
        """
        pulumi.set(__self__, "name", name)
        if value is None:
            return
        pulumi.set(__self__, "value", value)

    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        """The dimension name."""
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: pulumi.Input[str]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def value(self) -> Optional[pulumi.Input[str]]:
        """The dimension value."""
        return pulumi.get(self, "value")

    @value.setter
    def value(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "value", value)
@pulumi.input_type
class ElastigroupScalingUpPolicyArgs:
    def __init__(__self__, *,
                 metric_name: pulumi.Input[str],
                 namespace: pulumi.Input[str],
                 policy_name: pulumi.Input[str],
                 threshold: pulumi.Input[float],
                 action_type: Optional[pulumi.Input[str]] = None,
                 adjustment: Optional[pulumi.Input[str]] = None,
                 cooldown: Optional[pulumi.Input[int]] = None,
                 dimensions: Optional[pulumi.Input[Sequence[pulumi.Input['ElastigroupScalingUpPolicyDimensionArgs']]]] = None,
                 evaluation_periods: Optional[pulumi.Input[int]] = None,
                 max_target_capacity: Optional[pulumi.Input[str]] = None,
                 maximum: Optional[pulumi.Input[str]] = None,
                 min_target_capacity: Optional[pulumi.Input[str]] = None,
                 minimum: Optional[pulumi.Input[str]] = None,
                 operator: Optional[pulumi.Input[str]] = None,
                 period: Optional[pulumi.Input[int]] = None,
                 statistic: Optional[pulumi.Input[str]] = None,
                 target: Optional[pulumi.Input[str]] = None,
                 unit: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] metric_name: Metric to monitor by Azure metric display name.
        :param pulumi.Input[str] namespace: The namespace for the alarm's associated metric. Valid values:
        :param pulumi.Input[str] policy_name: The name of the policy.
        :param pulumi.Input[float] threshold: The value against which the specified statistic is compared.
        :param pulumi.Input[str] action_type: The type of action to perform for scaling. Valid values: `"adjustment"`, `"percentageAdjustment"`, `"setMaxTarget"`, `"setMinTarget"`, `"updateCapacity"`.
        :param pulumi.Input[str] adjustment: The number of instances to add/remove to/from the target capacity when scale is needed.
        :param pulumi.Input[int] cooldown: The amount of time, in seconds, after a scaling activity completes and before the next scaling activity can start. If this parameter is not specified, the default cooldown period for the group applies.
        :param pulumi.Input[Sequence[pulumi.Input['ElastigroupScalingUpPolicyDimensionArgs']]] dimensions: A list of dimensions describing qualities of the metric. Required when `namespace` is defined AND not `"Microsoft.Compute"`.
        :param pulumi.Input[int] evaluation_periods: The number of periods over which data is compared to the specified threshold.
        :param pulumi.Input[str] max_target_capacity: The number of the desired target (and maximum) capacity.
        :param pulumi.Input[str] maximum: The maximal number of instances to have in the group.
        :param pulumi.Input[str] min_target_capacity: The number of the desired target (and minimum) capacity.
        :param pulumi.Input[str] minimum: The minimal number of instances to have in the group.
        :param pulumi.Input[str] operator: The operator to use in order to determine if the scaling policy is applicable. Valid values: `"gt"`, `"gte"`, `"lt"`, `"lte"`.
        :param pulumi.Input[int] period: The granularity, in seconds, of the returned datapoints. Period must be at least 60 seconds and must be a multiple of 60.
        :param pulumi.Input[str] statistic: The metric statistics to return. Valid values: `average`.
        :param pulumi.Input[str] target: The target number of instances to have in the group.
        :param pulumi.Input[str] unit: The unit for the alarm's associated metric. Valid values: `"percent"`, `"seconds"`, `"microseconds"`, `"milliseconds"`, `"bytes"`, `"kilobytes"`, `"megabytes"`, `"gigabytes"`, `"terabytes"`, `"bits"`, `"kilobits"`, `"megabits"`, `"gigabits"`, `"terabits"`, `"count"`, `"bytes/second"`, `"kilobytes/second"`, `"megabytes/second"`, `"gigabytes/second"`, `"terabytes/second"`, `"bits/second"`, `"kilobits/second"`, `"megabits/second"`, `"gigabits/second"`, `"terabits/second"`, `"count/second"`, `"none"`.
        """
        pulumi.set(__self__, "metric_name", metric_name)
        pulumi.set(__self__, "namespace", namespace)
        pulumi.set(__self__, "policy_name", policy_name)
        pulumi.set(__self__, "threshold", threshold)
        if action_type is not None:
            pulumi.set(__self__, "action_type", action_type)
        if adjustment is not None:
            pulumi.set(__self__, "adjustment", adjustment)
        if cooldown is not None:
            pulumi.set(__self__, "cooldown", cooldown)
        if dimensions is not None:
            pulumi.set(__self__, "dimensions", dimensions)
        if evaluation_periods is not None:
            pulumi.set(__self__, "evaluation_periods", evaluation_periods)
        if max_target_capacity is not None:
            pulumi.set(__self__, "max_target_capacity", max_target_capacity)
        if maximum is not None:
            pulumi.set(__self__, "maximum", maximum)
        if min_target_capacity is not None:
            pulumi.set(__self__, "min_target_capacity", min_target_capacity)
        if minimum is not None:
            pulumi.set(__self__, "minimum", minimum)
        if operator is not None:
            pulumi.set(__self__, "operator", operator)
        if period is not None:
            pulumi.set(__self__, "period", period)
        if statistic is not None:
            pulumi.set(__self__, "statistic", statistic)
        if target is not None:
            pulumi.set(__self__, "target", target)
        if unit is not None:
            pulumi.set(__self__, "unit", unit)

    @property
    @pulumi.getter(name="metricName")
    def metric_name(self) -> pulumi.Input[str]:
        """
        Metric to monitor by Azure metric display name.
        """
        return pulumi.get(self, "metric_name")

    @metric_name.setter
    def metric_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "metric_name", value)

    @property
    @pulumi.getter
    def namespace(self) -> pulumi.Input[str]:
        """
        The namespace for the alarm's associated metric. Valid values:
        """
        return pulumi.get(self, "namespace")

    @namespace.setter
    def namespace(self, value: pulumi.Input[str]):
        pulumi.set(self, "namespace", value)

    @property
    @pulumi.getter(name="policyName")
    def policy_name(self) -> pulumi.Input[str]:
        """
        The name of the policy.
        """
        return pulumi.get(self, "policy_name")

    @policy_name.setter
    def policy_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "policy_name", value)

    @property
    @pulumi.getter
    def threshold(self) -> pulumi.Input[float]:
        """
        The value against which the specified statistic is compared.
        """
        return pulumi.get(self, "threshold")

    @threshold.setter
    def threshold(self, value: pulumi.Input[float]):
        pulumi.set(self, "threshold", value)

    @property
    @pulumi.getter(name="actionType")
    def action_type(self) -> Optional[pulumi.Input[str]]:
        """
        The type of action to perform for scaling. Valid values: `"adjustment"`, `"percentageAdjustment"`, `"setMaxTarget"`, `"setMinTarget"`, `"updateCapacity"`.
        """
        return pulumi.get(self, "action_type")

    @action_type.setter
    def action_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "action_type", value)

    @property
    @pulumi.getter
    def adjustment(self) -> Optional[pulumi.Input[str]]:
        """
        The number of instances to add/remove to/from the target capacity when scale is needed.
        """
        return pulumi.get(self, "adjustment")

    @adjustment.setter
    def adjustment(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "adjustment", value)

    @property
    @pulumi.getter
    def cooldown(self) -> Optional[pulumi.Input[int]]:
        """
        The amount of time, in seconds, after a scaling activity completes and before the next scaling activity can start. If this parameter is not specified, the default cooldown period for the group applies.
        """
        return pulumi.get(self, "cooldown")

    @cooldown.setter
    def cooldown(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "cooldown", value)

    @property
    @pulumi.getter
    def dimensions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ElastigroupScalingUpPolicyDimensionArgs']]]]:
        """
        A list of dimensions describing qualities of the metric. Required when `namespace` is defined AND not `"Microsoft.Compute"`.
        """
        return pulumi.get(self, "dimensions")

    @dimensions.setter
    def dimensions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ElastigroupScalingUpPolicyDimensionArgs']]]]):
        pulumi.set(self, "dimensions", value)

    @property
    @pulumi.getter(name="evaluationPeriods")
    def evaluation_periods(self) -> Optional[pulumi.Input[int]]:
        """
        The number of periods over which data is compared to the specified threshold.
        """
        return pulumi.get(self, "evaluation_periods")

    @evaluation_periods.setter
    def evaluation_periods(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "evaluation_periods", value)

    @property
    @pulumi.getter(name="maxTargetCapacity")
    def max_target_capacity(self) -> Optional[pulumi.Input[str]]:
        """
        The number of the desired target (and maximum) capacity.
        """
        return pulumi.get(self, "max_target_capacity")

    @max_target_capacity.setter
    def max_target_capacity(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "max_target_capacity", value)

    @property
    @pulumi.getter
    def maximum(self) -> Optional[pulumi.Input[str]]:
        """
        The maximal number of instances to have in the group.
        """
        return pulumi.get(self, "maximum")

    @maximum.setter
    def maximum(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "maximum", value)

    @property
    @pulumi.getter(name="minTargetCapacity")
    def min_target_capacity(self) -> Optional[pulumi.Input[str]]:
        """
        The number of the desired target (and minimum) capacity.
        """
        return pulumi.get(self, "min_target_capacity")

    @min_target_capacity.setter
    def min_target_capacity(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "min_target_capacity", value)

    @property
    @pulumi.getter
    def minimum(self) -> Optional[pulumi.Input[str]]:
        """
        The minimal number of instances to have in the group.
        """
        return pulumi.get(self, "minimum")

    @minimum.setter
    def minimum(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "minimum", value)

    @property
    @pulumi.getter
    def operator(self) -> Optional[pulumi.Input[str]]:
        """
        The operator to use in order to determine if the scaling policy is applicable. Valid values: `"gt"`, `"gte"`, `"lt"`, `"lte"`.
        """
        return pulumi.get(self, "operator")

    @operator.setter
    def operator(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "operator", value)

    @property
    @pulumi.getter
    def period(self) -> Optional[pulumi.Input[int]]:
        """
        The granularity, in seconds, of the returned datapoints. Period must be at least 60 seconds and must be a multiple of 60.
        """
        return pulumi.get(self, "period")

    @period.setter
    def period(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "period", value)

    @property
    @pulumi.getter
    def statistic(self) -> Optional[pulumi.Input[str]]:
        """
        The metric statistics to return. Valid values: `average`.
        """
        return pulumi.get(self, "statistic")

    @statistic.setter
    def statistic(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "statistic", value)

    @property
    @pulumi.getter
    def target(self) -> Optional[pulumi.Input[str]]:
        """
        The target number of instances to have in the group.
        """
        return pulumi.get(self, "target")

    @target.setter
    def target(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "target", value)

    @property
    @pulumi.getter
    def unit(self) -> Optional[pulumi.Input[str]]:
        """
        The unit for the alarm's associated metric. Valid values: `"percent"`, `"seconds"`, `"microseconds"`, `"milliseconds"`, `"bytes"`, `"kilobytes"`, `"megabytes"`, `"gigabytes"`, `"terabytes"`, `"bits"`, `"kilobits"`, `"megabits"`, `"gigabits"`, `"terabits"`, `"count"`, `"bytes/second"`, `"kilobytes/second"`, `"megabytes/second"`, `"gigabytes/second"`, `"terabytes/second"`, `"bits/second"`, `"kilobits/second"`, `"megabits/second"`, `"gigabits/second"`, `"terabits/second"`, `"count/second"`, `"none"`.
        """
        return pulumi.get(self, "unit")

    @unit.setter
    def unit(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "unit", value)
@pulumi.input_type
class ElastigroupScalingUpPolicyDimensionArgs:
    def __init__(__self__, *,
                 name: pulumi.Input[str],
                 value: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] name: The dimension name.
        :param pulumi.Input[str] value: The dimension value.
        """
        pulumi.set(__self__, "name", name)
        if value is None:
            return
        pulumi.set(__self__, "value", value)

    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        """The dimension name."""
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: pulumi.Input[str]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def value(self) -> Optional[pulumi.Input[str]]:
        """The dimension value."""
        return pulumi.get(self, "value")

    @value.setter
    def value(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "value", value)
@pulumi.input_type
class ElastigroupScheduledTaskArgs:
def __init__(__self__, *,
cron_expression: pulumi.Input[str],
task_type: pulumi.Input[str],
adjustment: Optional[pulumi.Input[str]] = None,
adjustment_percentage: Optional[pulumi.Input[str]] = None,
batch_size_percentage: Optional[pulumi.Input[str]] = None,
grace_period: Optional[pulumi.Input[str]] = None,
is_enabled: Optional[pulumi.Input[bool]] = None,
scale_max_capacity: Optional[pulumi.Input[str]] = None,
scale_min_capacity: Optional[pulumi.Input[str]] = None,
scale_target_capacity: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] cron_expression: A valid cron expression (`* * * * *`). The cron is running in UTC time zone and is in Unix cron format Cron Expression Validator Script.
:param pulumi.Input[str] task_type: The task type to run. Valid Values: `backup_ami`, `scale`, `scaleUp`, `roll`, `statefulUpdateCapacity`, `statefulRecycle`.
:param pulumi.Input[str] adjustment: The number of instances to add/remove to/from the target capacity when scale is needed.
:param pulumi.Input[str] adjustment_percentage: The percent of instances to add/remove to/from the target capacity when scale is needed.
:param pulumi.Input[str] batch_size_percentage: Sets the percentage of the instances to deploy in each batch.
:param pulumi.Input[str] grace_period: Sets the grace period for new instances to become healthy.
:param pulumi.Input[bool] is_enabled: Describes whether the task is enabled. When true the task should run when false it should not run.
:param pulumi.Input[str] scale_max_capacity: The max capacity of the group. Required when ‘task_type' is ‘scale'.
:param pulumi.Input[str] scale_min_capacity: The min capacity of the group. Should be used when choosing ‘task_type' of ‘scale'.
:param pulumi.Input[str] scale_target_capacity: The target capacity of the group. Should be used when choosing ‘task_type' of ‘scale'.
"""
pulumi.set(__self__, "cron_expression", cron_expression)
pulumi.set(__self__, "task_type", task_type)
if adjustment is not None:
pulumi.set(__self__, "adjustment", adjustment)
if adjustment_percentage is not None:
pulumi.set(__self__, "adjustment_percentage", adjustment_percentage)
if batch_size_percentage is not None:
pulumi.set(__self__, "batch_size_percentage", batch_size_percentage)
if grace_period is not None:
pulumi.set(__self__, "grace_period", grace_period)
if is_enabled is not None:
pulumi.set(__self__, "is_enabled", is_enabled)
if scale_max_capacity is not None:
pulumi.set(__self__, "scale_max_capacity", scale_max_capacity)
if scale_min_capacity is not None:
pulumi.set(__self__, "scale_min_capacity", scale_min_capacity)
if scale_target_capacity is not None:
pulumi.set(__self__, "scale_target_capacity", scale_target_capacity)
@property
@pulumi.getter(name="cronExpression")
def cron_expression(self) -> pulumi.Input[str]:
"""
A valid cron expression (`* * * * *`). The cron is running in UTC time zone and is in Unix cron format Cron Expression Validator Script.
"""
return pulumi.get(self, "cron_expression")
@cron_expression.setter
def cron_expression(self, value: pulumi.Input[str]):
pulumi.set(self, "cron_expression", value)
@property
@pulumi.getter(name="taskType")
def task_type(self) -> pulumi.Input[str]:
"""
The task type to run. Valid Values: `backup_ami`, `scale`, `scaleUp`, `roll`, `statefulUpdateCapacity`, `statefulRecycle`.
"""
return pulumi.get(self, "task_type")
@task_type.setter
def task_type(self, value: pulumi.Input[str]):
    # Setter counterpart of `task_type`; stores the value in the Pulumi input map.
    pulumi.set(self, "task_type", value)

@property
@pulumi.getter
def adjustment(self) -> Optional[pulumi.Input[str]]:
    """
    The number of instances to add/remove to/from the target capacity when scale is needed.
    """
    return pulumi.get(self, "adjustment")

@adjustment.setter
def adjustment(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "adjustment", value)

@property
@pulumi.getter(name="adjustmentPercentage")
def adjustment_percentage(self) -> Optional[pulumi.Input[str]]:
    """
    The percent of instances to add/remove to/from the target capacity when scale is needed.
    """
    return pulumi.get(self, "adjustment_percentage")

@adjustment_percentage.setter
def adjustment_percentage(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "adjustment_percentage", value)

@property
@pulumi.getter(name="batchSizePercentage")
def batch_size_percentage(self) -> Optional[pulumi.Input[str]]:
    """
    Sets the percentage of the instances to deploy in each batch.
    """
    return pulumi.get(self, "batch_size_percentage")

@batch_size_percentage.setter
def batch_size_percentage(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "batch_size_percentage", value)

@property
@pulumi.getter(name="gracePeriod")
def grace_period(self) -> Optional[pulumi.Input[str]]:
    """
    Sets the grace period for new instances to become healthy.
    """
    return pulumi.get(self, "grace_period")

@grace_period.setter
def grace_period(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "grace_period", value)

@property
@pulumi.getter(name="isEnabled")
def is_enabled(self) -> Optional[pulumi.Input[bool]]:
    """
    Describes whether the task is enabled. When true the task runs; when false it does not.
    """
    return pulumi.get(self, "is_enabled")

@is_enabled.setter
def is_enabled(self, value: Optional[pulumi.Input[bool]]):
    pulumi.set(self, "is_enabled", value)
@property
@pulumi.getter(name="scaleMaxCapacity")
def scale_max_capacity(self) -> Optional[pulumi.Input[str]]:
    """
    The max capacity of the group. Required when `task_type` is `scale`.
    """
    return pulumi.get(self, "scale_max_capacity")

@scale_max_capacity.setter
def scale_max_capacity(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "scale_max_capacity", value)

@property
@pulumi.getter(name="scaleMinCapacity")
def scale_min_capacity(self) -> Optional[pulumi.Input[str]]:
    """
    The min capacity of the group. Should be used when choosing a `task_type` of `scale`.
    """
    return pulumi.get(self, "scale_min_capacity")

@scale_min_capacity.setter
def scale_min_capacity(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "scale_min_capacity", value)

@property
@pulumi.getter(name="scaleTargetCapacity")
def scale_target_capacity(self) -> Optional[pulumi.Input[str]]:
    """
    The target capacity of the group. Should be used when choosing a `task_type` of `scale`.
    """
    return pulumi.get(self, "scale_target_capacity")

@scale_target_capacity.setter
def scale_target_capacity(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "scale_target_capacity", value)
@pulumi.input_type
class ElastigroupStrategyArgs:
    def __init__(__self__, *,
                 draining_timeout: Optional[pulumi.Input[int]] = None,
                 low_priority_percentage: Optional[pulumi.Input[int]] = None,
                 od_count: Optional[pulumi.Input[int]] = None):
        """
        :param pulumi.Input[int] draining_timeout: Time (seconds) to allow the instance to be drained from incoming TCP connections and detached from MLB before terminating it during a scale-down operation.
        :param pulumi.Input[int] low_priority_percentage: Percentage of Low Priority instances to maintain. Required if `od_count` is not specified.
        :param pulumi.Input[int] od_count: Number of On-Demand instances to maintain. Required if `low_priority_percentage` is not specified.
        """
        if draining_timeout is not None:
            pulumi.set(__self__, "draining_timeout", draining_timeout)
        if low_priority_percentage is not None:
            pulumi.set(__self__, "low_priority_percentage", low_priority_percentage)
        if od_count is not None:
            pulumi.set(__self__, "od_count", od_count)

    @property
    @pulumi.getter(name="drainingTimeout")
    def draining_timeout(self) -> Optional[pulumi.Input[int]]:
        """
        Time (seconds) to allow the instance to be drained from incoming TCP connections and detached from MLB before terminating it during a scale-down operation.
        """
        return pulumi.get(self, "draining_timeout")

    @draining_timeout.setter
    def draining_timeout(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "draining_timeout", value)

    @property
    @pulumi.getter(name="lowPriorityPercentage")
    def low_priority_percentage(self) -> Optional[pulumi.Input[int]]:
        """
        Percentage of Low Priority instances to maintain. Required if `od_count` is not specified.
        """
        return pulumi.get(self, "low_priority_percentage")

    @low_priority_percentage.setter
    def low_priority_percentage(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "low_priority_percentage", value)

    @property
    @pulumi.getter(name="odCount")
    def od_count(self) -> Optional[pulumi.Input[int]]:
        """
        Number of On-Demand instances to maintain. Required if `low_priority_percentage` is not specified.
        """
        return pulumi.get(self, "od_count")

    @od_count.setter
    def od_count(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "od_count", value)
@pulumi.input_type
class ElastigroupUpdatePolicyArgs:
    def __init__(__self__, *,
                 should_roll: pulumi.Input[bool],
                 roll_config: Optional[pulumi.Input['ElastigroupUpdatePolicyRollConfigArgs']] = None):
        """
        :param pulumi.Input[bool] should_roll: Sets the enablement of the roll option.
        :param pulumi.Input['ElastigroupUpdatePolicyRollConfigArgs'] roll_config: When used, controls whether the group should perform a deployment after an update to the configuration.
        """
        pulumi.set(__self__, "should_roll", should_roll)
        if roll_config is not None:
            pulumi.set(__self__, "roll_config", roll_config)

    @property
    @pulumi.getter(name="shouldRoll")
    def should_roll(self) -> pulumi.Input[bool]:
        """
        Sets the enablement of the roll option.
        """
        return pulumi.get(self, "should_roll")

    @should_roll.setter
    def should_roll(self, value: pulumi.Input[bool]):
        pulumi.set(self, "should_roll", value)

    @property
    @pulumi.getter(name="rollConfig")
    def roll_config(self) -> Optional[pulumi.Input['ElastigroupUpdatePolicyRollConfigArgs']]:
        """
        When used, controls whether the group should perform a deployment after an update to the configuration.
        """
        return pulumi.get(self, "roll_config")

    @roll_config.setter
    def roll_config(self, value: Optional[pulumi.Input['ElastigroupUpdatePolicyRollConfigArgs']]):
        pulumi.set(self, "roll_config", value)
@pulumi.input_type
class ElastigroupUpdatePolicyRollConfigArgs:
    def __init__(__self__, *,
                 batch_size_percentage: pulumi.Input[int],
                 grace_period: Optional[pulumi.Input[int]] = None,
                 health_check_type: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[int] batch_size_percentage: Sets the percentage of the instances to deploy in each batch.
        :param pulumi.Input[int] grace_period: Sets the grace period for new instances to become healthy.
        :param pulumi.Input[str] health_check_type: Sets the health check type to use. Valid values: `"INSTANCE_STATE"`, `"NONE"`.
        """
        # batch_size_percentage is required; the remaining fields are stored only when provided.
        pulumi.set(__self__, "batch_size_percentage", batch_size_percentage)
        for attr, val in (("grace_period", grace_period),
                          ("health_check_type", health_check_type)):
            if val is not None:
                pulumi.set(__self__, attr, val)

    @property
    @pulumi.getter(name="batchSizePercentage")
    def batch_size_percentage(self) -> pulumi.Input[int]:
        """Sets the percentage of the instances to deploy in each batch."""
        return pulumi.get(self, "batch_size_percentage")

    @batch_size_percentage.setter
    def batch_size_percentage(self, value: pulumi.Input[int]):
        pulumi.set(self, "batch_size_percentage", value)

    @property
    @pulumi.getter(name="gracePeriod")
    def grace_period(self) -> Optional[pulumi.Input[int]]:
        """Sets the grace period for new instances to become healthy."""
        return pulumi.get(self, "grace_period")

    @grace_period.setter
    def grace_period(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "grace_period", value)

    @property
    @pulumi.getter(name="healthCheckType")
    def health_check_type(self) -> Optional[pulumi.Input[str]]:
        """Sets the health check type to use. Valid values: `"INSTANCE_STATE"`, `"NONE"`."""
        return pulumi.get(self, "health_check_type")

    @health_check_type.setter
    def health_check_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "health_check_type", value)
@pulumi.input_type
class OceanAutoscalerArgs:
    def __init__(__self__, *,
                 autoscale_down: Optional[pulumi.Input['OceanAutoscalerAutoscaleDownArgs']] = None,
                 autoscale_headroom: Optional[pulumi.Input['OceanAutoscalerAutoscaleHeadroomArgs']] = None,
                 autoscale_is_enabled: Optional[pulumi.Input[bool]] = None,
                 resource_limits: Optional[pulumi.Input['OceanAutoscalerResourceLimitsArgs']] = None):
        """
        :param pulumi.Input['OceanAutoscalerAutoscaleDownArgs'] autoscale_down: Auto Scaling scale-down operations.
        :param pulumi.Input['OceanAutoscalerAutoscaleHeadroomArgs'] autoscale_headroom: Spare Resource Capacity Management feature; enables fast assignment of Pods without having to wait for new resources to launch.
        :param pulumi.Input[bool] autoscale_is_enabled: Enable the Ocean Kubernetes Autoscaler.
        :param pulumi.Input['OceanAutoscalerResourceLimitsArgs'] resource_limits: Optionally set upper and lower bounds on the resource usage of the cluster.
        """
        if autoscale_down is not None:
            pulumi.set(__self__, "autoscale_down", autoscale_down)
        if autoscale_headroom is not None:
            pulumi.set(__self__, "autoscale_headroom", autoscale_headroom)
        if autoscale_is_enabled is not None:
            pulumi.set(__self__, "autoscale_is_enabled", autoscale_is_enabled)
        if resource_limits is not None:
            pulumi.set(__self__, "resource_limits", resource_limits)

    @property
    @pulumi.getter(name="autoscaleDown")
    def autoscale_down(self) -> Optional[pulumi.Input['OceanAutoscalerAutoscaleDownArgs']]:
        """
        Auto Scaling scale-down operations.
        """
        return pulumi.get(self, "autoscale_down")

    @autoscale_down.setter
    def autoscale_down(self, value: Optional[pulumi.Input['OceanAutoscalerAutoscaleDownArgs']]):
        pulumi.set(self, "autoscale_down", value)

    @property
    @pulumi.getter(name="autoscaleHeadroom")
    def autoscale_headroom(self) -> Optional[pulumi.Input['OceanAutoscalerAutoscaleHeadroomArgs']]:
        """
        Spare Resource Capacity Management feature; enables fast assignment of Pods without having to wait for new resources to launch.
        """
        return pulumi.get(self, "autoscale_headroom")

    @autoscale_headroom.setter
    def autoscale_headroom(self, value: Optional[pulumi.Input['OceanAutoscalerAutoscaleHeadroomArgs']]):
        pulumi.set(self, "autoscale_headroom", value)

    @property
    @pulumi.getter(name="autoscaleIsEnabled")
    def autoscale_is_enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Enable the Ocean Kubernetes Autoscaler.
        """
        return pulumi.get(self, "autoscale_is_enabled")

    @autoscale_is_enabled.setter
    def autoscale_is_enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "autoscale_is_enabled", value)

    @property
    @pulumi.getter(name="resourceLimits")
    def resource_limits(self) -> Optional[pulumi.Input['OceanAutoscalerResourceLimitsArgs']]:
        """
        Optionally set upper and lower bounds on the resource usage of the cluster.
        """
        return pulumi.get(self, "resource_limits")

    @resource_limits.setter
    def resource_limits(self, value: Optional[pulumi.Input['OceanAutoscalerResourceLimitsArgs']]):
        pulumi.set(self, "resource_limits", value)
@pulumi.input_type
class OceanAutoscalerAutoscaleDownArgs:
    def __init__(__self__, *,
                 max_scale_down_percentage: Optional[pulumi.Input[float]] = None):
        """
        :param pulumi.Input[float] max_scale_down_percentage: The maximum percentage of the cluster to scale down at once.
        """
        if max_scale_down_percentage is None:
            return
        pulumi.set(__self__, "max_scale_down_percentage", max_scale_down_percentage)

    @property
    @pulumi.getter(name="maxScaleDownPercentage")
    def max_scale_down_percentage(self) -> Optional[pulumi.Input[float]]:
        """The maximum percentage of the cluster to scale down at once."""
        return pulumi.get(self, "max_scale_down_percentage")

    @max_scale_down_percentage.setter
    def max_scale_down_percentage(self, value: Optional[pulumi.Input[float]]):
        pulumi.set(self, "max_scale_down_percentage", value)
@pulumi.input_type
class OceanAutoscalerAutoscaleHeadroomArgs:
    def __init__(__self__, *,
                 automatic: Optional[pulumi.Input['OceanAutoscalerAutoscaleHeadroomAutomaticArgs']] = None):
        """
        :param pulumi.Input['OceanAutoscalerAutoscaleHeadroomAutomaticArgs'] automatic: Automatic headroom configuration.
        """
        if automatic is None:
            return
        pulumi.set(__self__, "automatic", automatic)

    @property
    @pulumi.getter
    def automatic(self) -> Optional[pulumi.Input['OceanAutoscalerAutoscaleHeadroomAutomaticArgs']]:
        """Automatic headroom configuration."""
        return pulumi.get(self, "automatic")

    @automatic.setter
    def automatic(self, value: Optional[pulumi.Input['OceanAutoscalerAutoscaleHeadroomAutomaticArgs']]):
        pulumi.set(self, "automatic", value)
@pulumi.input_type
class OceanAutoscalerAutoscaleHeadroomAutomaticArgs:
    def __init__(__self__, *,
                 is_enabled: Optional[pulumi.Input[bool]] = None,
                 percentage: Optional[pulumi.Input[int]] = None):
        """
        :param pulumi.Input[bool] is_enabled: Enable automatic headroom. When set to `true`, Ocean configures and optimizes headroom automatically.
        :param pulumi.Input[int] percentage: Optionally set a number between 0 and 100 to control the percentage of total cluster resources dedicated to headroom. Relevant when `is_enabled` is toggled on.
        """
        if is_enabled is not None:
            pulumi.set(__self__, "is_enabled", is_enabled)
        if percentage is not None:
            pulumi.set(__self__, "percentage", percentage)

    @property
    @pulumi.getter(name="isEnabled")
    def is_enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Enable automatic headroom. When set to `true`, Ocean configures and optimizes headroom automatically.
        """
        return pulumi.get(self, "is_enabled")

    @is_enabled.setter
    def is_enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_enabled", value)

    @property
    @pulumi.getter
    def percentage(self) -> Optional[pulumi.Input[int]]:
        """
        Optionally set a number between 0 and 100 to control the percentage of total cluster resources dedicated to headroom. Relevant when `is_enabled` is toggled on.
        """
        return pulumi.get(self, "percentage")

    @percentage.setter
    def percentage(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "percentage", value)
@pulumi.input_type
class OceanAutoscalerResourceLimitsArgs:
    def __init__(__self__, *,
                 max_memory_gib: Optional[pulumi.Input[int]] = None,
                 max_vcpu: Optional[pulumi.Input[int]] = None):
        """
        :param pulumi.Input[int] max_memory_gib: The maximum memory, in GiB units, that can be allocated to the cluster.
        :param pulumi.Input[int] max_vcpu: The maximum CPU, in vCPU units, that can be allocated to the cluster.
        """
        if max_memory_gib is not None:
            pulumi.set(__self__, "max_memory_gib", max_memory_gib)
        if max_vcpu is not None:
            pulumi.set(__self__, "max_vcpu", max_vcpu)

    @property
    @pulumi.getter(name="maxMemoryGib")
    def max_memory_gib(self) -> Optional[pulumi.Input[int]]:
        """
        The maximum memory, in GiB units, that can be allocated to the cluster.
        """
        return pulumi.get(self, "max_memory_gib")

    @max_memory_gib.setter
    def max_memory_gib(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "max_memory_gib", value)

    @property
    @pulumi.getter(name="maxVcpu")
    def max_vcpu(self) -> Optional[pulumi.Input[int]]:
        """
        The maximum CPU, in vCPU units, that can be allocated to the cluster.
        """
        return pulumi.get(self, "max_vcpu")

    @max_vcpu.setter
    def max_vcpu(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "max_vcpu", value)
@pulumi.input_type
class OceanExtensionArgs:
    # NOTE(review): the generated docs for `name` and `type` said "Load Balancer" — that
    # appears to be a copy-paste error in the generator; corrected below to refer to the
    # extension itself. Verify against the Spot Ocean AKS provider documentation.
    def __init__(__self__, *,
                 api_version: Optional[pulumi.Input[str]] = None,
                 minor_version_auto_upgrade: Optional[pulumi.Input[bool]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 publisher: Optional[pulumi.Input[str]] = None,
                 type: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] api_version: API version of the extension.
        :param pulumi.Input[bool] minor_version_auto_upgrade: Toggles whether auto upgrades are allowed.
        :param pulumi.Input[str] name: Name of the extension.
        :param pulumi.Input[str] publisher: Image publisher.
        :param pulumi.Input[str] type: Type of the extension.
        """
        if api_version is not None:
            pulumi.set(__self__, "api_version", api_version)
        if minor_version_auto_upgrade is not None:
            pulumi.set(__self__, "minor_version_auto_upgrade", minor_version_auto_upgrade)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if publisher is not None:
            pulumi.set(__self__, "publisher", publisher)
        if type is not None:
            pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter(name="apiVersion")
    def api_version(self) -> Optional[pulumi.Input[str]]:
        """
        API version of the extension.
        """
        return pulumi.get(self, "api_version")

    @api_version.setter
    def api_version(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "api_version", value)

    @property
    @pulumi.getter(name="minorVersionAutoUpgrade")
    def minor_version_auto_upgrade(self) -> Optional[pulumi.Input[bool]]:
        """
        Toggles whether auto upgrades are allowed.
        """
        return pulumi.get(self, "minor_version_auto_upgrade")

    @minor_version_auto_upgrade.setter
    def minor_version_auto_upgrade(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "minor_version_auto_upgrade", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Name of the extension.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def publisher(self) -> Optional[pulumi.Input[str]]:
        """
        Image publisher.
        """
        return pulumi.get(self, "publisher")

    @publisher.setter
    def publisher(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "publisher", value)

    @property
    @pulumi.getter
    def type(self) -> Optional[pulumi.Input[str]]:
        """
        Type of the extension.
        """
        return pulumi.get(self, "type")

    @type.setter
    def type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "type", value)
@pulumi.input_type
class OceanHealthArgs:
    def __init__(__self__, *,
                 grace_period: Optional[pulumi.Input[int]] = None):
        """
        :param pulumi.Input[int] grace_period: The amount of time to wait, in seconds, from the moment the instance has launched before monitoring its health checks.
        """
        if grace_period is None:
            return
        pulumi.set(__self__, "grace_period", grace_period)

    @property
    @pulumi.getter(name="gracePeriod")
    def grace_period(self) -> Optional[pulumi.Input[int]]:
        """The amount of time to wait, in seconds, from the moment the instance has launched before monitoring its health checks."""
        return pulumi.get(self, "grace_period")

    @grace_period.setter
    def grace_period(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "grace_period", value)
@pulumi.input_type
class OceanImageArgs:
    def __init__(__self__, *,
                 marketplaces: Optional[pulumi.Input[Sequence[pulumi.Input['OceanImageMarketplaceArgs']]]] = None):
        """
        :param pulumi.Input[Sequence[pulumi.Input['OceanImageMarketplaceArgs']]] marketplaces: Select an image from Azure's Marketplace image catalogue.
        """
        if marketplaces is None:
            return
        pulumi.set(__self__, "marketplaces", marketplaces)

    @property
    @pulumi.getter
    def marketplaces(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['OceanImageMarketplaceArgs']]]]:
        """Select an image from Azure's Marketplace image catalogue."""
        return pulumi.get(self, "marketplaces")

    @marketplaces.setter
    def marketplaces(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['OceanImageMarketplaceArgs']]]]):
        pulumi.set(self, "marketplaces", value)
@pulumi.input_type
class OceanImageMarketplaceArgs:
    def __init__(__self__, *,
                 offer: Optional[pulumi.Input[str]] = None,
                 publisher: Optional[pulumi.Input[str]] = None,
                 sku: Optional[pulumi.Input[str]] = None,
                 version: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] offer: Image name (the Marketplace offer).
        :param pulumi.Input[str] publisher: Image publisher.
        :param pulumi.Input[str] sku: Image Stock Keeping Unit (the specific version of the image).
        :param pulumi.Input[str] version: Image version.
        """
        if offer is not None:
            pulumi.set(__self__, "offer", offer)
        if publisher is not None:
            pulumi.set(__self__, "publisher", publisher)
        if sku is not None:
            pulumi.set(__self__, "sku", sku)
        if version is not None:
            pulumi.set(__self__, "version", version)

    @property
    @pulumi.getter
    def offer(self) -> Optional[pulumi.Input[str]]:
        """
        Image name (the Marketplace offer).
        """
        return pulumi.get(self, "offer")

    @offer.setter
    def offer(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "offer", value)

    @property
    @pulumi.getter
    def publisher(self) -> Optional[pulumi.Input[str]]:
        """
        Image publisher.
        """
        return pulumi.get(self, "publisher")

    @publisher.setter
    def publisher(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "publisher", value)

    @property
    @pulumi.getter
    def sku(self) -> Optional[pulumi.Input[str]]:
        """
        Image Stock Keeping Unit (the specific version of the image).
        """
        return pulumi.get(self, "sku")

    @sku.setter
    def sku(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "sku", value)

    @property
    @pulumi.getter
    def version(self) -> Optional[pulumi.Input[str]]:
        """
        Image version.
        """
        return pulumi.get(self, "version")

    @version.setter
    def version(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "version", value)
@pulumi.input_type
class OceanLoadBalancerArgs:
    def __init__(__self__, *,
                 backend_pool_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 load_balancer_sku: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 type: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[Sequence[pulumi.Input[str]]] backend_pool_names: Names of the Backend Pools to register the Cluster VMs to. Each Backend Pool is a separate load balancer.
        :param pulumi.Input[str] load_balancer_sku: Supported values: `Standard`, `Basic`.
        :param pulumi.Input[str] name: Name of the Load Balancer.
        :param pulumi.Input[str] resource_group_name: The Resource Group name of the Load Balancer.
        :param pulumi.Input[str] type: The type of load balancer. Supported value: `loadBalancer`
        """
        # Every field is optional; record only the ones the caller supplied.
        for attr, val in (("backend_pool_names", backend_pool_names),
                          ("load_balancer_sku", load_balancer_sku),
                          ("name", name),
                          ("resource_group_name", resource_group_name),
                          ("type", type)):
            if val is not None:
                pulumi.set(__self__, attr, val)

    @property
    @pulumi.getter(name="backendPoolNames")
    def backend_pool_names(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """Names of the Backend Pools to register the Cluster VMs to. Each Backend Pool is a separate load balancer."""
        return pulumi.get(self, "backend_pool_names")

    @backend_pool_names.setter
    def backend_pool_names(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "backend_pool_names", value)

    @property
    @pulumi.getter(name="loadBalancerSku")
    def load_balancer_sku(self) -> Optional[pulumi.Input[str]]:
        """Supported values: `Standard`, `Basic`."""
        return pulumi.get(self, "load_balancer_sku")

    @load_balancer_sku.setter
    def load_balancer_sku(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "load_balancer_sku", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """Name of the Load Balancer."""
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> Optional[pulumi.Input[str]]:
        """The Resource Group name of the Load Balancer."""
        return pulumi.get(self, "resource_group_name")

    @resource_group_name.setter
    def resource_group_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "resource_group_name", value)

    @property
    @pulumi.getter
    def type(self) -> Optional[pulumi.Input[str]]:
        """The type of load balancer. Supported value: `loadBalancer`"""
        return pulumi.get(self, "type")

    @type.setter
    def type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "type", value)
@pulumi.input_type
class OceanManagedServiceIdentityArgs:
    # NOTE(review): the generated docs said "Load Balancer" for both fields — presumably a
    # generator copy-paste error on a managed-identity type; corrected below, verify against
    # the Spot Ocean AKS provider documentation.
    def __init__(__self__, *,
                 name: pulumi.Input[str],
                 resource_group_name: pulumi.Input[str]):
        """
        :param pulumi.Input[str] name: Name of the managed identity.
        :param pulumi.Input[str] resource_group_name: The Resource Group name of the managed identity.
        """
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "resource_group_name", resource_group_name)

    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        """
        Name of the managed identity.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: pulumi.Input[str]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Input[str]:
        """
        The Resource Group name of the managed identity.
        """
        return pulumi.get(self, "resource_group_name")

    @resource_group_name.setter
    def resource_group_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_group_name", value)
@pulumi.input_type
class OceanNetworkArgs:
    # NOTE(review): the generated doc for `resource_group_name` said "of the Load Balancer";
    # in a network block it presumably refers to the resource group of the virtual network —
    # corrected below, verify against the Spot Ocean AKS provider documentation.
    def __init__(__self__, *,
                 network_interfaces: Optional[pulumi.Input[Sequence[pulumi.Input['OceanNetworkNetworkInterfaceArgs']]]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 virtual_network_name: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[Sequence[pulumi.Input['OceanNetworkNetworkInterfaceArgs']]] network_interfaces: A list of virtual network interfaces. The publicIpSku must be identical between all the network interfaces. One network interface must be set as the primary.
        :param pulumi.Input[str] resource_group_name: The Resource Group name of the virtual network.
        :param pulumi.Input[str] virtual_network_name: Virtual network.
        """
        if network_interfaces is not None:
            pulumi.set(__self__, "network_interfaces", network_interfaces)
        if resource_group_name is not None:
            pulumi.set(__self__, "resource_group_name", resource_group_name)
        if virtual_network_name is not None:
            pulumi.set(__self__, "virtual_network_name", virtual_network_name)

    @property
    @pulumi.getter(name="networkInterfaces")
    def network_interfaces(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['OceanNetworkNetworkInterfaceArgs']]]]:
        """
        A list of virtual network interfaces. The publicIpSku must be identical between all the network interfaces. One network interface must be set as the primary.
        """
        return pulumi.get(self, "network_interfaces")

    @network_interfaces.setter
    def network_interfaces(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['OceanNetworkNetworkInterfaceArgs']]]]):
        pulumi.set(self, "network_interfaces", value)

    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> Optional[pulumi.Input[str]]:
        """
        The Resource Group name of the virtual network.
        """
        return pulumi.get(self, "resource_group_name")

    @resource_group_name.setter
    def resource_group_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "resource_group_name", value)

    @property
    @pulumi.getter(name="virtualNetworkName")
    def virtual_network_name(self) -> Optional[pulumi.Input[str]]:
        """
        Virtual network.
        """
        return pulumi.get(self, "virtual_network_name")

    @virtual_network_name.setter
    def virtual_network_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "virtual_network_name", value)
@pulumi.input_type
class OceanNetworkNetworkInterfaceArgs:
    def __init__(__self__, *,
                 additional_ip_configs: Optional[pulumi.Input[Sequence[pulumi.Input['OceanNetworkNetworkInterfaceAdditionalIpConfigArgs']]]] = None,
                 assign_public_ip: Optional[pulumi.Input[bool]] = None,
                 is_primary: Optional[pulumi.Input[bool]] = None,
                 security_group: Optional[pulumi.Input['OceanNetworkNetworkInterfaceSecurityGroupArgs']] = None,
                 subnet_name: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[Sequence[pulumi.Input['OceanNetworkNetworkInterfaceAdditionalIpConfigArgs']]] additional_ip_configs: Additional configuration of network interface. The name fields between all the `additional_ip_config` must be unique.
        :param pulumi.Input[bool] assign_public_ip: Assign public IP.
        :param pulumi.Input[bool] is_primary: Defines whether the network interface is primary or not.
        :param pulumi.Input['OceanNetworkNetworkInterfaceSecurityGroupArgs'] security_group: Network security group attached to the interface — presumably; undocumented in the generated source, verify against provider docs.
        :param pulumi.Input[str] subnet_name: Subnet name.
        """
        if additional_ip_configs is not None:
            pulumi.set(__self__, "additional_ip_configs", additional_ip_configs)
        if assign_public_ip is not None:
            pulumi.set(__self__, "assign_public_ip", assign_public_ip)
        if is_primary is not None:
            pulumi.set(__self__, "is_primary", is_primary)
        if security_group is not None:
            pulumi.set(__self__, "security_group", security_group)
        if subnet_name is not None:
            pulumi.set(__self__, "subnet_name", subnet_name)

    @property
    @pulumi.getter(name="additionalIpConfigs")
    def additional_ip_configs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['OceanNetworkNetworkInterfaceAdditionalIpConfigArgs']]]]:
        """
        Additional configuration of network interface. The name fields between all the `additional_ip_config` must be unique.
        """
        return pulumi.get(self, "additional_ip_configs")

    @additional_ip_configs.setter
    def additional_ip_configs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['OceanNetworkNetworkInterfaceAdditionalIpConfigArgs']]]]):
        pulumi.set(self, "additional_ip_configs", value)

    @property
    @pulumi.getter(name="assignPublicIp")
    def assign_public_ip(self) -> Optional[pulumi.Input[bool]]:
        """
        Assign public IP.
        """
        return pulumi.get(self, "assign_public_ip")

    @assign_public_ip.setter
    def assign_public_ip(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "assign_public_ip", value)

    @property
    @pulumi.getter(name="isPrimary")
    def is_primary(self) -> Optional[pulumi.Input[bool]]:
        """
        Defines whether the network interface is primary or not.
        """
        return pulumi.get(self, "is_primary")

    @is_primary.setter
    def is_primary(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_primary", value)

    @property
    @pulumi.getter(name="securityGroup")
    def security_group(self) -> Optional[pulumi.Input['OceanNetworkNetworkInterfaceSecurityGroupArgs']]:
        """
        Network security group attached to the interface — presumably; undocumented in the generated source, verify against provider docs.
        """
        return pulumi.get(self, "security_group")

    @security_group.setter
    def security_group(self, value: Optional[pulumi.Input['OceanNetworkNetworkInterfaceSecurityGroupArgs']]):
        pulumi.set(self, "security_group", value)

    @property
    @pulumi.getter(name="subnetName")
    def subnet_name(self) -> Optional[pulumi.Input[str]]:
        """
        Subnet name.
        """
        return pulumi.get(self, "subnet_name")

    @subnet_name.setter
    def subnet_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "subnet_name", value)
@pulumi.input_type
class OceanNetworkNetworkInterfaceAdditionalIpConfigArgs:
    # NOTE(review): the generated doc for `name` said "Name of the Load Balancer" — presumably
    # a generator copy-paste error; corrected to refer to the IP configuration itself, verify.
    def __init__(__self__, *,
                 name: Optional[pulumi.Input[str]] = None,
                 private_ip_version: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] name: Name of the additional IP configuration.
        :param pulumi.Input[str] private_ip_version: Supported values: `IPv4`, `IPv6`.
        """
        if name is not None:
            pulumi.set(__self__, "name", name)
        if private_ip_version is not None:
            pulumi.set(__self__, "private_ip_version", private_ip_version)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Name of the additional IP configuration.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="privateIpVersion")
    def private_ip_version(self) -> Optional[pulumi.Input[str]]:
        """
        Supported values: `IPv4`, `IPv6`.
        """
        return pulumi.get(self, "private_ip_version")

    @private_ip_version.setter
    def private_ip_version(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "private_ip_version", value)
@pulumi.input_type
class OceanNetworkNetworkInterfaceSecurityGroupArgs:
    # NOTE(review): the generated docs said "Load Balancer" for both fields — presumably a
    # generator copy-paste error on a security-group type; corrected below, verify against
    # the Spot Ocean AKS provider documentation.
    def __init__(__self__, *,
                 name: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] name: Name of the network security group.
        :param pulumi.Input[str] resource_group_name: The Resource Group name of the network security group.
        """
        if name is not None:
            pulumi.set(__self__, "name", name)
        if resource_group_name is not None:
            pulumi.set(__self__, "resource_group_name", resource_group_name)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Name of the network security group.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> Optional[pulumi.Input[str]]:
        """
        The Resource Group name of the network security group.
        """
        return pulumi.get(self, "resource_group_name")

    @resource_group_name.setter
    def resource_group_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "resource_group_name", value)
@pulumi.input_type
class OceanOsDiskArgs:
    # NOTE(review): the generated doc for `type` said "The type of load balancer. Supported
    # value: `loadBalancer`" — presumably a generator copy-paste error on an OS-disk type;
    # corrected below, verify valid disk types against the Spot Ocean AKS provider docs.
    def __init__(__self__, *,
                 size_gb: pulumi.Input[int],
                 type: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[int] size_gb: The size of the OS disk in GB.
        :param pulumi.Input[str] type: The type of the OS disk.
        """
        pulumi.set(__self__, "size_gb", size_gb)
        if type is not None:
            pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter(name="sizeGb")
    def size_gb(self) -> pulumi.Input[int]:
        """
        The size of the OS disk in GB.
        """
        return pulumi.get(self, "size_gb")

    @size_gb.setter
    def size_gb(self, value: pulumi.Input[int]):
        pulumi.set(self, "size_gb", value)

    @property
    @pulumi.getter
    def type(self) -> Optional[pulumi.Input[str]]:
        """
        The type of the OS disk.
        """
        return pulumi.get(self, "type")

    @type.setter
    def type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "type", value)
@pulumi.input_type
class OceanStrategyArgs:
def __init__(__self__, *,
fallback_to_ondemand: Optional[pulumi.Input[bool]] = None,
spot_percentage: Optional[pulumi.Input[int]] = None):
"""
:param pulumi.Input[bool] fallback_to_ondemand: If no spot instance markets are available, enable Ocean to launch on-demand instances instead.
:param pulumi.Input[int] spot_percentage: Percentage of Spot VMs to maintain.
"""
if fallback_to_ondemand is not None:
pulumi.set(__self__, "fallback_to_ondemand", fallback_to_ondemand)
if spot_percentage is not None:
pulumi.set(__self__, "spot_percentage", spot_percentage)
@property
@pulumi.getter(name="fallbackToOndemand")
def fallback_to_ondemand(self) -> Optional[pulumi.Input[bool]]:
"""
If no spot instance markets are available, enable Ocean to launch on-demand instances instead.
"""
return pulumi.get(self, "fallback_to_ondemand")
@fallback_to_ondemand.setter
def fallback_to_ondemand(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "fallback_to_ondemand", value)
@property
@pulumi.getter(name="spotPercentage")
def spot_percentage(self) -> Optional[pulumi.Input[int]]:
"""
Percentage of Spot VMs to maintain.
"""
return pulumi.get(self, "spot_percentage")
@spot_percentage.setter
def spot_percentage(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "spot_percentage", value)
@pulumi.input_type
class OceanTagArgs:
def __init__(__self__, *,
key: Optional[pulumi.Input[str]] = None,
value: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] key: Tag key.
:param pulumi.Input[str] value: Tag value.
"""
if key is not None:
pulumi.set(__self__, "key", key)
if value is not None:
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def key(self) -> Optional[pulumi.Input[str]]:
"""
Tag key.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def value(self) -> Optional[pulumi.Input[str]]:
"""
Tag value.
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "value", value)
@pulumi.input_type
class OceanVirtualNodeGroupAutoscaleArgs:
def __init__(__self__, *,
autoscale_headroom: Optional[pulumi.Input['OceanVirtualNodeGroupAutoscaleAutoscaleHeadroomArgs']] = None):
if autoscale_headroom is not None:
pulumi.set(__self__, "autoscale_headroom", autoscale_headroom)
@property
@pulumi.getter(name="autoscaleHeadroom")
def autoscale_headroom(self) -> Optional[pulumi.Input['OceanVirtualNodeGroupAutoscaleAutoscaleHeadroomArgs']]:
return pulumi.get(self, "autoscale_headroom")
@autoscale_headroom.setter
def autoscale_headroom(self, value: Optional[pulumi.Input['OceanVirtualNodeGroupAutoscaleAutoscaleHeadroomArgs']]):
pulumi.set(self, "autoscale_headroom", value)
@pulumi.input_type
class OceanVirtualNodeGroupAutoscaleAutoscaleHeadroomArgs:
def __init__(__self__, *,
num_of_units: pulumi.Input[int],
cpu_per_unit: Optional[pulumi.Input[int]] = None,
gpu_per_unit: Optional[pulumi.Input[int]] = None,
memory_per_unit: Optional[pulumi.Input[int]] = None):
"""
:param pulumi.Input[int] num_of_units: The number of headroom units to maintain, where each unit has the defined CPU, memory and GPU.
:param pulumi.Input[int] cpu_per_unit: Configure the number of CPUs to allocate for the headroom. CPUs are denoted in millicores, where 1000 millicores = 1 vCPU.
:param pulumi.Input[int] gpu_per_unit: How many GPU cores should be allocated for headroom unit.
:param pulumi.Input[int] memory_per_unit: Configure the amount of memory (MiB) to allocate the headroom.
"""
pulumi.set(__self__, "num_of_units", num_of_units)
if cpu_per_unit is not None:
pulumi.set(__self__, "cpu_per_unit", cpu_per_unit)
if gpu_per_unit is not None:
pulumi.set(__self__, "gpu_per_unit", gpu_per_unit)
if memory_per_unit is not None:
pulumi.set(__self__, "memory_per_unit", memory_per_unit)
@property
@pulumi.getter(name="numOfUnits")
def num_of_units(self) -> pulumi.Input[int]:
"""
The number of headroom units to maintain, where each unit has the defined CPU, memory and GPU.
"""
return pulumi.get(self, "num_of_units")
@num_of_units.setter
def num_of_units(self, value: pulumi.Input[int]):
pulumi.set(self, "num_of_units", value)
@property
@pulumi.getter(name="cpuPerUnit")
def cpu_per_unit(self) -> Optional[pulumi.Input[int]]:
"""
Configure the number of CPUs to allocate for the headroom. CPUs are denoted in millicores, where 1000 millicores = 1 vCPU.
"""
return pulumi.get(self, "cpu_per_unit")
@cpu_per_unit.setter
def cpu_per_unit(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "cpu_per_unit", value)
@property
@pulumi.getter(name="gpuPerUnit")
def gpu_per_unit(self) -> Optional[pulumi.Input[int]]:
"""
How many GPU cores should be allocated for headroom unit.
"""
return pulumi.get(self, "gpu_per_unit")
@gpu_per_unit.setter
def gpu_per_unit(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "gpu_per_unit", value)
@property
@pulumi.getter(name="memoryPerUnit")
def memory_per_unit(self) -> Optional[pulumi.Input[int]]:
"""
Configure the amount of memory (MiB) to allocate the headroom.
"""
return pulumi.get(self, "memory_per_unit")
@memory_per_unit.setter
def memory_per_unit(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "memory_per_unit", value)
@pulumi.input_type
class OceanVirtualNodeGroupLabelArgs:
def __init__(__self__, *,
key: pulumi.Input[str],
value: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] key: Tag Key for Vms in the cluster.
:param pulumi.Input[str] value: Tag Value for VMs in the cluster.
"""
pulumi.set(__self__, "key", key)
if value is not None:
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
"""
Tag Key for Vms in the cluster.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def value(self) -> Optional[pulumi.Input[str]]:
"""
Tag Value for VMs in the cluster.
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "value", value)
@pulumi.input_type
class OceanVirtualNodeGroupLaunchSpecificationArgs:
def __init__(__self__, *,
os_disk: Optional[pulumi.Input['OceanVirtualNodeGroupLaunchSpecificationOsDiskArgs']] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input['OceanVirtualNodeGroupLaunchSpecificationTagArgs']]]] = None):
"""
:param pulumi.Input['OceanVirtualNodeGroupLaunchSpecificationOsDiskArgs'] os_disk: Specify OS disk specification other than default.
:param pulumi.Input[Sequence[pulumi.Input['OceanVirtualNodeGroupLaunchSpecificationTagArgs']]] tags: Additional key-value pairs to be used to tag the VMs in the virtual node group.
"""
if os_disk is not None:
pulumi.set(__self__, "os_disk", os_disk)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="osDisk")
def os_disk(self) -> Optional[pulumi.Input['OceanVirtualNodeGroupLaunchSpecificationOsDiskArgs']]:
"""
Specify OS disk specification other than default.
"""
return pulumi.get(self, "os_disk")
@os_disk.setter
def os_disk(self, value: Optional[pulumi.Input['OceanVirtualNodeGroupLaunchSpecificationOsDiskArgs']]):
pulumi.set(self, "os_disk", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['OceanVirtualNodeGroupLaunchSpecificationTagArgs']]]]:
"""
Additional key-value pairs to be used to tag the VMs in the virtual node group.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['OceanVirtualNodeGroupLaunchSpecificationTagArgs']]]]):
pulumi.set(self, "tags", value)
@pulumi.input_type
class OceanVirtualNodeGroupLaunchSpecificationOsDiskArgs:
def __init__(__self__, *,
size_gb: pulumi.Input[int],
type: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[int] size_gb: The size of the OS disk in GB, Required if dataDisks is specified.
:param pulumi.Input[str] type: The type of the OS disk. Valid values: `"Standard_LRS"`, `"Premium_LRS"`, `"StandardSSD_LRS"`.
"""
pulumi.set(__self__, "size_gb", size_gb)
if type is not None:
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="sizeGb")
def size_gb(self) -> pulumi.Input[int]:
"""
The size of the OS disk in GB, Required if dataDisks is specified.
"""
return pulumi.get(self, "size_gb")
@size_gb.setter
def size_gb(self, value: pulumi.Input[int]):
pulumi.set(self, "size_gb", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[str]]:
"""
The type of the OS disk. Valid values: `"Standard_LRS"`, `"Premium_LRS"`, `"StandardSSD_LRS"`.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "type", value)
@pulumi.input_type
class OceanVirtualNodeGroupLaunchSpecificationTagArgs:
def __init__(__self__, *,
key: Optional[pulumi.Input[str]] = None,
value: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] key: Tag Key for Vms in the cluster.
:param pulumi.Input[str] value: Tag Value for VMs in the cluster.
"""
if key is not None:
pulumi.set(__self__, "key", key)
if value is not None:
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def key(self) -> Optional[pulumi.Input[str]]:
"""
Tag Key for Vms in the cluster.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def value(self) -> Optional[pulumi.Input[str]]:
"""
Tag Value for VMs in the cluster.
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "value", value)
@pulumi.input_type
class OceanVirtualNodeGroupResourceLimitArgs:
def __init__(__self__, *,
max_instance_count: Optional[pulumi.Input[int]] = None):
"""
:param pulumi.Input[int] max_instance_count: Option to set a maximum number of instances per virtual node group. If set, value must be greater than or equal to 0.
"""
if max_instance_count is not None:
pulumi.set(__self__, "max_instance_count", max_instance_count)
@property
@pulumi.getter(name="maxInstanceCount")
def max_instance_count(self) -> Optional[pulumi.Input[int]]:
"""
Option to set a maximum number of instances per virtual node group. If set, value must be greater than or equal to 0.
"""
return pulumi.get(self, "max_instance_count")
@max_instance_count.setter
def max_instance_count(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "max_instance_count", value)
@pulumi.input_type
class OceanVirtualNodeGroupTaintArgs:
def __init__(__self__, *,
effect: pulumi.Input[str],
key: pulumi.Input[str],
value: pulumi.Input[str]):
"""
:param pulumi.Input[str] effect: The effect of the taint. Valid values: `"NoSchedule"`, `"PreferNoSchedule"`, `"NoExecute"`, `"PreferNoExecute"`.
:param pulumi.Input[str] key: Tag Key for Vms in the cluster.
:param pulumi.Input[str] value: Tag Value for VMs in the cluster.
"""
pulumi.set(__self__, "effect", effect)
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def effect(self) -> pulumi.Input[str]:
"""
The effect of the taint. Valid values: `"NoSchedule"`, `"PreferNoSchedule"`, `"NoExecute"`, `"PreferNoExecute"`.
"""
return pulumi.get(self, "effect")
@effect.setter
def effect(self, value: pulumi.Input[str]):
pulumi.set(self, "effect", value)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
"""
Tag Key for Vms in the cluster.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def value(self) -> pulumi.Input[str]:
"""
Tag Value for VMs in the cluster.
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: pulumi.Input[str]):
pulumi.set(self, "value", value)
@pulumi.input_type
class OceanVmSizeArgs:
def __init__(__self__, *,
whitelists: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
:param pulumi.Input[Sequence[pulumi.Input[str]]] whitelists: VM types allowed in the Ocean cluster.
"""
if whitelists is not None:
pulumi.set(__self__, "whitelists", whitelists)
@property
@pulumi.getter
def whitelists(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
VM types allowed in the Ocean cluster.
"""
return pulumi.get(self, "whitelists")
@whitelists.setter
def whitelists(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "whitelists", value)
| 39.59337 | 540 | 0.652443 | 12,422 | 107,496 | 5.461198 | 0.043149 | 0.110585 | 0.078421 | 0.059346 | 0.85868 | 0.795368 | 0.749245 | 0.679992 | 0.665414 | 0.638468 | 0 | 0.000787 | 0.231311 | 107,496 | 2,714 | 541 | 39.607959 | 0.820201 | 0.24986 | 0 | 0.633172 | 1 | 0 | 0.127602 | 0.051134 | 0 | 0 | 0 | 0 | 0 | 1 | 0.209443 | false | 0.004843 | 0.003027 | 0.003632 | 0.331114 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
b644fe9fa763e62ec78da8f9d37fd190ef7c108f | 23 | py | Python | femb/backbones/networks/__init__.py | jonasgrebe/pt-femb-face-embeddings | 8f055a59293d75ad60d4b0a92f86ee6f3f07e950 | [
"MIT"
] | 16 | 2021-04-16T14:57:08.000Z | 2022-02-23T08:09:39.000Z | femb/backbones/networks/__init__.py | jonasgrebe/pt-femb-face-embeddings | 8f055a59293d75ad60d4b0a92f86ee6f3f07e950 | [
"MIT"
] | 1 | 2022-01-05T14:10:16.000Z | 2022-01-06T08:13:13.000Z | femb/backbones/networks/__init__.py | jonasgrebe/pt-femb-face-embeddings | 8f055a59293d75ad60d4b0a92f86ee6f3f07e950 | [
"MIT"
] | 3 | 2021-04-16T13:41:25.000Z | 2022-02-23T08:09:42.000Z | from .iresnet import *
| 11.5 | 22 | 0.73913 | 3 | 23 | 5.666667 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.173913 | 23 | 1 | 23 | 23 | 0.894737 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
b65dbf1a6a3da6387d778343cbcbd38fb3bc9384 | 16,564 | py | Python | issue_tracker/tracker/tests.py | BrnoPCmaniak/example-django-issue-tracker | 1ae51eb3bf1fa532dbdcf336b97bd41e4f9d40fc | [
"MIT"
] | null | null | null | issue_tracker/tracker/tests.py | BrnoPCmaniak/example-django-issue-tracker | 1ae51eb3bf1fa532dbdcf336b97bd41e4f9d40fc | [
"MIT"
] | null | null | null | issue_tracker/tracker/tests.py | BrnoPCmaniak/example-django-issue-tracker | 1ae51eb3bf1fa532dbdcf336b97bd41e4f9d40fc | [
"MIT"
] | null | null | null | from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from django.test import Client, TestCase
from tracker.models import ISSUE_ASSIGNED, ISSUE_CANCELED, ISSUE_CREATED, ISSUE_DONE, Issue, IssueCategory
class ModelTestCase(TestCase):
def setUp(self):
self.test_user_1 = User.objects.create(username="user_a")
self.test_user_2 = User.objects.create(username="user_b")
def test_assign(self):
"""Test that when user is assigned the state changes too."""
issue = Issue.objects.create(name="Test", created_by=self.test_user_1, description="Test description.")
self.assertEqual(issue.state, ISSUE_CREATED)
self.assertIsNone(issue.solver)
issue.solver = self.test_user_2
issue.save()
self.assertEqual(issue.state, ISSUE_ASSIGNED)
self.assertEqual(issue.solver, self.test_user_2)
self.assertIsNotNone(issue.assigned_at)
def test_done(self):
"""Test that when state is done the duration is calculated."""
issue = Issue.objects.create(name="Test", created_by=self.test_user_1, description="Test description.",
solver=self.test_user_2)
self.assertEqual(issue.state, ISSUE_ASSIGNED)
self.assertEqual(issue.solver, self.test_user_2)
self.assertIsNotNone(issue.assigned_at)
issue.state = ISSUE_DONE
issue.save()
self.assertEqual(issue.state, ISSUE_DONE)
self.assertIsNotNone(issue.completed_in)
def test_done_without_assigned(self):
"""Test that issue can be marked as done without having to be assigned first."""
issue = Issue.objects.create(name="Test", created_by=self.test_user_1, description="Test description.")
self.assertEqual(issue.state, ISSUE_CREATED)
self.assertIsNone(issue.solver)
self.assertIsNone(issue.assigned_at)
issue.state = ISSUE_DONE
issue.save()
self.assertEqual(issue.state, ISSUE_DONE)
self.assertIsNotNone(issue.completed_in)
def test_clean_assigned(self):
"""Test that when state is marked as assigned issue have to have solver."""
issue = Issue(name="Test", created_by=self.test_user_1, description="Test description.",
state=ISSUE_ASSIGNED,
solver=None)
self.assertRaises(ValidationError, issue.full_clean)
class EditTestCase(TestCase):
def setUp(self):
self.test_user_1 = User.objects.create(username="user_a", is_superuser=True)
self.test_user_2 = User.objects.create(username="user_b")
self.issue = Issue.objects.create(name="Test", created_by=self.test_user_1, description="Test description.")
self.category = IssueCategory.objects.create(name="Test")
self.client = Client()
self.client.force_login(self.test_user_1)
def test_name(self):
"""Test changing name via API."""
new_value = "Test change"
response = self.client.post("/issue/edit/%d/" % self.issue.pk, {"name": "name", "value": new_value})
self.assertEqual(response.status_code, 200)
self.assertEqual(Issue.objects.get(pk=self.issue.pk).name, new_value)
def test_description(self):
"""Test changing description via API."""
new_value = "Test longer description."
response = self.client.post("/issue/edit/%d/" % self.issue.pk, {"name": "description", "value": new_value})
self.assertEqual(response.status_code, 200)
self.assertEqual(Issue.objects.get(pk=self.issue.pk).description, new_value)
def test_category(self):
"""Test changing category via API."""
new_value = self.category.pk
response = self.client.post("/issue/edit/%d/" % self.issue.pk, {"name": "category", "value": new_value})
self.assertEqual(response.status_code, 200)
self.assertEqual(Issue.objects.get(pk=self.issue.pk).category_id, new_value)
def test_solver(self):
"""Test assigning solver via API."""
new_value = self.test_user_2.pk
response = self.client.post("/issue/edit/%d/" % self.issue.pk, {"name": "solver", "value": new_value})
self.assertEqual(response.status_code, 200)
issue = Issue.objects.get(pk=self.issue.pk)
self.assertEqual(issue.solver_id, new_value)
self.assertEqual(issue.state, ISSUE_ASSIGNED)
def test_permission_denied(self):
"""Test change without permission won't do anything."""
c = Client()
c.force_login(self.test_user_2)
i = Issue.objects.get(pk=self.issue.pk)
response = c.post("/issue/edit/%d/" % self.issue.pk, {"name": "name", "value": "XX"})
self.assertEqual(response.status_code, 403)
self.assertEqual(Issue.objects.get(pk=i.pk).name, i.name)
class UserSelectViewTestCase(TestCase):
def setUp(self):
self.test_user_1 = User.objects.create(username="user_a", is_superuser=True)
self.test_user_2 = User.objects.create(username="user_b")
self.client = Client()
self.client.force_login(self.test_user_1)
def test_username_search(self):
"""Test searching for users by their username."""
u = User.objects.create(username="user_c")
response = self.client.post("/users/", {"q": u.username})
self.assertEqual(response.status_code, 200)
self.assertEqual('[{"ID": %d, "Name": "%s", "Username": "%s"}]' % (u.pk, u.username, u.username),
response.content.decode('ascii'))
def test_first_name_search(self):
"""Test searching for users by their first name."""
u = User.objects.create(username="user_d", first_name="John", last_name="Smith")
response = self.client.post("/users/", {"q": u.first_name})
self.assertEqual(response.status_code, 200)
self.assertEqual('[{"ID": %d, "Name": "%s", "Username": "%s"}]' % (u.pk, u.get_full_name(), u.username),
response.content.decode('ascii'))
def test_last_name_search(self):
"""Test searching for users by their last name."""
u = User.objects.create(username="user_e", first_name="John", last_name="Smith")
response = self.client.post("/users/", {"q": u.last_name})
self.assertEqual(response.status_code, 200)
self.assertEqual('[{"ID": %d, "Name": "%s", "Username": "%s"}]' % (u.pk, u.get_full_name(), u.username),
response.content.decode('ascii'))
def test_permission_denied(self):
"""Test that users without permission can't search anyone."""
c = Client()
c.force_login(self.test_user_2)
u = User.objects.create(username="user_f")
response = c.post("/users/", {"q": "user_f"})
self.assertEqual(response.status_code, 302)
class DeleteViewTestCase(TestCase):
def setUp(self):
self.test_user_1 = User.objects.create(username="user_a", is_superuser=True)
self.test_user_2 = User.objects.create(username="user_b")
self.client = Client()
self.client.force_login(self.test_user_1)
def test_delete(self):
"""Test deletion of an Issue."""
issue = Issue.objects.create(name="Test", created_by=self.test_user_1, description="Test description.")
response = self.client.get("/issue/delete/%d/" % issue.pk)
self.assertEqual(response.status_code, 302)
self.assertRaises(ObjectDoesNotExist, Issue.objects.get, pk=issue.pk)
def test_permission_denied(self):
"""When user doesn't have permission do nothing."""
c = Client()
c.force_login(self.test_user_2)
issue = Issue.objects.create(name="Test", created_by=self.test_user_1, description="Test description.")
response = c.get("/issue/delete/%d/" % issue.pk)
self.assertEqual(response.status_code, 302)
self.assertEqual(Issue.objects.get(pk=issue.pk).name, issue.name)
class IssueDoneTestCase(TestCase):
def setUp(self):
self.test_user_1 = User.objects.create(username="user_a", is_superuser=True)
self.test_user_2 = User.objects.create(username="user_b")
self.test_user_3 = User.objects.create(username="user_c")
self.client_1 = Client()
self.client_1.force_login(self.test_user_1)
self.client_2 = Client()
self.client_2.force_login(self.test_user_2)
def test_superuser_done_assigned(self):
"""Test marking issue as done as superuser while previously been marked as assigned."""
issue = Issue.objects.create(name="Test", created_by=self.test_user_1, solver=self.test_user_2,
description="Test description.")
response = self.client_1.get("/issue/done/%d/" % issue.pk)
self.assertEqual(response.status_code, 302)
self.assertEqual(Issue.objects.get(pk=issue.pk).state, ISSUE_DONE)
def test_solver_done_assigned(self):
"""Test marking issue as done as solver while previously been marked as assigned."""
issue = Issue.objects.create(name="Test", created_by=self.test_user_1, solver=self.test_user_2,
description="Test description.")
response = self.client_2.get("/issue/done/%d/" % issue.pk)
self.assertEqual(response.status_code, 302)
self.assertEqual(Issue.objects.get(pk=issue.pk).state, ISSUE_DONE)
def test_superuser_done_unassigned(self):
"""Test marking issue as done as superuser while not been previously marked as assigned."""
issue = Issue.objects.create(name="Test", created_by=self.test_user_1, description="Test description.")
response = self.client_1.get("/issue/done/%d/" % issue.pk)
self.assertEqual(response.status_code, 302)
self.assertEqual(Issue.objects.get(pk=issue.pk).state, ISSUE_DONE)
def test_permission_denied_assigned(self):
"""When user doesn't haver permission do nothing."""
c = Client()
c.force_login(self.test_user_3)
issue = Issue.objects.create(name="Test", created_by=self.test_user_1, solver=self.test_user_2,
description="Test description.")
response = c.get("/issue/done/%d/" % issue.pk)
self.assertEqual(response.status_code, 302)
self.assertNotEqual(Issue.objects.get(pk=issue.pk).state, ISSUE_DONE)
def test_permission_denied_unassigned(self):
"""When user doesn't haver permission do nothing."""
c = Client()
c.force_login(self.test_user_3)
issue = Issue.objects.create(name="Test", created_by=self.test_user_1, description="Test description.")
response = c.get("/issue/done/%d/" % issue.pk)
self.assertEqual(response.status_code, 302)
self.assertNotEqual(Issue.objects.get(pk=issue.pk).state, ISSUE_DONE)
class IssueCancelTestCase(TestCase):
def setUp(self):
self.test_user_1 = User.objects.create(username="user_a", is_superuser=True)
self.test_user_2 = User.objects.create(username="user_b")
self.test_user_3 = User.objects.create(username="user_c")
self.client_1 = Client()
self.client_1.force_login(self.test_user_1)
self.client_2 = Client()
self.client_2.force_login(self.test_user_2)
def test_superuser_cancel_assigned(self):
"""Test marking issue as canceled as superuser while previously been marked as assigned."""
issue = Issue.objects.create(name="Test", created_by=self.test_user_1, solver=self.test_user_2,
description="Test description.")
response = self.client_1.get("/issue/cancel/%d/" % issue.pk)
self.assertEqual(response.status_code, 302)
self.assertEqual(Issue.objects.get(pk=issue.pk).state, ISSUE_CANCELED)
def test_solver_cancel_assigned(self):
"""Test marking issue as canceled as solver while previously been marked as assigned."""
issue = Issue.objects.create(name="Test", created_by=self.test_user_1, solver=self.test_user_2,
description="Test description.")
response = self.client_2.get("/issue/cancel/%d/" % issue.pk)
self.assertEqual(response.status_code, 302)
self.assertEqual(Issue.objects.get(pk=issue.pk).state, ISSUE_CANCELED)
def test_superuser_cancel_unassigned(self):
"""Test marking issue as canceled as superuser while not been previously marked as assigned."""
issue = Issue.objects.create(name="Test", created_by=self.test_user_1, description="Test description.")
response = self.client_1.get("/issue/cancel/%d/" % issue.pk)
self.assertEqual(response.status_code, 302)
self.assertEqual(Issue.objects.get(pk=issue.pk).state, ISSUE_CANCELED)
def test_permission_denied_assigned(self):
"""When user doesn't haver permission do nothing."""
c = Client()
c.force_login(self.test_user_3)
issue = Issue.objects.create(name="Test", created_by=self.test_user_1, solver=self.test_user_2,
description="Test description.")
response = c.get("/issue/cancel/%d/" % issue.pk)
self.assertEqual(response.status_code, 302)
self.assertNotEqual(Issue.objects.get(pk=issue.pk).state, ISSUE_CANCELED)
def test_permission_denied_unassigned(self):
"""When user doesn't have permission do nothing."""
c = Client()
c.force_login(self.test_user_3)
issue = Issue.objects.create(name="Test", created_by=self.test_user_1, description="Test description.")
response = c.get("/issue/cancel/%d/" % issue.pk)
self.assertEqual(response.status_code, 302)
self.assertNotEqual(Issue.objects.get(pk=issue.pk).state, ISSUE_CANCELED)
def test_superuser_cancel_done(self):
"""When user try to mark done issue as canceled do nothing."""
issue = Issue.objects.create(name="Test", created_by=self.test_user_1, description="Test description.",
state=ISSUE_DONE)
response = self.client_1.get("/issue/cancel/%d/" % issue.pk)
self.assertEqual(response.status_code, 302)
self.assertNotEqual(Issue.objects.get(pk=issue.pk).state, ISSUE_CANCELED)
class UnassignedTestCase(TestCase):
def setUp(self):
self.test_user_1 = User.objects.create(username="user_a", is_superuser=True)
self.test_user_2 = User.objects.create(username="user_b")
self.client = Client()
self.client.force_login(self.test_user_1)
def test_correct_state(self):
"""Test unassigning solver."""
issue = Issue.objects.create(name="Test", created_by=self.test_user_1, solver=self.test_user_2,
description="Test description.")
response = self.client.get("/issue/unassign/%d/" % issue.pk)
self.assertEqual(response.status_code, 302)
new_issue = Issue.objects.get(pk=issue.pk)
self.assertEqual(new_issue.state, ISSUE_CREATED)
self.assertIsNone(new_issue.solver)
self.assertIsNone(new_issue.assigned_at)
def test_done_state(self):
"""Test that solver can't be removed when issue was marked as done."""
issue = Issue.objects.create(name="Test", created_by=self.test_user_1, solver=self.test_user_2,
description="Test description.")
issue.state = ISSUE_DONE
issue.save()
response = self.client.get("/issue/unassign/%d/" % issue.pk)
self.assertEqual(response.status_code, 302)
new_issue = Issue.objects.get(pk=issue.pk)
self.assertEqual(new_issue.state, ISSUE_DONE)
self.assertIsNotNone(new_issue.solver)
self.assertIsNotNone(new_issue.assigned_at)
def test_permission_denied(self):
"""When user doesn't haver permission do nothing."""
issue = Issue.objects.create(name="Test", created_by=self.test_user_1, solver=self.test_user_2,
description="Test description.")
c = Client()
c.force_login(self.test_user_2)
response = c.get("/issue/unassign/%d/" % issue.pk)
self.assertEqual(response.status_code, 302)
new_issue = Issue.objects.get(pk=issue.pk)
self.assertEqual(new_issue.state, ISSUE_ASSIGNED)
self.assertIsNotNone(new_issue.solver)
self.assertIsNotNone(new_issue.assigned_at)
| 44.888889 | 116 | 0.661193 | 2,153 | 16,564 | 4.911287 | 0.068277 | 0.067335 | 0.076036 | 0.041801 | 0.858426 | 0.847267 | 0.80471 | 0.789389 | 0.781823 | 0.732079 | 0 | 0.012006 | 0.210517 | 16,564 | 368 | 117 | 45.01087 | 0.796589 | 0.095569 | 0 | 0.677291 | 0 | 0 | 0.082642 | 0 | 0 | 0 | 0 | 0 | 0.286853 | 1 | 0.143426 | false | 0 | 0.015936 | 0 | 0.187251 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
b6966c647cad21330c65797757d98c9f314d8102 | 257 | py | Python | chef.py | samnalkande/basic-program-by-python | 1f7eabe2cab18b694f6649e33dccb26eba08feae | [
"MIT"
] | null | null | null | chef.py | samnalkande/basic-program-by-python | 1f7eabe2cab18b694f6649e33dccb26eba08feae | [
"MIT"
] | null | null | null | chef.py | samnalkande/basic-program-by-python | 1f7eabe2cab18b694f6649e33dccb26eba08feae | [
"MIT"
] | 3 | 2020-10-04T15:56:25.000Z | 2021-10-01T11:31:44.000Z | class chef:
def make_chicken(self):
print("The chef makes a chicken Quickly")
def make_salad(self):
print("The chef makes a salad Quickly")
def make_spacial_dish(self):
print("The chef makes bbq ribs Quickly")
| 25.7 | 50 | 0.63035 | 36 | 257 | 4.388889 | 0.444444 | 0.132911 | 0.227848 | 0.303797 | 0.411392 | 0.278481 | 0 | 0 | 0 | 0 | 0 | 0 | 0.284047 | 257 | 9 | 51 | 28.555556 | 0.858696 | 0 | 0 | 0 | 0 | 0 | 0.375 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.428571 | false | 0 | 0 | 0 | 0.571429 | 0.428571 | 0 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 6 |
1e35565dbda567f7b13341f8807cb81e2000089e | 84 | py | Python | gcd/inference/beam_search/__init__.py | CogComp/gcd | ab8736346b383b2fc9fbe57274b70ed1cd1b9422 | [
"Apache-2.0"
] | 3 | 2021-05-23T23:48:40.000Z | 2021-11-09T06:56:39.000Z | gcd/inference/beam_search/__init__.py | danieldeutsch/gcd | fdf1a0a8628272fca8dd5c9ce182d16428b1ad38 | [
"Apache-2.0"
] | null | null | null | gcd/inference/beam_search/__init__.py | danieldeutsch/gcd | fdf1a0a8628272fca8dd5c9ce182d16428b1ad38 | [
"Apache-2.0"
] | 1 | 2021-11-27T16:38:20.000Z | 2021-11-27T16:38:20.000Z | from gcd.inference.beam_search.constrained_beam_search import ConstrainedBeamSearch
| 42 | 83 | 0.916667 | 10 | 84 | 7.4 | 0.8 | 0.27027 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.047619 | 84 | 1 | 84 | 84 | 0.925 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
1e3b7613572d86510499f9718cc7cca6d65506ab | 9,624 | py | Python | a10sdk/core/ip/ip_nat.py | deepfield/a10sdk-python | bfaa58099f51f085d5e91652d1d1a3fd5c529d5d | [
"Apache-2.0"
] | 16 | 2015-05-20T07:26:30.000Z | 2021-01-23T11:56:57.000Z | a10sdk/core/ip/ip_nat.py | deepfield/a10sdk-python | bfaa58099f51f085d5e91652d1d1a3fd5c529d5d | [
"Apache-2.0"
] | 6 | 2015-03-24T22:07:11.000Z | 2017-03-28T21:31:18.000Z | a10sdk/core/ip/ip_nat.py | deepfield/a10sdk-python | bfaa58099f51f085d5e91652d1d1a3fd5c529d5d | [
"Apache-2.0"
] | 23 | 2015-03-29T15:43:01.000Z | 2021-06-02T17:12:01.000Z | from a10sdk.common.A10BaseClass import A10BaseClass
class RangeListList(A10BaseClass):
    """One static NAT "range-list" entry (b_key ``range-list-list``).

    This class does not support CRUD operations itself; use the parent object.
    Every schema field starts as the empty string and is then overwritten by
    any matching keyword argument.

    Schema fields (all optional except ``name``): uuid,
    global_start_ipv6_addr, v4_vrid, global_netmaskv4, local_start_ipv6_addr,
    local_netmaskv4, local_start_ipv4_addr, global_start_ipv4_addr, v6_vrid,
    v4_count, v6_count, name. ``DeviceProxy`` handles REST operations and
    session handling (see `common/device_proxy.py`).
    """

    # Schema attributes initialised to "" before kwargs are applied.
    _FIELDS = (
        "uuid", "global_start_ipv6_addr", "v4_vrid", "global_netmaskv4",
        "local_start_ipv6_addr", "local_netmaskv4", "local_start_ipv4_addr",
        "global_start_ipv4_addr", "v6_vrid", "v4_count", "v6_count", "name",
    )

    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        self.b_key = "range-list-list"
        self.DeviceProxy = ""
        for field in self._FIELDS:
            setattr(self, field, "")
        # Caller-supplied values win over the empty-string defaults.
        for key, value in kwargs.items():
            setattr(self, key, value)
class Nat(A10BaseClass):
    """Configure NAT (URL ``/axapi/v3/ip/nat``).

    Class nat supports CRUD operations and inherits from
    `common/A10BaseClass`; it is the `"PARENT"` class for this module.

    Collection fields (range_list_list, pool_group_list, pool_list) start as
    empty lists and map to the /axapi/v3/ip/nat/... sub-resources; nested
    configuration blocks (alg, nat_global, template, translation, icmp,
    inside) start as empty dicts. Keyword arguments are copied verbatim onto
    the instance. ``DeviceProxy`` handles REST operations and session
    handling (see `common/device_proxy.py`).
    """

    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        self.required = []
        self.b_key = "nat"
        self.a10_url = "/axapi/v3/ip/nat"
        self.DeviceProxy = ""
        # Child collections keyed to /axapi/v3/ip/nat/... sub-resources.
        for list_field in ("range_list_list", "pool_group_list", "pool_list"):
            setattr(self, list_field, [])
        # Nested single-object configuration blocks.
        for dict_field in ("alg", "nat_global", "template", "translation", "icmp", "inside"):
            setattr(self, dict_field, {})
        # Caller-supplied values win over the defaults above.
        for key, value in kwargs.items():
            setattr(self, key, value)
| 106.933333 | 2,269 | 0.640067 | 1,219 | 9,624 | 5.002461 | 0.114848 | 0.068875 | 0.086586 | 0.091997 | 0.75287 | 0.735323 | 0.72204 | 0.699738 | 0.671696 | 0.671696 | 0 | 0.023423 | 0.148275 | 9,624 | 89 | 2,270 | 108.134831 | 0.720508 | 0.854738 | 0 | 0.263158 | 0 | 0 | 0.027006 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0 | 0.026316 | 0 | 0.131579 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
1e68526d43e2092453e905ad532f141b784793ea | 174 | py | Python | images/pannotator/p_procariota/prueba2.py | ezequieljsosa/sndg-bio | 5f709b5b572564ec1dfa40d090eca9a34295743e | [
"MIT"
] | null | null | null | images/pannotator/p_procariota/prueba2.py | ezequieljsosa/sndg-bio | 5f709b5b572564ec1dfa40d090eca9a34295743e | [
"MIT"
] | null | null | null | images/pannotator/p_procariota/prueba2.py | ezequieljsosa/sndg-bio | 5f709b5b572564ec1dfa40d090eca9a34295743e | [
"MIT"
] | 1 | 2020-09-01T15:57:54.000Z | 2020-09-01T15:57:54.000Z | #!/usr/bin/python
# Small smoke-test script: exercise list length / emptiness checks.
# NOTE: originally written with Python 2 print statements (a SyntaxError on
# Python 3); modernized to print() calls, behavior otherwise unchanged.
hits = []
print(len(hits))
if not hits:
    print("sorete")
hits.append("hola")
print(len(hits))
if hits:
    print("soretedos")
else:
    print("cagada")
| 11.6 | 19 | 0.655172 | 28 | 174 | 4.071429 | 0.5 | 0.245614 | 0.210526 | 0.245614 | 0.473684 | 0.473684 | 0.473684 | 0.473684 | 0 | 0 | 0 | 0.013514 | 0.149425 | 174 | 14 | 20 | 12.428571 | 0.756757 | 0.091954 | 0 | 0.2 | 0 | 0 | 0.159236 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0 | null | null | 0.5 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 6 |
1eba0afbcf489e0ed3ce0a02802db552768a4884 | 61 | py | Python | JDjango/api/djangotools/views/__init__.py | JIYANG-PLUS/JDjango | 57cbb13b2b4c07f34d546c0c637c22f60c1e692a | [
"MIT"
] | 3 | 2020-12-28T05:09:02.000Z | 2021-06-23T10:02:03.000Z | JDjango/api/djangotools/views/__init__.py | JIYANG-PLUS/JDjango | 57cbb13b2b4c07f34d546c0c637c22f60c1e692a | [
"MIT"
] | null | null | null | JDjango/api/djangotools/views/__init__.py | JIYANG-PLUS/JDjango | 57cbb13b2b4c07f34d546c0c637c22f60c1e692a | [
"MIT"
] | null | null | null | from .gets import *
from .sets import *
from .judge import *
| 15.25 | 20 | 0.704918 | 9 | 61 | 4.777778 | 0.555556 | 0.465116 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.196721 | 61 | 3 | 21 | 20.333333 | 0.877551 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
1ec984be7323e88fe785516fd8e9a49347855543 | 3,390 | py | Python | wod/characters/migrations/0017_auto_20150424_1354.py | wlansu/wod | 7a91747c7e25f9304c42ef6418612d3b391f4662 | [
"MIT"
] | null | null | null | wod/characters/migrations/0017_auto_20150424_1354.py | wlansu/wod | 7a91747c7e25f9304c42ef6418612d3b391f4662 | [
"MIT"
] | null | null | null | wod/characters/migrations/0017_auto_20150424_1354.py | wlansu/wod | 7a91747c7e25f9304c42ef6418612d3b391f4662 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Auto-generated migration (2015-04-24), rewritten for clarity: all ten
    # mage arcana fields receive the identical nullable 0-5 dot rating, so the
    # ten AlterField operations are generated from the field names instead of
    # being repeated verbatim. The resulting operations are unchanged.

    dependencies = [
        ('characters', '0016_auto_20150424_1352'),
    ]

    operations = [
        migrations.AlterField(
            model_name='magecharacter',
            name=arcanum,
            field=models.IntegerField(
                default=0,
                null=True,
                verbose_name=arcanum.capitalize(),
                blank=True,
                # Same byte-string choices as the generated original:
                # [(0, b'0'), (1, b'1'), ..., (5, b'5')]
                choices=[(dots, str(dots).encode('ascii')) for dots in range(6)],
            ),
            preserve_default=True,
        )
        for arcanum in (
            'death', 'fate', 'forces', 'life', 'matter',
            'mind', 'prime', 'space', 'spirit', 'time',
        )
    ]
| 45.2 | 171 | 0.526844 | 435 | 3,390 | 4.018391 | 0.117241 | 0.114416 | 0.143021 | 0.165904 | 0.852403 | 0.852403 | 0.826087 | 0.826087 | 0.826087 | 0.540046 | 0 | 0.059155 | 0.266962 | 3,390 | 74 | 172 | 45.810811 | 0.644266 | 0.006195 | 0 | 0.588235 | 0 | 0 | 0.095337 | 0.006831 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.029412 | 0 | 0.073529 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
948e8065a482214b7a97b001b999cf189f956a05 | 198 | py | Python | api/admin.py | crypticleopard/IIPEDIA | 33717ccd2ff0f809dcb1953880769f423f64474c | [
"MIT"
] | 1 | 2021-05-01T19:20:32.000Z | 2021-05-01T19:20:32.000Z | api/admin.py | crypticleopard/IIPEDIA | 33717ccd2ff0f809dcb1953880769f423f64474c | [
"MIT"
] | null | null | null | api/admin.py | crypticleopard/IIPEDIA | 33717ccd2ff0f809dcb1953880769f423f64474c | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import Book,Teacher,Review,Community
admin.site.register(Book)
admin.site.register(Teacher)
admin.site.register(Review)
admin.site.register(Community)
| 24.75 | 49 | 0.828283 | 28 | 198 | 5.857143 | 0.428571 | 0.219512 | 0.414634 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.065657 | 198 | 7 | 50 | 28.285714 | 0.886486 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.333333 | 0 | 0.333333 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
94cfd0cef295f1a6fc8025675657ba1930f81775 | 9,271 | py | Python | tests/test_basic_api.py | talavis/dds_web | 576b5e36e981182cc7f3440c96bf7d1a038dfaef | [
"BSD-3-Clause"
] | 3 | 2021-06-18T09:38:28.000Z | 2022-02-28T19:37:54.000Z | tests/test_basic_api.py | talavis/dds_web | 576b5e36e981182cc7f3440c96bf7d1a038dfaef | [
"BSD-3-Clause"
] | 610 | 2021-05-12T08:33:31.000Z | 2022-03-31T14:55:05.000Z | tests/test_basic_api.py | MatthiasZepper/dds_web | 28c297f15017eaf17328b607ba242c9587c24eb9 | [
"BSD-3-Clause"
] | 12 | 2021-05-19T10:33:45.000Z | 2022-03-16T10:23:27.000Z | # IMPORTS ################################################################################ IMPORTS #
# Standard library
import flask
import http
import datetime
# Installed
from jwcrypto import jwk, jws
# Own
import tests
import dds_web
from dds_web.api.user import encrypted_jwt_token, jwt_token
# TESTS #################################################################################### TESTS #
def test_auth_check_statuscode_401_missing_info(client):
    """Hitting the token endpoint with no credentials at all must yield
    401/UNAUTHORIZED and the generic 'Missing or incorrect credentials'
    message."""
    resp = client.get(tests.DDSEndpoint.TOKEN)  # neither params nor auth supplied
    assert resp.status_code == http.HTTPStatus.UNAUTHORIZED
    message = resp.json.get("message")
    assert message
    assert "Missing or incorrect credentials" in message
def test_auth_incorrect_username_check_statuscode_401_incorrect_info(client):
    """An unknown username must be rejected with 401/UNAUTHORIZED and the
    generic 'Missing or incorrect credentials' message."""
    resp = client.get(
        tests.DDSEndpoint.TOKEN,
        auth=tests.UserAuth(tests.USER_CREDENTIALS["nouser"]).as_tuple(),
    )
    assert resp.status_code == http.HTTPStatus.UNAUTHORIZED
    assert resp.json.get("message") == "Missing or incorrect credentials"
def test_auth_incorrect_username_and_password_check_statuscode_401_incorrect_info(client):
    """Wrong username *and* password must be rejected with 401/UNAUTHORIZED
    and the generic 'Missing or incorrect credentials' message."""
    resp = client.get(
        tests.DDSEndpoint.TOKEN,
        auth=tests.UserAuth(tests.USER_CREDENTIALS["nopassword"]).as_tuple(),
    )
    assert resp.status_code == http.HTTPStatus.UNAUTHORIZED
    assert resp.json.get("message") == "Missing or incorrect credentials"
def test_auth_incorrect_password_check_statuscode_401_incorrect_info(client):
    """A valid user with the wrong password must be rejected with
    401/UNAUTHORIZED and the generic 'Missing or incorrect credentials'
    message."""
    resp = client.get(
        tests.DDSEndpoint.TOKEN,
        auth=tests.UserAuth(tests.USER_CREDENTIALS["wronguser"]).as_tuple(),
    )
    assert resp.status_code == http.HTTPStatus.UNAUTHORIZED
    assert resp.json.get("message") == "Missing or incorrect credentials"
def test_auth_correctauth_check_statuscode_200_correct_info(client):
    """Valid basic-auth credentials yield 200/OK and a JWS token whose
    subject is the authenticated username."""
    credentials = tests.UserAuth(tests.USER_CREDENTIALS["researchuser"]).as_tuple()
    resp = client.get(tests.DDSEndpoint.TOKEN, auth=credentials)
    assert resp.status_code == http.HTTPStatus.OK
    token = resp.json.get("token")
    assert token
    # Verify the signature before reading the payload; extracting the payload
    # of an unverified JWS raises an `InvalidJWSOperation` error.
    jwstoken = jws.JWS()
    jwstoken.deserialize(token)
    jwstoken.verify(jwk.JWK.from_password(flask.current_app.config.get("SECRET_KEY")))
    payload = jws.json_decode(jwstoken.payload)
    assert payload.get("sub") == credentials[0]
def test_auth_incorrect_token_without_periods(client):
    """A bearer token that is not even JWT-shaped (no periods) must be
    rejected as unauthorized with 'Invalid token'."""
    # The project public key endpoint serves as an example protected route.
    resp = client.get(
        tests.DDSEndpoint.PROJ_PUBLIC,
        query_string={"project": "public_project_id"},
        headers={"Authorization": "Bearer madeuptoken"},
    )
    assert resp.status_code == http.HTTPStatus.UNAUTHORIZED
    assert resp.json.get("message") == "Invalid token"
def test_auth_incorrect_token_with_periods(client):
    """A made-up token that merely looks JWT-shaped (two periods) must be
    rejected as unauthorized with 'Invalid token'."""
    # The project public key endpoint serves as an example protected route.
    resp = client.get(
        tests.DDSEndpoint.PROJ_PUBLIC,
        query_string={"project": "public_project_id"},
        headers={"Authorization": "Bearer made.up.token"},
    )
    assert resp.status_code == http.HTTPStatus.UNAUTHORIZED
    assert resp.json.get("message") == "Invalid token"
def test_auth_expired_signed_token(client):
    """A correctly signed but already-expired token must be rejected with
    401/UNAUTHORIZED and 'Expired token'."""
    expired = dds_web.api.user.jwt_token("researchuser", expires_in=datetime.timedelta(hours=-2))
    # The project public key endpoint serves as an example protected route.
    resp = client.get(
        tests.DDSEndpoint.PROJ_PUBLIC,
        query_string={"project": "public_project_id"},
        headers={"Authorization": f"Bearer {expired}"},
    )
    assert resp.status_code == http.HTTPStatus.UNAUTHORIZED
    assert resp.json.get("message") == "Expired token"
def test_auth_token_wrong_secret_key_signed_token(client):
    """A token signed under the wrong SECRET_KEY must be rejected as invalid.

    The app's SECRET_KEY is swapped out while the token is minted and restored
    afterwards; restoration happens in a ``finally`` so that a failure while
    minting the token cannot leak the bogus key into subsequent tests.
    """
    old_secret = flask.current_app.config.get("SECRET_KEY")
    flask.current_app.config["SECRET_KEY"] = "XX" * 16
    try:
        token = dds_web.api.user.jwt_token("researchuser", expires_in=datetime.timedelta(hours=-2))
    finally:
        # reset secret key even if token creation raised
        flask.current_app.config["SECRET_KEY"] = old_secret
    # Fetch the project public key as an example protected route
    response = client.get(
        tests.DDSEndpoint.PROJ_PUBLIC,
        query_string={"project": "public_project_id"},
        headers={"Authorization": f"Bearer {token}"},
    )
    assert response.status_code == http.HTTPStatus.UNAUTHORIZED
    response_json = response.json
    assert response_json.get("message")
    assert "Invalid token" == response_json.get("message")
def test_auth_with_token(client):
    """A token obtained from the auth endpoint must grant access to a
    protected endpoint."""
    resp = client.get(
        tests.DDSEndpoint.TOKEN,
        auth=tests.UserAuth(tests.USER_CREDENTIALS["researchuser"]).as_tuple(),
    )
    assert resp.status_code == http.HTTPStatus.OK
    token = resp.json.get("token")
    assert token
    # Use the freshly issued token to fetch the project public key.
    resp = client.get(
        tests.DDSEndpoint.PROJ_PUBLIC,
        query_string={"project": "public_project_id"},
        headers={"Authorization": "Bearer " + token},
    )
    assert resp.status_code == http.HTTPStatus.OK
    assert resp.json.get("public")
# ENCRYPTED TOKEN ################################################################ ENCRYPTED TOKEN #
def test_auth_expired_encrypted_token(client):
    """An encrypted but already-expired token must be rejected with
    401/UNAUTHORIZED and 'Expired token'."""
    expired = dds_web.api.user.encrypted_jwt_token(
        "researchuser", None, expires_in=datetime.timedelta(hours=-2)
    )
    # The project public key endpoint serves as an example protected route.
    resp = client.get(
        tests.DDSEndpoint.PROJ_PUBLIC,
        query_string={"project": "public_project_id"},
        headers={"Authorization": f"Bearer {expired}"},
    )
    assert resp.status_code == http.HTTPStatus.UNAUTHORIZED
    assert resp.json.get("message") == "Expired token"
def test_auth_token_wrong_secret_key_encrypted_token(client):
    """An encrypted token signed under the wrong SECRET_KEY must be rejected.

    The app's SECRET_KEY is swapped out while the token is minted and restored
    afterwards; restoration happens in a ``finally`` so that a failure while
    minting the token cannot leak the bogus key into subsequent tests.
    """
    old_secret = flask.current_app.config.get("SECRET_KEY")
    flask.current_app.config["SECRET_KEY"] = "XX" * 16
    try:
        token = dds_web.api.user.encrypted_jwt_token(
            "researchuser", None, expires_in=datetime.timedelta(hours=-2)
        )
    finally:
        # reset secret key even if token creation raised
        flask.current_app.config["SECRET_KEY"] = old_secret
    # Fetch the project public key as an example protected route
    response = client.get(
        tests.DDSEndpoint.PROJ_PUBLIC,
        query_string={"project": "public_project_id"},
        headers={"Authorization": f"Bearer {token}"},
    )
    assert response.status_code == http.HTTPStatus.UNAUTHORIZED
    response_json = response.json
    assert response_json.get("message")
    assert "Invalid token" == response_json.get("message")
def test_auth_with_encrypted_token(client):
    """An encrypted token obtained from the auth endpoint must grant access
    to a protected endpoint."""
    resp = client.get(
        tests.DDSEndpoint.ENCRYPTED_TOKEN,
        auth=tests.UserAuth(tests.USER_CREDENTIALS["researchuser"]).as_tuple(),
    )
    assert resp.status_code == http.HTTPStatus.OK
    token = resp.json.get("token")
    assert token
    # Use the freshly issued encrypted token to fetch the project public key.
    resp = client.get(
        tests.DDSEndpoint.PROJ_PUBLIC,
        query_string={"project": "public_project_id"},
        headers={"Authorization": "Bearer " + token},
    )
    assert resp.status_code == http.HTTPStatus.OK
    assert resp.json.get("public")
| 36.5 | 100 | 0.700572 | 1,102 | 9,271 | 5.692378 | 0.112523 | 0.110952 | 0.066954 | 0.070142 | 0.898932 | 0.879962 | 0.86944 | 0.856528 | 0.846804 | 0.827674 | 0 | 0.005087 | 0.17312 | 9,271 | 253 | 101 | 36.644269 | 0.813201 | 0.169777 | 0 | 0.679245 | 0 | 0 | 0.135375 | 0 | 0 | 0 | 0 | 0 | 0.257862 | 1 | 0.081761 | false | 0.025157 | 0.044025 | 0 | 0.125786 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
94d4e1faf553d8369a5bbe76fbbdcd4eba168e89 | 27 | py | Python | netbox_netdisco/core/notification/__init__.py | mksoska/netbox-plugin-netdisco | 7c1324f075b47ddd9adfbdf7e19d5afef09f22fd | [
"MIT"
] | 1 | 2021-11-11T11:50:14.000Z | 2021-11-11T11:50:14.000Z | netbox_netdisco/core/notification/__init__.py | mksoska/netbox-plugin-netdisco | 7c1324f075b47ddd9adfbdf7e19d5afef09f22fd | [
"MIT"
] | null | null | null | netbox_netdisco/core/notification/__init__.py | mksoska/netbox-plugin-netdisco | 7c1324f075b47ddd9adfbdf7e19d5afef09f22fd | [
"MIT"
] | null | null | null | from .icinga2 import Icinga | 27 | 27 | 0.851852 | 4 | 27 | 5.75 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.041667 | 0.111111 | 27 | 1 | 27 | 27 | 0.916667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
bf60084f863e48637b70138c2bbd2ab84c595adf | 188 | py | Python | src/clearskies_aws/contexts/__init__.py | cmancone/clearskies-aws | 6dc8714127c67553c7c161ec82756680b848113b | [
"MIT"
] | null | null | null | src/clearskies_aws/contexts/__init__.py | cmancone/clearskies-aws | 6dc8714127c67553c7c161ec82756680b848113b | [
"MIT"
] | null | null | null | src/clearskies_aws/contexts/__init__.py | cmancone/clearskies-aws | 6dc8714127c67553c7c161ec82756680b848113b | [
"MIT"
] | null | null | null | from .lambda_api_gateway import lambda_api_gateway
from .lambda_elb import lambda_elb
from .lambda_http_gateway import lambda_http_gateway
from .lambda_invocation import lambda_invocation
| 37.6 | 52 | 0.893617 | 28 | 188 | 5.571429 | 0.285714 | 0.25641 | 0.205128 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.085106 | 188 | 4 | 53 | 47 | 0.906977 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
78493fa74bf8864381ba00f7ad6b063a332e8722 | 45,566 | py | Python | render.py | Perlkonig/avabur-clan-stats | 66deb49f60e31ff33af7462fa11d36ceee3a4286 | [
"MIT"
] | null | null | null | render.py | Perlkonig/avabur-clan-stats | 66deb49f60e31ff33af7462fa11d36ceee3a4286 | [
"MIT"
] | 5 | 2018-05-10T19:25:17.000Z | 2018-07-29T18:31:38.000Z | render.py | Perlkonig/avabur-clan-stats | 66deb49f60e31ff33af7462fa11d36ceee3a4286 | [
"MIT"
] | 2 | 2018-05-11T01:38:48.000Z | 2020-09-09T12:27:35.000Z | #!/usr/local/bin/python3
import json
import sqlite3
import csv
import os
import math
import numpy
def level2xp(lvl):
    """Convert a (possibly fractional) clan level into total accumulated XP.

    The whole part of ``lvl`` contributes the full XP cost of every completed
    level; the fractional part contributes that fraction of the next level's
    cost, rounded to the nearest integer.

    Fix: the original unconditionally indexed ``xptiers[whole]``, which raised
    IndexError for lvl == 111 (the table's maximum). The fractional term is
    now only added when there actually is a fractional part.
    """
    # XP required to complete each level, up to level 111.
    xptiers = [20000, 121678, 350851, 745429, 1339951, 2167182, 3258923, 4646498, 6361081, 8433930, 10896556, 13780871, 17119294, 20944855, 25291273, 30193033, 35685457, 41804763, 48588126, 56073734, 64300844, 73309830, 83142239, 93840842, 105449680, 118014118, 131580895, 146198170, 161915581, 178784289, 196857031, 216188176, 236833776, 258851620, 282301291, 307244220, 333743747, 361865174, 391675830, 423245132, 456644644, 491948143, 529231687, 568573676, 610054927, 653758741, 699770974, 748180144, 799077355, 852556671, 908714903, 967651833, 1029470270, 1094276137, 1162178557, 1233289942, 1307726087, 1385606263, 1467053316, 1552193763, 1641157895, 1734079883, 1831097884, 1932354148, 2037995137, 2148171633, 2263038863, 2382000000, 2507000000, 2637406405, 2772681975, 2913495393, 3060031195, 3212479279, 3371035044, 3535899544, 3707279636, 3885388140, 4070443993, 4262672421, 4462305102, 4669580337, 4884743233, 5108045881, 5339747538, 5580114827, 5829421923, 6087950759, 6355991231, 6633841407, 6921807744, 7220205308, 7529358005, 7849598808, 8181270001, 8524723420, 8880320706, 9248433560, 9629444009, 10023744672, 10431739043, 10853841770, 11290478946, 11742088411, 12209120054, 12692036127, 13191311569, 13707434329, 14240905711, 14792240714, 15361968389]
    assert len(xptiers) == 111
    whole = math.floor(lvl)
    decimal = lvl - whole
    # Sum of all completed levels.
    xp = sum(xptiers[:whole])
    if decimal:
        # Partial progress through the next level.
        xp += int(round(xptiers[whole] * decimal))
    return xp
def calcDeltas(lst):
    """Return the consecutive differences of ``lst``.

    Returns None for None or an empty list, [0] for a single element, and
    otherwise a list of length len(lst) - 1 where element i is
    lst[i+1] - lst[i].

    Idiom cleanup: replaced ``lst == None`` with a truthiness check and the
    manual index loop with a zip-based comprehension; behavior is unchanged.
    """
    if not lst:
        # Covers both None and the empty list, as in the original.
        return None
    if len(lst) == 1:
        return [0]
    return [later - earlier for earlier, later in zip(lst, lst[1:])]
def buildData(dates, deltas):
    """Pair each delta with its date.

    When there is one more date than deltas (deltas were computed from
    consecutive values), each delta is paired with the *later* date of its
    pair; otherwise deltas and dates are paired index-for-index.
    Returns a list of (date, delta) tuples.
    """
    offset = 1 if len(dates) > len(deltas) else 0
    return [(dates[i + offset], delta) for i, delta in enumerate(deltas)]
def trimOutliers(lst, percent, ceil=True):
    """Drop ``percent`` of the values from each end of the sorted list.

    The per-end cut count is len(lst) * percent, rounded up when ``ceil`` is
    True and down otherwise. If trimming both ends would remove everything,
    the full sorted list is returned instead.
    """
    ordered = sorted(lst)
    raw_count = len(ordered) * percent
    cut = math.ceil(raw_count) if ceil else math.floor(raw_count)
    if cut * 2 >= len(ordered):
        # Trimming would leave nothing; return the sorted data untrimmed.
        return ordered
    return ordered[cut:len(ordered) - cut]
def trimDataByStd(lst, maxdev):
    """Null out datapoints more than ``maxdev`` standard deviations from the mean.

    ``lst`` is a list of (label, value) pairs. Each pair whose value deviates
    from the mean by more than ``maxdev`` standard deviations is replaced with
    (label, None); others are kept as-is. If the standard deviation is zero
    the input list is returned unchanged.
    """
    values = [pair[1] for pair in lst]
    spread = numpy.std(values)
    if spread == 0:
        return lst
    center = numpy.mean(values)
    return [
        pair if abs(pair[1] - center) / spread <= maxdev else (pair[0], None)
        for pair in lst
    ]
def calcMedian(lst):
    """Return the median of ``lst``.

    Odd-length input yields the middle element; even-length input yields the
    arithmetic mean of the two middle elements (which may be a float).
    """
    ordered = sorted(lst)
    mid, odd = divmod(len(ordered), 2)
    if odd:
        return ordered[mid]
    return (ordered[mid - 1] + ordered[mid]) / 2
# Load settings.
# NOTE(review): the settings path is hard-coded to this host's layout.
with open('/home/protected/avabur/settings.json') as j:
    settings = json.load(j)
# Per-report day windows: 0 means "no window" (use the whole table).
clandays = 0
if 'clandays' in settings:
    clandays = int(settings['clandays'])
actiondays = 0
if 'actiondays' in settings:
    actiondays = int(settings['actiondays'])
byslicedays = 0
if 'byslicedays' in settings:
    byslicedays = int(settings['byslicedays'])
leveldays = 0
if 'leveldays' in settings:
    leveldays = int(settings['leveldays'])
leveldays_maxlvls = 0
if 'leveldays_maxlvls' in settings:
    leveldays_maxlvls = int(settings['leveldays_maxlvls'])
# Column order used by the multi-clan CSVs: our clan first, then each rival.
allclans = ["Us"]
for rival in settings['rivals']:
    allclans.append(rival['name'])
# Load/Initialize database.
try:
    conn = sqlite3.connect(settings['dbfile'])
except sqlite3.DatabaseError as e:
    # Re-raise with the repr as the message; note this drops the original
    # exception context/traceback chain.
    raise sqlite3.DatabaseError(repr(e))
c = conn.cursor()
# XP gained per day, for our clan and each rival, written to clan_xp.csv.
# Each clan's absolute XP (level converted to XP plus partial XP) is sampled
# per datestamp, then converted to day-over-day deltas.
xpdata = dict()
if (clandays > 0):
    c.execute("SELECT datestamp, xp, level FROM clan WHERE level IS NOT NULL AND (julianday('now') - julianday(datestamp) <= ?) ORDER BY datestamp", (clandays,))
else:
    c.execute("SELECT datestamp, xp, level FROM clan WHERE level IS NOT NULL ORDER BY datestamp")
recs = c.fetchall()
dates = [x[0] for x in recs]
xps = [(level2xp(x[2]) + x[1]) for x in recs]
xpdeltas = calcDeltas(xps)
for entry in buildData(dates, xpdeltas):
    xpdata[entry[0]] = {'Us': entry[1]}
for rival in settings['rivals']:
    if (clandays > 0):
        c.execute("SELECT datestamp, xp, level FROM rivalclans WHERE clanid=? AND (julianday('now') - julianday(datestamp) <= ?) ORDER BY datestamp", (rival['id'], clandays))
    else:
        c.execute("SELECT datestamp, xp, level FROM rivalclans WHERE clanid=? ORDER BY datestamp", (rival['id'],))
    recs = c.fetchall()
    dates = [x[0] for x in recs]
    xps = [(level2xp(x[2]) + x[1]) for x in recs]
    xpdeltas = calcDeltas(xps)
    for entry in buildData(dates, xpdeltas):
        # NOTE(review): this indexes xpdata[entry[0]] without checking the
        # key exists — a rival datestamp absent from our own clan's rows
        # would raise KeyError; presumably both tables share datestamps.
        xpdata[entry[0]][rival['name']] = entry[1]
# Emit one row per date, one column per clan; None for missing clans.
with open(os.path.join(settings['csvdir'], 'clan_xp.csv'), 'w', newline='') as csvfile:
    csvw = csv.writer(csvfile, dialect=csv.excel)
    csvw.writerow(["Date"] + allclans)
    for date in xpdata.keys():
        row = [date]
        for clan in allclans:
            if clan in xpdata[date]:
                row.append(xpdata[date][clan])
            else:
                row.append(None)
        csvw.writerow(row)
# Clan-wide total and per-member-average action deltas per day, written to
# clan_actions_total.csv and clan_actions_avg.csv.
if (clandays > 0):
    c.execute("SELECT datestamp, sum(totalacts), count() FROM members WHERE (julianday('now') - julianday(datestamp) <= ?) GROUP BY datestamp ORDER BY datestamp", (clandays,))
else:
    c.execute("SELECT datestamp, sum(totalacts), count() FROM members GROUP BY datestamp ORDER BY datestamp")
recs = c.fetchall()
dates = [x[0] for x in recs]
totals = [x[1] for x in recs]
counts = [x[2] for x in recs]
deltas = calcDeltas(totals)
# NOTE(review): deltas[i] spans dates[i]→dates[i+1] but is divided by
# counts[i], the member count at the earlier date — confirm intended.
avgs = [round(deltas[i] / counts[i]) for i in range(len(deltas))]
# avgs = [round(totals[i] / counts[i]) for i in range(len(totals))]
## Try to trim really wide swings: null out any point whose jump from the
## previous point exceeds a configurable threshold.
actions_total_whatiswide = 500000
actions_average_whatiswide = 50000
if 'actions_total_whatiswide' in settings:
    actions_total_whatiswide = settings['actions_total_whatiswide']
if 'actions_average_whatiswide' in settings:
    actions_average_whatiswide = settings['actions_average_whatiswide']
dd = calcDeltas(deltas)
for i in range(len(dd)):
    if abs(dd[i]) > actions_total_whatiswide:
        deltas[i+1] = None
dd = calcDeltas(avgs)
for i in range(len(dd)):
    if abs(dd[i]) > actions_average_whatiswide:
        avgs[i+1] = None
totaldata = buildData(dates, deltas)
avgdata = buildData(dates, avgs)
with open(os.path.join(settings['csvdir'], 'clan_actions_total.csv'), 'w', newline='') as csvfile:
    csvw = csv.writer(csvfile, dialect=csv.excel)
    csvw.writerow(["Date", "Total Actions"])
    for row in totaldata:
        csvw.writerow(row)
with open(os.path.join(settings['csvdir'], 'clan_actions_avg.csv'), 'w', newline='') as csvfile:
    csvw = csv.writer(csvfile, dialect=csv.excel)
    csvw.writerow(["Date", "Average Actions"])
    for row in avgdata:
        csvw.writerow(row)
# Aggregate donations (other than xp): daily platinum and gold donation
# deltas written to clan_donations_plat.csv / clan_donations_gold.csv.
# NOTE(review): GROUP BY datestamp with non-aggregated columns lets SQLite
# pick an arbitrary member's row per date — confirm this is intended.
if (clandays > 0):
    c.execute("SELECT datestamp, d_crystals, d_platinum, d_gold, d_food, d_wood, d_iron, d_stone FROM members WHERE (julianday('now') - julianday(datestamp) <= ?) GROUP BY datestamp ORDER BY datestamp", (clandays,))
else:
    c.execute("SELECT datestamp, d_crystals, d_platinum, d_gold, d_food, d_wood, d_iron, d_stone FROM members GROUP BY datestamp ORDER BY datestamp")
recs = c.fetchall()
dates = [x[0] for x in recs]
plat = [x[2] for x in recs]
gold = [x[3] for x in recs]
plat = calcDeltas(plat)
## Try to trim really wide swings: negative deltas (counter reset) are nulled.
for i in range(len(plat)):
    if plat[i] < 0:
        plat[i] = None
gold = calcDeltas(gold)
## Try to trim really wide swings
for i in range(len(gold)):
    if gold[i] < 0:
        gold[i] = None
platdata = buildData(dates, plat)
golddata = buildData(dates, gold)
with open(os.path.join(settings['csvdir'], 'clan_donations_plat.csv'), 'w', newline='') as csvfile:
    csvw = csv.writer(csvfile, dialect=csv.excel)
    csvw.writerow(["Date", "Platinum"])
    for row in platdata:
        csvw.writerow(row)
with open(os.path.join(settings['csvdir'], 'clan_donations_gold.csv'), 'w', newline='') as csvfile:
    csvw = csv.writer(csvfile, dialect=csv.excel)
    csvw.writerow(["Date", "Gold"])
    for row in golddata:
        csvw.writerow(row)
# Per-user total action deltas, pivoted to one row per date and one column
# per current member, written to individual_actions.csv.
## First get maxdate (the most recent snapshot).
c.execute("SELECT MAX(datestamp) FROM members")
maxdate = c.fetchone()[0]
## Get list of current members (those present in the latest snapshot).
c.execute("SELECT DISTINCT(username) FROM members WHERE datestamp=? ORDER BY username COLLATE NOCASE", [maxdate])
usernames = [x[0] for x in c.fetchall()]
## Get list of distinct dates.
if (clandays > 0):
    c.execute("SELECT DISTINCT(datestamp) FROM members WHERE (julianday('now') - julianday(datestamp) <= ?) ORDER BY datestamp", (clandays,))
else:
    c.execute("SELECT DISTINCT(datestamp) FROM members ORDER BY datestamp")
alldates = [x[0] for x in c.fetchall()]
# Drop the earliest date — deltas start at the second sample.
alldates.pop(0)
## Now get their total action data.
rawdata = dict()
for u in usernames:
    rawdata[u] = list()
    if (clandays > 0):
        c.execute("SELECT datestamp, totalacts FROM members WHERE username=? AND (julianday('now') - julianday(datestamp) <= ?)", [u, clandays])
    else:
        c.execute("SELECT datestamp, totalacts FROM members WHERE username=?", [u])
    for row in c:
        rawdata[u].append((row[0], row[1]))
## Now turn that into deltas for each user.
# NOTE(review): a user with no rows in the window would make calcDeltas
# return None and buildData fail; presumably current members always have rows.
deltadata = dict()
for u in usernames:
    dates = [x[0] for x in rawdata[u]]
    counts = [x[1] for x in rawdata[u]]
    deltas = calcDeltas(counts)
    deltadata[u] = buildData(dates, deltas)
## Now convert that into a format suitable for CSV output (rows are dates, users are columns).
## This uses a number of nested loops. It's not the most efficient, but it's good enough.
csvout = []
csvout.append(['Date'] + usernames)
### This gives us the row structure.
for d in alldates:
    row = [d]
    ### This loop ensures the correct column order.
    for u in usernames:
        ### Look at each delta entry for the given user and see if it matches the date.
        found = False
        for delta in deltadata[u]:
            if (delta[0] == d):
                found = True
                row.append(delta[1])
                break
        if not found:
            row.append(None)
    csvout.append(row)
## Print it!
with open(os.path.join(settings['csvdir'], 'individual_actions.csv'), 'w', newline='') as csvfile:
    csvw = csv.writer(csvfile, dialect=csv.excel)
    for row in csvout:
        csvw.writerow(row)
# Per-user base stats (absolute values, not deltas), pivoted to one row per
# date and one column per user, written to individual_stats.csv.
## First get list of ALL users ever seen (not just current members).
c.execute("SELECT DISTINCT(username) FROM members ORDER BY username COLLATE NOCASE")
usernames = [x[0] for x in c.fetchall()]
## Get list of distinct dates.
if (clandays > 0):
    c.execute("SELECT DISTINCT(datestamp) FROM members WHERE (julianday('now') - julianday(datestamp) <= ?) ORDER BY datestamp", (clandays,))
else:
    c.execute("SELECT DISTINCT(datestamp) FROM members ORDER BY datestamp")
# The first date is kept here (unlike the delta reports) since stats are
# absolute samples.
alldates = [x[0] for x in c.fetchall()]
## Now get their stats data.
rawdata = dict()
for u in usernames:
    rawdata[u] = list()
    if (clandays > 0):
        c.execute("SELECT datestamp, stats FROM members WHERE username=? AND (julianday('now') - julianday(datestamp) <= ?)", [u, clandays])
    else:
        c.execute("SELECT datestamp, stats FROM members WHERE username=?", [u])
    for row in c:
        rawdata[u].append((row[0], row[1]))
## Now convert that into a format suitable for CSV output (rows are dates, users are columns).
## This uses a number of nested loops. It's not the most efficient, but it's good enough.
csvout = []
csvout.append(['Date'] + usernames)
### This gives us the row structure.
for d in alldates:
    row = [d]
    ### This loop ensures the correct column order.
    for u in usernames:
        ### Look at each stat entry for the given user and see if it matches the date.
        found = False
        for stat in rawdata[u]:
            if (stat[0] == d):
                found = True
                row.append(stat[1])
                break
        if not found:
            row.append(None)
    csvout.append(row)
## Print it!
with open(os.path.join(settings['csvdir'], 'individual_stats.csv'), 'w', newline='') as csvfile:
    csvw = csv.writer(csvfile, dialect=csv.excel)
    for row in csvout:
        csvw.writerow(row)
# Per-user xp donation deltas, pivoted (rows = dates, columns = users),
# written to individual_xpdonated.csv.  Also accumulates per-date totals
# into xpdates/xpdeltas as a side effect (presumably consumed later in the
# file — not visible here).
## Get latest date.
c.execute("SELECT MAX(datestamp) FROM members")
maxdate = c.fetchone()[0]
## Get list of all current members.
c.execute("SELECT DISTINCT(username) FROM members WHERE datestamp=? ORDER BY username COLLATE NOCASE", [maxdate])
usernames = [x[0] for x in c.fetchall()]
## Get list of distinct dates.
if (clandays > 0):
    c.execute("SELECT DISTINCT(datestamp) FROM members WHERE (julianday('now') - julianday(datestamp) <= ?) ORDER BY datestamp", (clandays,))
else:
    c.execute("SELECT DISTINCT(datestamp) FROM members ORDER BY datestamp")
alldates = [x[0] for x in c.fetchall()]
# First date carries no delta.
alldates.pop(0)
## Now get their xp donation data.
rawdata = dict()
for u in usernames:
    rawdata[u] = list()
    if (clandays > 0):
        c.execute("SELECT datestamp, d_xp FROM members WHERE username=? AND (julianday('now') - julianday(datestamp) <= ?)", [u, clandays])
    else:
        c.execute("SELECT datestamp, d_xp FROM members WHERE username=?", [u])
    for row in c:
        rawdata[u].append((row[0], row[1]))
## Now turn that into deltas for each user.
deltadata = dict()
for u in usernames:
    dates = [x[0] for x in rawdata[u]]
    counts = [x[1] for x in rawdata[u]]
    deltas = calcDeltas(counts)
    deltadata[u] = buildData(dates, deltas)
xpdates = list()
xpdeltas = list()
## Now convert that into a format suitable for CSV output (rows are dates, users are columns).
## This uses a number of nested loops. It's not the most efficient, but it's good enough.
csvout = []
csvout.append(['Date'] + usernames)
### This gives us the row structure.
for d in alldates:
    row = [d]
    xpdates.append(d)
    # node sums all members' donation deltas for this date.
    node = 0
    ### This loop ensures the correct column order.
    for u in usernames:
        ### Look at each delta entry for the given user and see if it matches the date.
        found = False
        for delta in deltadata[u]:
            if (delta[0] == d):
                found = True
                row.append(delta[1])
                # NOTE(review): if d_xp were ever NULL in the DB this sum
                # would raise TypeError — assumes the column is NOT NULL.
                node += delta[1]
                break
        if not found:
            row.append(None)
    xpdeltas.append(node)
    csvout.append(row)
## Print it!
with open(os.path.join(settings['csvdir'], 'individual_xpdonated.csv'), 'w', newline='') as csvfile:
    csvw = csv.writer(csvfile, dialect=csv.excel)
    for row in csvout:
        csvw.writerow(row)
# Per-user gold donation deltas, pivoted (rows = dates, columns = users),
# written to individual_golddonated.csv.  Same pipeline as the xp-donation
# section, but over the d_gold column and with no running total.
## Get latest date.
c.execute("SELECT MAX(datestamp) FROM members")
maxdate = c.fetchone()[0]
## Get list of all current members.
c.execute("SELECT DISTINCT(username) FROM members WHERE datestamp=? ORDER BY username COLLATE NOCASE", [maxdate])
usernames = [x[0] for x in c.fetchall()]
## Get list of distinct dates.
if (clandays > 0):
    c.execute("SELECT DISTINCT(datestamp) FROM members WHERE (julianday('now') - julianday(datestamp) <= ?) ORDER BY datestamp", (clandays,))
else:
    c.execute("SELECT DISTINCT(datestamp) FROM members ORDER BY datestamp")
alldates = [x[0] for x in c.fetchall()]
# First date carries no delta.
alldates.pop(0)
## Now get their gold donation data.
rawdata = dict()
for u in usernames:
    rawdata[u] = list()
    if (clandays > 0):
        c.execute("SELECT datestamp, d_gold FROM members WHERE username=? AND (julianday('now') - julianday(datestamp) <= ?)", [u, clandays])
    else:
        c.execute("SELECT datestamp, d_gold FROM members WHERE username=?", [u])
    for row in c:
        rawdata[u].append((row[0], row[1]))
## Now turn that into deltas for each user.
deltadata = dict()
for u in usernames:
    dates = [x[0] for x in rawdata[u]]
    counts = [x[1] for x in rawdata[u]]
    deltas = calcDeltas(counts)
    deltadata[u] = buildData(dates, deltas)
## Now convert that into a format suitable for CSV output (rows are dates, users are columns).
## This uses a number of nested loops. It's not the most efficient, but it's good enough.
csvout = []
csvout.append(['Date'] + usernames)
### This gives us the row structure.
for d in alldates:
    row = [d]
    ### This loop ensures the correct column order.
    for u in usernames:
        ### Look at each delta entry for the given user and see if it matches the date.
        found = False
        for delta in deltadata[u]:
            if (delta[0] == d):
                found = True
                row.append(delta[1])
                break
        if not found:
            row.append(None)
    csvout.append(row)
## Print it!
with open(os.path.join(settings['csvdir'], 'individual_golddonated.csv'), 'w', newline='') as csvfile:
    csvw = csv.writer(csvfile, dialect=csv.excel)
    for row in csvout:
        csvw.writerow(row)
# Per-user platinum donation deltas, pivoted (rows = dates, columns = users),
# written to individual_platdonated.csv.  Same pipeline as the gold-donation
# section but over the d_platinum column.
## Get latest date.
c.execute("SELECT MAX(datestamp) FROM members")
maxdate = c.fetchone()[0]
## Get list of all current members.
c.execute("SELECT DISTINCT(username) FROM members WHERE datestamp=? ORDER BY username COLLATE NOCASE", [maxdate])
usernames = [x[0] for x in c.fetchall()]
## Get list of distinct dates.
if (clandays > 0):
    c.execute("SELECT DISTINCT(datestamp) FROM members WHERE (julianday('now') - julianday(datestamp) <= ?) ORDER BY datestamp", (clandays,))
else:
    c.execute("SELECT DISTINCT(datestamp) FROM members ORDER BY datestamp")
alldates = [x[0] for x in c.fetchall()]
# First date carries no delta.
alldates.pop(0)
## Now get their plat donation data.
rawdata = dict()
for u in usernames:
    rawdata[u] = list()
    if (clandays > 0):
        c.execute("SELECT datestamp, d_platinum FROM members WHERE username=? AND (julianday('now') - julianday(datestamp) <= ?)", [u, clandays])
    else:
        c.execute("SELECT datestamp, d_platinum FROM members WHERE username=?", [u])
    for row in c:
        rawdata[u].append((row[0], row[1]))
## Now turn that into deltas for each user.
deltadata = dict()
for u in usernames:
    dates = [x[0] for x in rawdata[u]]
    counts = [x[1] for x in rawdata[u]]
    deltas = calcDeltas(counts)
    deltadata[u] = buildData(dates, deltas)
## Now convert that into a format suitable for CSV output (rows are dates, users are columns).
## This uses a number of nested loops. It's not the most efficient, but it's good enough.
csvout = []
csvout.append(['Date'] + usernames)
### This gives us the row structure.
for d in alldates:
    row = [d]
    ### This loop ensures the correct column order.
    for u in usernames:
        ### Look at each delta entry for the given user and see if it matches the date.
        found = False
        for delta in deltadata[u]:
            if (delta[0] == d):
                found = True
                row.append(delta[1])
                break
        if not found:
            row.append(None)
    csvout.append(row)
## Print it!
with open(os.path.join(settings['csvdir'], 'individual_platdonated.csv'), 'w', newline='') as csvfile:
    csvw = csv.writer(csvfile, dialect=csv.excel)
    for row in csvout:
        csvw.writerow(row)
# Activity status: members inactive for at least one day (86400 s) in the
# latest snapshot, sorted by inactivity, written to individual_lastactive.csv.
c.execute("SELECT MAX(datestamp) FROM members")
maxdate = c.fetchone()[0]
c.execute("SELECT username, (STRFTIME('%s', 'now') - lastactive) AS inactive FROM members WHERE datestamp=? AND inactive>= 86400 ORDER BY inactive", [maxdate])
recs = c.fetchall()
# Convert seconds of inactivity into whole days.
recs = [(x[0], math.floor(x[1]/86400)) for x in recs]
with open(os.path.join(settings['csvdir'], 'individual_lastactive.csv'), 'w', newline='') as csvfile:
    csvw = csv.writer(csvfile, dialect=csv.excel)
    csvw.writerow(["Member", "Time Inactive"])
    for row in recs:
        csvw.writerow(row)
# Per-user average daily actions (outlier-trimmed), sorted ascending,
# written to individual_avgacts.csv.
## Get max date.
c.execute("SELECT MAX(datestamp) FROM members")
maxdate = c.fetchone()[0]
## Get list of current users.
c.execute("SELECT DISTINCT(username) FROM members where datestamp=? ORDER BY username COLLATE NOCASE", [maxdate])
usernames = [x[0] for x in c.fetchall()]
## Now get their total action data.
# Fraction to trim from each end of the per-user delta list.
actions_outliers_percent = 0.1
if 'actions_outliers_percent' in settings:
    actions_outliers_percent = settings['actions_outliers_percent']
avgacts = list()
for u in usernames:
    totals = []
    if (actiondays > 0):
        c.execute("SELECT totalacts FROM members WHERE username=? AND (julianday('now') - julianday(datestamp) <= ?) ORDER BY datestamp", [u, actiondays])
    else:
        c.execute("SELECT totalacts FROM members WHERE username=? ORDER BY datestamp", [u])
    for row in c:
        totals.append(row[0])
    # NOTE(review): a user with no rows in the window makes calcDeltas
    # return None and sorted(None) raise — assumes current members always
    # have at least one row.
    deltas = calcDeltas(totals)
    deltas = trimOutliers(deltas, actions_outliers_percent)
    avg = round(sum(deltas) / len(deltas))
    avgacts.append((u, avg))
## Sort by average, lowest first.
avgacts = sorted(avgacts, key=lambda x: x[1])
## Print it!
with open(os.path.join(settings['csvdir'], 'individual_avgacts.csv'), 'w', newline='') as csvfile:
    csvw = csv.writer(csvfile, dialect=csv.excel)
    csvw.writerow(["Member","Average Actions"])
    for row in avgacts:
        csvw.writerow(row)
# Per-user median daily actions (outlier-trimmed), sorted ascending,
# written to individual_medacts.csv.  Mirrors the average-actions section
# and reuses actions_outliers_percent from it.
## Get max date.
c.execute("SELECT MAX(datestamp) FROM members")
maxdate = c.fetchone()[0]
## Get list of current users.
c.execute("SELECT DISTINCT(username) FROM members where datestamp=? ORDER BY username COLLATE NOCASE", [maxdate])
usernames = [x[0] for x in c.fetchall()]
## Now get their total action data.
medacts = list()
for u in usernames:
    totals = []
    if (actiondays > 0):
        c.execute("SELECT totalacts FROM members WHERE username=? AND (julianday('now') - julianday(datestamp) <= ?) ORDER BY datestamp", [u, actiondays])
    else:
        c.execute("SELECT totalacts FROM members WHERE username=? ORDER BY datestamp", [u])
    for row in c:
        totals.append(row[0])
    deltas = calcDeltas(totals)
    deltas = trimOutliers(deltas, actions_outliers_percent)
    median = round(calcMedian(deltas))
    medacts.append((u, median))
## Sort by median, lowest first.
medacts = sorted(medacts, key=lambda x: x[1])
## Print it!
with open(os.path.join(settings['csvdir'], 'individual_medacts.csv'), 'w', newline='') as csvfile:
    csvw = csv.writer(csvfile, dialect=csv.excel)
    csvw.writerow(["Member","Median Actions"])
    for row in medacts:
        csvw.writerow(row)
# Treasury status (single graph): raw per-date resource levels streamed
# straight from the clan table into clan_treasury.csv.
with open(os.path.join(settings['csvdir'], 'clan_treasury.csv'), 'w', newline='') as csvfile:
    csvw = csv.writer(csvfile, dialect=csv.excel)
    csvw.writerow(["Date","Crystals", "Platinum", "Gold", "Food", "Wood", "Iron", "Stone"])
    if (clandays > 0):
        c.execute("SELECT datestamp, crystals, platinum, gold, food, wood, iron, stone FROM clan WHERE (julianday('now') - julianday(datestamp) <= ?) ORDER BY datestamp", (clandays,))
    else:
        c.execute("SELECT datestamp, crystals, platinum, gold, food, wood, iron, stone FROM clan ORDER BY datestamp")
    # Rows are written as-is; the SELECT column order matches the header.
    for row in c:
        csvw.writerow(row)
# Battler/harvest ratio per current member, written to individual_ratios.csv.
# Ratio = battle actions / (battle + harvest-type actions) over the window,
# computed from max-min spans of the cumulative counters.
## Get max date.
c.execute("SELECT MAX(datestamp) FROM members")
maxdate = c.fetchone()[0]
## Get list of current users.
c.execute("SELECT DISTINCT(username) FROM members where datestamp=? ORDER BY username COLLATE NOCASE", [maxdate])
usernames = [x[0] for x in c.fetchall()]
## Get battle/harvest data.
treedata = list()
if (actiondays > 0):
    c.execute("SELECT username, ((max(kills)-min(kills))+(max(deaths)-min(deaths))) AS battles, ( (max(harvests)-min(harvests))+(max(craftingacts)-min(craftingacts))+(max(carvingacts)-min(carvingacts)) ) AS harvests FROM members WHERE (julianday('now') - julianday(datestamp) <= ?) GROUP BY username", [actiondays])
else:
    c.execute("SELECT username, ((max(kills)-min(kills))+(max(deaths)-min(deaths))) AS battles, ( (max(harvests)-min(harvests))+(max(craftingacts)-min(craftingacts))+(max(carvingacts)-min(carvingacts)) ) AS harvests FROM members GROUP BY username")
for row in c:
    # Skip ex-members: only users in the latest snapshot are reported.
    if row[0] in usernames:
        total = row[1] + row[2]
        # Ratio defaults to 0 when the member performed no actions at all.
        ratio = 0
        if (total > 0):
            ratio = round(row[1] / total, 2)
        treedata.append((row[0], ratio))
# Sort by ratio, then case-insensitively by name to break ties.
treedata = sorted(treedata, key=lambda x: (x[1], x[0].lower()))
with open(os.path.join(settings['csvdir'], 'individual_ratios.csv'), 'w', newline='') as csvfile:
    csvw = csv.writer(csvfile, dialect=csv.excel)
    csvw.writerow(["Member", "Ratio"])
    for row in treedata:
        csvw.writerow(row)
# Rankings table: all top-100 rank rows dumped as JSON (DataTables-style
# {'data': [...]} shape) to ranks.json.
ranks = {'data': []}
for row in c.execute("SELECT username, skill, rank, level FROM ranks WHERE rank<=100"):
    ranks['data'].append(row)
with open(os.path.join(settings['csvdir'], 'ranks.json'), 'w', newline='') as csvfile:
    json.dump(ranks, csvfile)
#Nearest clans
# lvldata = list()
# xpdata = list()
# if (clandays > 0):
# c.execute("SELECT datestamp, ours, above, below FROM nearestclans WHERE (julianday('now') - julianday(datestamp) <= ?) ORDER BY datestamp", (clandays,))
# else:
# c.execute("SELECT datestamp, ours, above, below FROM nearestclans ORDER BY datestamp")
# for row in c:
# lvlnode = [row[0]]
# xpnode = [row[0]]
# if row[2] is not None:
# lvlnode.append(abs(row[1] - row[2]))
# xpnode.append(abs(level2xp(row[1]) - level2xp(row[2])))
# else:
# lvlnode.append(None)
# xpnode.append(None)
# if row[3] is not None:
# lvlnode.append(abs(row[1] - row[3]))
# xpnode.append(abs(level2xp(row[1]) - level2xp(row[3])))
# else:
# lvlnode.append(None)
# xpnode.append(None)
# lvldata.append(lvlnode)
# xpdata.append(xpnode)
# with open(os.path.join(settings['csvdir'], 'clan_nearest_lvl.csv'), 'w', newline='') as csvfile:
# csvw = csv.writer(csvfile, dialect=csv.excel)
# csvw.writerow(["Date","Above", "Below"])
# for row in lvldata:
# csvw.writerow(row)
# with open(os.path.join(settings['csvdir'], 'clan_nearest_xp.csv'), 'w', newline='') as csvfile:
# csvw = csv.writer(csvfile, dialect=csv.excel)
# csvw.writerow(["Date","Above", "Below"])
# for row in xpdata:
# csvw.writerow(row)
# Kill-to-death statistics per current member: daily kill deltas, death
# deltas, K/D ratio and kill percentage, each pivoted (rows = dates,
# columns = users) into its own CSV.
## First get maxdate.
c.execute("SELECT MAX(datestamp) FROM members")
maxdate = c.fetchone()[0]
## Get list of current members.
c.execute("SELECT DISTINCT(username) FROM members WHERE datestamp=? ORDER BY username COLLATE NOCASE", [maxdate])
usernames = [x[0] for x in c.fetchall()]
## Get list of distinct dates.
if (clandays > 0):
    c.execute("SELECT DISTINCT(datestamp) FROM members WHERE (julianday('now') - julianday(datestamp) <= ?) ORDER BY datestamp", (clandays,))
else:
    c.execute("SELECT DISTINCT(datestamp) FROM members ORDER BY datestamp")
alldates = [x[0] for x in c.fetchall()]
# First date carries no delta.
alldates.pop(0)
## Now get their kill/death counter data.
rawkills = dict()
rawdeaths = dict()
for u in usernames:
    rawkills[u] = list()
    rawdeaths[u] = list()
    if (clandays > 0):
        c.execute("SELECT datestamp, kills, deaths FROM members WHERE username=? AND (julianday('now') - julianday(datestamp) <= ?)", [u, clandays])
    else:
        c.execute("SELECT datestamp, kills, deaths FROM members WHERE username=?", [u])
    for row in c:
        rawkills[u].append((row[0], row[1]))
        rawdeaths[u].append((row[0], row[2]))
## Now turn that into deltas for each user.
kdeltadata = dict()
ddeltadata = dict()
kdratio = dict()
kdpercent = dict()
for u in usernames:
    kdates = [x[0] for x in rawkills[u]]
    kcounts = [x[1] for x in rawkills[u]]
    ddates = [x[0] for x in rawdeaths[u]]
    dcounts = [x[1] for x in rawdeaths[u]]
    kdeltas = calcDeltas(kcounts)
    ddeltas = calcDeltas(dcounts)
    # Per-day K/D ratio (None when no deaths) and kill percentage
    # (None when no battles at all).
    avgnode = list()
    kdpnode = list()
    for i in range(len(kdeltas)):
        avg = None
        kdp = None
        if ddeltas[i] > 0:
            avg = kdeltas[i] / ddeltas[i]
        avgnode.append(avg)
        if (kdeltas[i] + ddeltas[i]) > 0:
            kdp = kdeltas[i] / (kdeltas[i] + ddeltas[i])
        kdpnode.append(kdp)
    kdeltadata[u] = buildData(kdates, kdeltas)
    ddeltadata[u] = buildData(ddates, ddeltas)
    kdratio[u] = buildData(kdates, avgnode)
    kdpercent[u] = buildData(kdates, kdpnode)
## Now convert each series into CSV shape (rows are dates, users are columns).
## This uses a number of nested loops. It's not the most efficient, but it's good enough.
kcsvout = []
kcsvout.append(['Date'] + usernames)
### This gives us the row structure.
for d in alldates:
    row = [d]
    ### This loop ensures the correct column order.
    for u in usernames:
        ### Look at each delta entry for the given user and see if it matches the date.
        found = False
        for delta in kdeltadata[u]:
            if (delta[0] == d):
                found = True
                row.append(delta[1])
                break
        if not found:
            row.append(None)
    kcsvout.append(row)
dcsvout = []
dcsvout.append(['Date'] + usernames)
### Same pivot for death deltas.
for d in alldates:
    row = [d]
    for u in usernames:
        found = False
        for delta in ddeltadata[u]:
            if (delta[0] == d):
                found = True
                row.append(delta[1])
                break
        if not found:
            row.append(None)
    dcsvout.append(row)
kdcsvout = []
kdcsvout.append(['Date'] + usernames)
### Same pivot for K/D ratio.
for d in alldates:
    row = [d]
    for u in usernames:
        found = False
        for delta in kdratio[u]:
            if (delta[0] == d):
                found = True
                row.append(delta[1])
                break
        if not found:
            row.append(None)
    kdcsvout.append(row)
kdpcsvout = []
kdpcsvout.append(['Date'] + usernames)
### Same pivot for kill percentage.
for d in alldates:
    row = [d]
    for u in usernames:
        found = False
        for delta in kdpercent[u]:
            if (delta[0] == d):
                found = True
                row.append(delta[1])
                break
        if not found:
            row.append(None)
    kdpcsvout.append(row)
## Print it!
with open(os.path.join(settings['csvdir'], 'individual_kills.csv'), 'w', newline='') as csvfile:
    csvw = csv.writer(csvfile, dialect=csv.excel)
    for row in kcsvout:
        csvw.writerow(row)
with open(os.path.join(settings['csvdir'], 'individual_deaths.csv'), 'w', newline='') as csvfile:
    csvw = csv.writer(csvfile, dialect=csv.excel)
    for row in dcsvout:
        csvw.writerow(row)
with open(os.path.join(settings['csvdir'], 'individual_kdratio.csv'), 'w', newline='') as csvfile:
    csvw = csv.writer(csvfile, dialect=csv.excel)
    for row in kdcsvout:
        csvw.writerow(row)
with open(os.path.join(settings['csvdir'], 'individual_kdpercent.csv'), 'w', newline='') as csvfile:
    csvw = csv.writer(csvfile, dialect=csv.excel)
    for row in kdpcsvout:
        csvw.writerow(row)
# XP donations aggregated by 10-level slice of member level, written as
# JSON: {slice_base: (sorted usernames, % of total xp, avg % per member)}.
## Get latest date.
c.execute("SELECT MAX(datestamp) FROM members")
maxdate = c.fetchone()[0]
## Get list of all current members.
c.execute("SELECT DISTINCT(username) FROM members WHERE datestamp=? ORDER BY username COLLATE NOCASE", [maxdate])
usernames = [x[0] for x in c.fetchall()]
## Get list of distinct dates.
if (byslicedays > 0):
    c.execute("SELECT DISTINCT(datestamp) FROM members WHERE (julianday('now') - julianday(datestamp) <= ?) ORDER BY datestamp", (byslicedays,))
else:
    c.execute("SELECT DISTINCT(datestamp) FROM members ORDER BY datestamp")
alldates = [x[0] for x in c.fetchall()]
alldates.pop(0)
## Collect xp and level data.
rawdata = dict()
for u in usernames:
    rawdata[u] = list()
    if (byslicedays > 0):
        c.execute("SELECT datestamp, d_xp, level FROM members WHERE username=? AND (julianday('now') - julianday(datestamp) <= ?)", [u, byslicedays])
    else:
        c.execute("SELECT datestamp, d_xp, level FROM members WHERE username=?", [u])
    for row in c:
        rawdata[u].append((row[0], row[1], row[2]))
## Reduce each user to (highest level seen, total xp donated in window).
deltadata = dict()
for u in usernames:
    dates = [x[0] for x in rawdata[u]]
    counts = [x[1] for x in rawdata[u]]
    levels = [x[2] for x in rawdata[u]]
    deltadata[u] = (max(levels), max(counts) - min(counts))
# Slice boundaries: round the observed level range out to multiples of 10.
levels = [x[0] for x in deltadata.values()]
minlevel = int((min(levels) // 10) * 10)
maxlevel = int(((max(levels) // 10) + 1) * 10)
width = 10
# NOTE(review): xpsum == 0 (nobody donated in the window) would raise
# ZeroDivisionError below — assumed never to happen in practice.
xpsum = sum([x[1] for x in deltadata.values()])
slices = dict()
for base in range(minlevel, maxlevel, width):
    users = [x for x in deltadata.keys() if ( (deltadata[x][0] >= base) and (deltadata[x][0] < base+width) )]
    slicesum = sum([deltadata[x][1] for x in users])
    # Percentages rounded to two decimal places.
    slicepc = round((slicesum / xpsum) * 10000) / 100
    sliceavgabs = 0
    if (len(users) > 0):
        sliceavgabs = slicesum / len(users)
    sliceavgpc = round((sliceavgabs / xpsum) * 10000) / 100
    slices[base] = (sorted(users), slicepc, sliceavgpc)
## Print it!
# with open(os.path.join(settings['csvdir'], 'xpdonations_byslice.csv'), 'w', newline='') as csvfile:
#     csvw = csv.writer(csvfile, dialect=csv.excel)
#     csvw.writerow(["Slice","% xp donations"])
#     for base in sorted(slices.keys()):
#         csvw.writerow(("{}--{} ({} members)".format(base, base+9, len(slices[base][0])), slices[base][1]))
### JSON version
with open(os.path.join(settings['csvdir'], 'xpdonations_byslice.json'), 'w', newline='') as jsonfile:
    jsonfile.write(json.dumps(slices))
# #Days per level
# ## Get latest date
# c.execute("SELECT MAX(datestamp) FROM members")
# maxdate = c.fetchone()[0]
# ## Get list of all current members
# c.execute("SELECT DISTINCT(username) FROM members WHERE datestamp=? ORDER BY username COLLATE NOCASE", [maxdate])
# usernames = [x[0] for x in c.fetchall()]
# ## Collect xp and level data
# data = list()
# for u in usernames:
# lvldays = dict()
# if (leveldays > 0):
# c.execute("SELECT datestamp, level FROM members WHERE username=? AND (julianday('now') - julianday(datestamp) <= ?) ORDER BY datestamp", [u, leveldays])
# else:
# c.execute("SELECT datestamp, level FROM members WHERE username=? ORDER BY datestamp", [u])
# for row in c:
# level = row[1]
# if (level in lvldays):
# lvldays[level] += 1
# else:
# lvldays[level] = 1
# avg = 0
# if (len(lvldays) > 0):
# avg = sum(lvldays.values()) / len(lvldays)
# data.append((u, avg))
# ## Print it!
# with open(os.path.join(settings['csvdir'], 'avg_days_in_level.csv'), 'w', newline='') as csvfile:
# csvw = csv.writer(csvfile, dialect=csv.excel)
# csvw.writerow(["Member","Average days in level"])
# for row in data:
# csvw.writerow(row)
# Days per level: for each current member, average number of days spent in
# each fully-completed recent level, written as JSON tuples
# (username, average, levels counted).
## Get latest date.
c.execute("SELECT MAX(datestamp) FROM members")
maxdate = c.fetchone()[0]
## Get list of all current members.
c.execute("SELECT DISTINCT(username) FROM members WHERE datestamp=? ORDER BY username COLLATE NOCASE", [maxdate])
usernames = [x[0] for x in c.fetchall()]
## Collect days-per-level data.
data = list()
for u in usernames:
    c.execute("SELECT COUNT(datestamp), level FROM members WHERE username=? GROUP BY level ORDER BY username COLLATE NOCASE;", [u])
    days = c.fetchall()
    # Remove current level because it's not complete yet.
    # NOTE(review): raises IndexError if a member has zero rows — assumed
    # impossible since usernames come from this same table.
    days.pop()
    # If shorter than or equal to max levels, cut off the first element
    # (it was likely entered mid-level, so its day count is partial).
    if ( (len(days) > 0) and ( (leveldays_maxlvls == 0) or (len(days) <= leveldays_maxlvls) ) ):
        days.pop(0)
    # Now trim to length.
    # NOTE(review): with leveldays_maxlvls == 0 this loop empties the list
    # entirely, forcing avg to 0 for everyone — confirm that is intended.
    while (len(days) > leveldays_maxlvls):
        days.pop(0)
    lvldays = [x[0] for x in days]
    avg = 0
    if (len(lvldays) > 0):
        avg = sum(lvldays) / len(lvldays)
    data.append((u, avg, len(days)))
## Print it!
with open(os.path.join(settings['csvdir'], 'avg_days_in_level_full_levels.json'), 'w', newline='') as csvfile:
    csvfile.write(json.dumps(data))
    # csvw = csv.writer(csvfile, dialect=csv.excel)
    # csvw.writerow(["Member","Average days in level"])
    # for row in data:
    #     csvw.writerow(row)
# RIVAL SUMMARY GRAPHS
#
# Every metric below is gathered once for our own clan ("Us") and once per
# rival clan, merged into a single {date: {clan name: value}} mapping, then
# written as a wide CSV: a "Date" column followed by one column per clan in
# `allclans` order, with missing samples left blank.

def _merge_series(summary, name, series):
    """Fold a [(date, value), ...] series into *summary* under clan *name*."""
    for date, value in series:
        summary.setdefault(date, {})[name] = value

def _delta_series(recs, maxdev):
    """Turn raw (date, cumulative total) rows into day-over-day deltas,
    trimmed of points further than *maxdev* standard deviations out."""
    dates = [row[0] for row in recs]
    deltas = calcDeltas([row[1] for row in recs])
    return trimDataByStd(buildData(dates, deltas), maxdev)

def _write_summary_csv(filename, summary):
    """Write *summary* to csvdir/*filename* with one column per clan."""
    with open(os.path.join(settings['csvdir'], filename), 'w', newline='') as csvfile:
        csvw = csv.writer(csvfile, dialect=csv.excel)
        csvw.writerow(["Date"] + allclans)
        for date in summary.keys():
            csvw.writerow([date] + [summary[date].get(clan) for clan in allclans])

def _summarize_deltas(us_sql, rival_sql, maxdev, filename):
    """Collect a delta-based metric for us and every rival, then dump it."""
    summary = dict()
    c.execute(us_sql)
    _merge_series(summary, "Us", _delta_series(c.fetchall(), maxdev))
    for rival in settings['rivals']:
        c.execute(rival_sql, (rival['id'],))
        _merge_series(summary, rival['name'], _delta_series(c.fetchall(), maxdev))
    _write_summary_csv(filename, summary)

def _summarize_totals(us_sql, rival_sql, filename, value=lambda row: row[1]):
    """Collect an absolute (non-delta) metric for us and every rival.

    *value* extracts the plotted number from a result row, which lets the
    total-XP metric combine the level and partial-XP columns.
    """
    summary = dict()
    c.execute(us_sql)
    recs = c.fetchall()
    _merge_series(summary, "Us",
                  buildData([row[0] for row in recs], [value(row) for row in recs]))
    for rival in settings['rivals']:
        c.execute(rival_sql, (rival['id'],))
        recs = c.fetchall()
        _merge_series(summary, rival['name'],
                      buildData([row[0] for row in recs], [value(row) for row in recs]))
    _write_summary_csv(filename, summary)

## Total XP (level converted to its XP equivalent plus partial XP)
_summarize_totals(
    "SELECT datestamp, level, xp FROM clan WHERE level IS NOT NULL ORDER BY datestamp",
    "SELECT datestamp, level, xp FROM rivalclans WHERE clanid=? ORDER BY datestamp",
    'rivals_totalxp.csv',
    value=lambda row: level2xp(row[1]) + row[2])

## Total actions (daily deltas, outliers beyond 2 std devs dropped)
_summarize_deltas(
    "SELECT datestamp, SUM(totalacts) FROM members GROUP BY datestamp",
    "SELECT datestamp, SUM(totalacts) FROM rivals WHERE clanid=? GROUP BY datestamp",
    2.0, 'rivals_actions.csv')

## Total levels (absolute sums, no delta/trim step)
_summarize_totals(
    "SELECT datestamp, SUM(level) FROM members GROUP BY datestamp",
    "SELECT datestamp, SUM(level) FROM rivals WHERE clanid=? GROUP BY datestamp",
    'rivals_levels.csv')

## Kills (daily deltas, outliers beyond 1 std dev dropped)
_summarize_deltas(
    "SELECT datestamp, SUM(kills) FROM members GROUP BY datestamp",
    "SELECT datestamp, SUM(kills) FROM rivals WHERE clanid=? GROUP BY datestamp",
    1.0, 'rivals_kills.csv')

## Deaths
_summarize_deltas(
    "SELECT datestamp, SUM(deaths) FROM members GROUP BY datestamp",
    "SELECT datestamp, SUM(deaths) FROM rivals WHERE clanid=? GROUP BY datestamp",
    1.0, 'rivals_deaths.csv')

## Harvests
_summarize_deltas(
    "SELECT datestamp, SUM(harvests) FROM members GROUP BY datestamp",
    "SELECT datestamp, SUM(harvests) FROM rivals WHERE clanid=? GROUP BY datestamp",
    1.0, 'rivals_harvests.csv')

## Crafts/Carves (crafting and carving actions combined)
_summarize_deltas(
    "SELECT datestamp, (SUM(craftingacts) + SUM(carvingacts)) AS harvests FROM members GROUP BY datestamp",
    "SELECT datestamp, (SUM(craftingacts) + SUM(carvingacts)) AS harvests FROM rivals WHERE clanid=? GROUP BY datestamp",
    1.0, 'rivals_crafts.csv')

c.close()
conn.close()
| 36.865696 | 1,267 | 0.641531 | 6,335 | 45,566 | 4.59779 | 0.07498 | 0.024445 | 0.042778 | 0.009476 | 0.780925 | 0.761081 | 0.751983 | 0.738353 | 0.724379 | 0.69784 | 0 | 0.039521 | 0.215907 | 45,566 | 1,235 | 1,268 | 36.895547 | 0.775722 | 0.16892 | 0 | 0.644518 | 0 | 0.0299 | 0.229781 | 0.023271 | 0 | 0 | 0 | 0 | 0.002215 | 1 | 0.006645 | false | 0 | 0.006645 | 0 | 0.024363 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
786b5a28083873009be21437d1a5f009343e7cfd | 27,090 | py | Python | tests/test_dataflow/test_dataset/test_playground.py | alexandreMayerowitz/playground-plums | a6be79e4c30c7abcbade5581f052a4e8035a2057 | [
"MIT"
] | null | null | null | tests/test_dataflow/test_dataset/test_playground.py | alexandreMayerowitz/playground-plums | a6be79e4c30c7abcbade5581f052a4e8035a2057 | [
"MIT"
] | null | null | null | tests/test_dataflow/test_dataset/test_playground.py | alexandreMayerowitz/playground-plums | a6be79e4c30c7abcbade5581f052a4e8035a2057 | [
"MIT"
] | 2 | 2021-02-03T12:37:53.000Z | 2022-03-09T03:48:12.000Z | import pytest
import numpy as np
from plums.commons.path import Path
from plums.commons.data import Taxonomy, Label, TileCollection
from plums.dataflow.io import dump, RGB, BGR, Tile
from plums.dataflow.io.tile._backend import Image
from plums.dataflow.dataset.playground import PlaygroundDataset, TaxonomyReader, TileDriver, AnnotationDriver
@pytest.fixture()
def reference_image():
    """Reference pixels of the shared test JPEG, as a plain numpy array."""
    data_dir = Path(__file__)[:-1] / '..' / 'test_io' / 'test_tile' / '_data'
    return np.array(Image.load(data_dir / 'test_jpg.jpg'))
def test_taxonomy_reader(tmp_path):
    """TaxonomyReader must reconstruct the Taxonomy dumped in taxonomy.json."""
    # Write the playground-style taxonomy description file.
    dump({'some_label': {'child': {},
                         'other': {'nested': {}}},
          'root': {}}, tmp_path / 'taxonomy.json')

    # Hand-built Taxonomy equivalent to the dumped nested-dict description.
    expected = Taxonomy(
        Label('some_label',
              children=(Label('child'),
                        Label('other', children=(Label('nested'), )))),
        Label('root'))

    assert TaxonomyReader()(tmp_path) == expected
def test_annotation_driver(tmp_path, json_feature_collection):  # noqa: R701
    """AnnotationDriver parsing, confidence/id key overrides and caching."""
    annotation_path = tmp_path / 'annotation.json'
    annotation_path.write_text(json_feature_collection)

    def _check(annotation, confidence=None,
               record_id='6e73eff2-06f3-11ea-976a-b2cdca212bc0'):
        # The fixture holds exactly one record plus the zone footprint mask.
        assert len(annotation.record_collection) == 1
        record = annotation.record_collection[0]
        assert record.labels == ('tag', 'class')
        if confidence is None:
            assert record.confidence is None
        else:
            # abs() so the check also fails when the value is far too small
            # (the original one-sided comparison silently passed in that case).
            assert abs(record.confidence - confidence) <= 1e-4
        assert record.dataset_id == 'f16fff43-2535-4e34-afec-6404dcdcd545'
        assert record.zone_id == '10187fa3-30df-4eb4-a1e9-6b1dcdc79951'
        assert record.id == record_id
        assert annotation.mask_collection['zone_footprint'].coordinates \
            == [[[0, 0], [0, 256], [256, 256], [256, 0], [0, 0]]]

    # +-> Error: a group may only carry a single annotation file
    driver = AnnotationDriver()
    with pytest.raises(ValueError, match='More than one annotation file was provided'):
        _ = driver((annotation_path, annotation_path), group='value')
    # +-> Base: defaults, no caching
    driver = AnnotationDriver()
    annotation = driver((annotation_path, ), group='value')
    _check(annotation)
    assert (annotation_path, ) not in driver._memcache
    # +--> Reopening yields a fresh object
    assert driver((annotation_path, ), group='value') is not annotation
    # +-> Confidence read from an alternate property key
    annotation = AnnotationDriver(confidence_key='surface')((annotation_path, ), group='value')
    _check(annotation, confidence=64.2146176930851)
    # +-> Record id read from an alternate property key
    annotation = AnnotationDriver(record_id_key='owner_id')((annotation_path, ), group='value')
    _check(annotation, record_id='35e370a9-6b76-4ac6-a3d5-1eeb983c3dc7')
    # +-> Cache: parsed annotations are memoized
    driver = AnnotationDriver(cache=True)
    annotation = driver((annotation_path, ), group='value')
    _check(annotation)
    assert driver._memcache[(annotation_path, )] is annotation
    # +--> Reopening returns the cached object itself
    assert driver((annotation_path, ), group='value') is annotation
def test_tile_driver(reference_image):  # noqa: R701
    """TileDriver default/custom naming, ptype/dtype conversion and the
    name-count validation."""
    jpg = Path(__file__)[:-1] / '..' / 'test_io' / 'test_tile' / '_data' / 'test_jpg.jpg'
    paths = (jpg, jpg, jpg)

    def _check(tiles, names, ptype, dtype, to_reference):
        # A TileCollection of 3 Tile with the expected names, pixel type and
        # dtype, each matching the reference image once converted back with
        # *to_reference*.
        assert isinstance(tiles, TileCollection)
        assert len(tiles) == 3
        assert list(tiles.keys()) == list(names)
        for tile in tiles.values():
            assert isinstance(tile, Tile)
            assert tile.ptype == ptype
            assert tile.dtype == dtype
            assert np.array_equal(reference_image, to_reference(tile))

    default_names = ['tile_0', 'tile_1', 'tile_2']

    # +-> Base: auto-generated names, RGB uint8
    tiles = TileDriver(fetch_ordering=False)(paths, group='value')
    _check(tiles, default_names, RGB, np.uint8, lambda tile: tile)
    # +-> PType
    tiles = TileDriver(ptype=BGR, fetch_ordering=False)(paths, group='value')
    _check(tiles, default_names, BGR, np.uint8, lambda tile: tile.astype(ptype=RGB))
    # +-> DType
    tiles = TileDriver(dtype=np.float64, fetch_ordering=False)(paths, group='value')
    _check(tiles, default_names, RGB, np.float64, lambda tile: tile.astype(dtype=np.uint8))
    # +-> Names: the name count must match the tile count exactly
    with pytest.raises(ValueError, match='The number of tiles is incompatible with the provided number'):
        _ = TileDriver('not', 'enough', fetch_ordering=False)(paths, group='value')
    with pytest.raises(ValueError, match='The number of tiles is incompatible with the provided number'):
        _ = TileDriver('too', 'many', 'names', 'provided', fetch_ordering=False)(paths, group='value')
    names = ['some', 'tile', 'set']
    tiles = TileDriver(*names, fetch_ordering=False)(paths, group='value')
    _check(tiles, names, RGB, np.uint8, lambda tile: tile.astype(dtype=np.uint8))
    # +-> All options combined
    tiles = TileDriver(*names, ptype=BGR, dtype=np.float64, fetch_ordering=False)(paths, group='value')
    _check(tiles, names, BGR, np.float64, lambda tile: tile.astype(ptype=RGB, dtype=np.uint8))
def test_base(playground_tree, reference_image):
    """Index construction, tile ordering and annotation parsing on the
    reference playground tree."""
    root, paths = playground_tree
    dataset = PlaygroundDataset(root, use_taxonomy=False)

    # Expected (dataset id, zone id, tile md5) index entries, in order.
    expected_groups = [
        ('1af6c4c5-278d-40ae-9e32-dc8192f8402a',
         '2411dbb6-e7bf-41fd-8898-83325a9c6e5a',
         '4a8a08f09d37b73795649038408b5f33'),
        ('1af6c4c5-278d-40ae-9e32-dc8192f8402a',
         'c3e8b68b-f862-41bd-848c-6e2df28e4dd8',
         '92eb5ffee6ae2fec3ad71c777531578b'),
        ('63d0da07-0a4b-4ffd-844f-af75c02288e0',
         'b4d9ffe3-ab2d-4f18-b1c5-b4c3d9b2f6f7',
         '0cc175b9c0f1b6a831c399e269772661'),
        ('63d0da07-0a4b-4ffd-844f-af75c02288e0',
         'b4d9ffe3-ab2d-4f18-b1c5-b4c3d9b2f6f7',
         '453e41d218e071ccfb2d1c99ce23906a'),
        ('63d0da07-0a4b-4ffd-844f-af75c02288e0',
         'fa719db8-31e9-49d1-9344-d4608ef6417e',
         '7c47df1097b349278c052e93e1d1903a'),
    ]
    assert len(dataset) == 5
    for index, group in enumerate(expected_groups):
        assert dataset._group_index[index] == group

    for index, tile_count in enumerate((2, 1, 1, 1, 2)):
        assert len(dataset[index].tiles) == tile_count
    assert np.array_equal(reference_image, dataset[0].tiles.iloc[0])

    # Test ordering: tiles come out in the order recorded in the summaries.
    assert tuple(tile.image_id for tile in dataset[0].tiles.values()) \
        == ("4e15b4a3-ee52-4382-b8a8-7d492fb1a6ed",
            "5562b632-72c3-4c21-b24e-e0536d8b20c8")
    assert tuple(tile.image_id for tile in dataset[4].tiles.values()) \
        == ("f9525e3bfbd081cd545261b3b5414eb88f689005",
            "75ad128196254e711ef7c9b129d1c59153098b18")

    # Annotation content of the first group.
    assert len(dataset[0].annotation.record_collection) == 1
    record = dataset[0].annotation.record_collection[0]
    assert record.labels == ('tag', 'class')
    assert record.dataset_id == 'f16fff43-2535-4e34-afec-6404dcdcd545'
    assert record.zone_id == '10187fa3-30df-4eb4-a1e9-6b1dcdc79951'
    assert record.id == '6e73eff2-06f3-11ea-976a-b2cdca212bc0'
    assert dataset[0].annotation.mask_collection['zone_footprint'].coordinates \
        == [[[0, 0], [0, 256], [256, 256], [256, 0], [0, 0]]]
def test_select_exclude(playground_tree):  # noqa: R701
    """select_*/exclude_* filtering at every level (dataset, zone, image,
    tile), alone and combined — exclusions win over selections."""
    root, paths = playground_tree

    # The five (dataset id, zone id, tile md5) groups of the fixture tree.
    group_a = ('1af6c4c5-278d-40ae-9e32-dc8192f8402a',
               '2411dbb6-e7bf-41fd-8898-83325a9c6e5a',
               '4a8a08f09d37b73795649038408b5f33')
    group_b = ('1af6c4c5-278d-40ae-9e32-dc8192f8402a',
               'c3e8b68b-f862-41bd-848c-6e2df28e4dd8',
               '92eb5ffee6ae2fec3ad71c777531578b')
    group_c = ('63d0da07-0a4b-4ffd-844f-af75c02288e0',
               'b4d9ffe3-ab2d-4f18-b1c5-b4c3d9b2f6f7',
               '0cc175b9c0f1b6a831c399e269772661')
    group_d = ('63d0da07-0a4b-4ffd-844f-af75c02288e0',
               'b4d9ffe3-ab2d-4f18-b1c5-b4c3d9b2f6f7',
               '453e41d218e071ccfb2d1c99ce23906a')
    group_e = ('63d0da07-0a4b-4ffd-844f-af75c02288e0',
               'fa719db8-31e9-49d1-9344-d4608ef6417e',
               '7c47df1097b349278c052e93e1d1903a')

    def _assert_groups(dataset, *groups):
        # The index must contain exactly *groups*, in order.
        assert len(dataset) == len(groups)
        for index, group in enumerate(groups):
            assert dataset._group_index[index] == group

    # Dataset:
    # +-> Select:
    _assert_groups(PlaygroundDataset(root, use_taxonomy=False,
                                     select_datasets=('63d0da07-0a4b-4ffd-844f-af75c02288e0', )),
                   group_c, group_d, group_e)
    # +-> Exclude:
    _assert_groups(PlaygroundDataset(root, use_taxonomy=False,
                                     exclude_datasets=('1af6c4c5-278d-40ae-9e32-dc8192f8402a', )),
                   group_c, group_d, group_e)
    # +-> Both (contradictory filters leave nothing, which is fatal):
    with pytest.raises(ValueError, match='Invalid dataset: No matches where found between tiles and annotation'):
        _ = PlaygroundDataset(root, use_taxonomy=False,
                              select_datasets=('63d0da07-0a4b-4ffd-844f-af75c02288e0', ),
                              exclude_datasets=('63d0da07-0a4b-4ffd-844f-af75c02288e0', ))

    # Zone:
    # +-> Select:
    _assert_groups(PlaygroundDataset(root, use_taxonomy=False,
                                     select_zones=('b4d9ffe3-ab2d-4f18-b1c5-b4c3d9b2f6f7',
                                                   'c3e8b68b-f862-41bd-848c-6e2df28e4dd8')),
                   group_b, group_c, group_d)
    # +-> Exclude:
    _assert_groups(PlaygroundDataset(root, use_taxonomy=False,
                                     exclude_zones=('b4d9ffe3-ab2d-4f18-b1c5-b4c3d9b2f6f7',
                                                    'c3e8b68b-f862-41bd-848c-6e2df28e4dd8')),
                   group_a, group_e)
    # +-> Both:
    _assert_groups(PlaygroundDataset(root, use_taxonomy=False,
                                     select_zones=('b4d9ffe3-ab2d-4f18-b1c5-b4c3d9b2f6f7',
                                                   'c3e8b68b-f862-41bd-848c-6e2df28e4dd8'),
                                     exclude_zones=('c3e8b68b-f862-41bd-848c-6e2df28e4dd8', )),
                   group_c, group_d)

    # Image:
    # +-> Select:
    dataset = PlaygroundDataset(root, use_taxonomy=False,
                                select_images=('S2B_MSIL1C_20200212T025609_N0209_R003_T47DMH_20200212T054548',
                                               'f9525e3bfbd081cd545261b3b5414eb88f689005'))
    _assert_groups(dataset, group_d, group_e)
    assert len(dataset[0].tiles) == 1
    assert len(dataset[1].tiles) == 1
    # +-> Exclude (the excluded images' tiles also disappear from group A):
    dataset = PlaygroundDataset(root, use_taxonomy=False,
                                exclude_images=('S2B_MSIL1C_20200212T025609_N0209_R003_T47DMH_20200212T054548',
                                                'f9525e3bfbd081cd545261b3b5414eb88f689005'))
    _assert_groups(dataset, group_a, group_b, group_c, group_e)
    for index, tile_count in enumerate((2, 1, 1, 1)):
        assert len(dataset[index].tiles) == tile_count
    # +-> Both:
    dataset = PlaygroundDataset(root, use_taxonomy=False,
                                select_images=('S2B_MSIL1C_20200212T025609_N0209_R003_T47DMH_20200212T054548',
                                               'f9525e3bfbd081cd545261b3b5414eb88f689005',
                                               '75ad128196254e711ef7c9b129d1c59153098b18'),
                                exclude_images=('S2B_MSIL1C_20200212T025609_N0209_R003_T47DMH_20200212T054548',
                                                '75ad128196254e711ef7c9b129d1c59153098b18', ))
    _assert_groups(dataset, group_e)
    assert len(dataset[0].tiles) == 1

    # Tile:
    # +-> Select:
    _assert_groups(PlaygroundDataset(root, use_taxonomy=False,
                                     select_tiles=('4a8a08f09d37b73795649038408b5f33',
                                                   '0cc175b9c0f1b6a831c399e269772661',
                                                   '7c47df1097b349278c052e93e1d1903a')),
                   group_a, group_c, group_e)
    # +-> Exclude:
    _assert_groups(PlaygroundDataset(root, use_taxonomy=False,
                                     exclude_tiles=('92eb5ffee6ae2fec3ad71c777531578b', )),
                   group_a, group_c, group_d, group_e)
    # +-> Both:
    _assert_groups(PlaygroundDataset(root, use_taxonomy=False,
                                     select_tiles=('4a8a08f09d37b73795649038408b5f33',
                                                   '0cc175b9c0f1b6a831c399e269772661',
                                                   '7c47df1097b349278c052e93e1d1903a'),
                                     exclude_tiles=('7c47df1097b349278c052e93e1d1903a', )),
                   group_a, group_c)
def test_select_exclude_composition(playground_tree):
    """Filters at different levels compose: all constraints apply at once."""
    root, _ = playground_tree
    dataset = PlaygroundDataset(
        root, use_taxonomy=False,
        exclude_datasets=('1af6c4c5-278d-40ae-9e32-dc8192f8402a', ),
        select_zones=('fa719db8-31e9-49d1-9344-d4608ef6417e', ),
        exclude_images=('f9525e3bfbd081cd545261b3b5414eb88f689005', ))
    # Only a single group survives, with one of its two tiles filtered out.
    assert len(dataset) == 1
    assert dataset._group_index[0] == ('63d0da07-0a4b-4ffd-844f-af75c02288e0',
                                       'fa719db8-31e9-49d1-9344-d4608ef6417e',
                                       '7c47df1097b349278c052e93e1d1903a')
    assert len(dataset[0].tiles) == 1
def test_pass_taxonomy(playground_tree):
    """Fetching an item with use_taxonomy=True raises on this fixture tree."""
    root, _ = playground_tree
    dataset = PlaygroundDataset(root, use_taxonomy=True)
    with pytest.raises(ValueError):
        dataset[0]
def test_taxonomy_conflict_raise(playground_tree_conflict):
    """Mismatching per-dataset taxonomies are fatal when the taxonomy is used."""
    root, _ = playground_tree_conflict
    with pytest.raises(ValueError, match='Some datasets have mismatching taxonomies'):
        PlaygroundDataset(root, use_taxonomy=True)
def test_taxonomy_conflict_warn(playground_tree_conflict):
    """Mismatching taxonomies only warn when the taxonomy is not used."""
    root, _ = playground_tree_conflict
    with pytest.warns(UserWarning, match='Some datasets have mismatching taxonomies'):
        PlaygroundDataset(root, use_taxonomy=False)
def test_fetch_ordering_missing_image(playground_tree_summary_missing_image):
    """An image absent from the summaries breaks ordered fetching, while a
    driver with fetch_ordering=False still serves the tile."""
    root, _ = playground_tree_summary_missing_image
    strict = PlaygroundDataset(root, use_taxonomy=False)
    with pytest.raises(ValueError, match='Invalid dataset: Some images seem to be missing from the summaries'):
        strict[5]
    lenient = PlaygroundDataset(root, use_taxonomy=False,
                                tile_driver=TileDriver(fetch_ordering=False))
    assert isinstance(lenient[5].tiles.iloc[0], Tile)
def test_fetch_ordering_missing_zone(playground_tree_summary_missing_zone):
    """A zone absent from the summaries breaks ordered fetching, while a
    driver with fetch_ordering=False still serves the tile."""
    root, _ = playground_tree_summary_missing_zone
    strict = PlaygroundDataset(root, use_taxonomy=False)
    with pytest.raises(ValueError, match='Invalid dataset: Some zones or datasets seem to be '
                                         'missing from the summaries'):
        strict[1]
    lenient = PlaygroundDataset(root, use_taxonomy=False,
                                tile_driver=TileDriver(fetch_ordering=False))
    assert isinstance(lenient[1].tiles.iloc[0], Tile)
def test_fetch_ordering_missing_dataset(playground_tree_summary_missing_dataset):
    """A dataset absent from the summaries breaks ordered fetching, while a
    driver with fetch_ordering=False still serves the tile."""
    root, _ = playground_tree_summary_missing_dataset
    strict = PlaygroundDataset(root, use_taxonomy=False)
    with pytest.raises(ValueError, match='Invalid dataset: Some zones or datasets seem to be '
                                         'missing from the summaries'):
        strict[0]
    lenient = PlaygroundDataset(root, use_taxonomy=False,
                                tile_driver=TileDriver(fetch_ordering=False))
    assert isinstance(lenient[0].tiles.iloc[0], Tile)
def test_fetch_ordering_missing_summaries(playground_tree_summary_missing_summaries):
    """Absent summary files raise FileNotFoundError unless fetch ordering is off."""
    tree_root, _ = playground_tree_summary_missing_summaries

    strict = PlaygroundDataset(tree_root, use_taxonomy=False)
    with pytest.raises(FileNotFoundError,
                       match='Invalid dataset: No file summaries could be found'):
        strict[0]

    # Disabling fetch ordering makes the same access succeed.
    lenient = PlaygroundDataset(tree_root, use_taxonomy=False,
                                tile_driver=TileDriver(fetch_ordering=False))
    assert isinstance(lenient[0].tiles.iloc[0], Tile)
| 57.515924 | 118 | 0.609192 | 2,686 | 27,090 | 5.946761 | 0.089352 | 0.032555 | 0.039442 | 0.050398 | 0.857322 | 0.830589 | 0.816127 | 0.809679 | 0.787829 | 0.767858 | 0 | 0.175274 | 0.266667 | 27,090 | 470 | 119 | 57.638298 | 0.628763 | 0.013511 | 0 | 0.696237 | 0 | 0 | 0.270936 | 0.207426 | 0 | 0 | 0 | 0 | 0.38172 | 1 | 0.037634 | false | 0.002688 | 0.018817 | 0.002688 | 0.05914 | 0.013441 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
7873368a0ed09fce4544a2b27786ebeec2bd5f3f | 67 | py | Python | smsframework/providers/forward/__init__.py | JoshBBOXX/py-smsframework | 4f3d812711f5e2e037dc80c4014c815fe2d68a0b | [
"BSD-2-Clause"
] | 14 | 2015-08-20T23:26:51.000Z | 2022-02-17T17:41:35.000Z | smsframework/providers/forward/__init__.py | JoshBBOXX/py-smsframework | 4f3d812711f5e2e037dc80c4014c815fe2d68a0b | [
"BSD-2-Clause"
] | 2 | 2015-08-20T20:46:25.000Z | 2020-05-30T14:05:57.000Z | smsframework/providers/forward/__init__.py | JoshBBOXX/py-smsframework | 4f3d812711f5e2e037dc80c4014c815fe2d68a0b | [
"BSD-2-Clause"
] | 6 | 2015-06-15T16:10:59.000Z | 2020-01-24T23:07:48.000Z | from .provider import ForwardClientProvider, ForwardServerProvider
| 33.5 | 66 | 0.895522 | 5 | 67 | 12 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.074627 | 67 | 1 | 67 | 67 | 0.967742 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
152eac24b88ebd07da6d0484ea512d57a6244683 | 2,350 | py | Python | Cryptography/Mini RSA/main.py | YanickT/picoCTF2021 | 7b3fcdf9d8375d4428cf1fa2fdf4b981bee7d69f | [
"MIT"
] | null | null | null | Cryptography/Mini RSA/main.py | YanickT/picoCTF2021 | 7b3fcdf9d8375d4428cf1fa2fdf4b981bee7d69f | [
"MIT"
] | null | null | null | Cryptography/Mini RSA/main.py | YanickT/picoCTF2021 | 7b3fcdf9d8375d4428cf1fa2fdf4b981bee7d69f | [
"MIT"
] | null | null | null | from decimal import *
N = Decimal(1615765684321463054078226051959887884233678317734892901740763321135213636796075462401950274602405095138589898087428337758445013281488966866073355710771864671726991918706558071231266976427184673800225254531695928541272546385146495736420261815693810544589811104967829354461491178200126099661909654163542661541699404839644035177445092988952614918424317082380174383819025585076206641993479326576180793544321194357018916215113009742654408597083724508169216182008449693917227497813165444372201517541788989925461711067825681947947471001390843774746442699739386923285801022685451221261010798837646928092277556198145662924691803032880040492762442561497760689933601781401617086600593482127465655390841361154025890679757514060456103104199255917164678161972735858939464790960448345988941481499050248673128656508055285037090026439683847266536283160142071643015434813473463469733112182328678706702116054036618277506997666534567846763938692335069955755244438415377933440029498378955355877502743215305768814857864433151287)
e = Decimal(3)
c = Decimal(1220012318588871886132524757898884422174534558055593713309088304910273991073554732659977133980685370899257850121970812405700793710546674062154237544840177616746805668666317481140872605653768484867292138139949076102907399831998827567645230986345455915692863094364797526497302082734955903755050638155202890599808154558034707767377524500302754459807923331810585173010977657982069888996945830789092526932364658459034145456505057469113036134559745659079236466119515004648189278227777550415021840140147319061470183840214034417917161940379351273394212022847037696265532968684592354941479799473941357715953204487236888712642494877545201005807776354854390358015733495331101077851132489983665939643188064986446883595239842621440918456201787168234988410659153219277329426230136499096098072681939491840913961290536851217677043565743644469862992310241563891464225935615676242084658617931225618537173689559419607688905143683603007487996422560430269750305079282818976557285786253025774883158125978164878245223052992502106)
# Low-public-exponent RSA attack (e = 3): when the plaintext m is small,
# m**3 wraps around the modulus N only a few times, so for some small i
# the exact integer m**3 equals c + i*N and a cube root recovers m.
# 800 digits of Decimal precision keep the root accurate enough to
# truncate to the true integer plaintext.
getcontext().prec = 800
i = 0
while True:
    # Decimal exponentiation: 1 / e is a high-precision third.
    m = pow(c + i * N, 1 / e)
    hex_m = hex(int(m))[2:]
    # Decode consecutive hex byte pairs to ASCII.
    # NOTE: the comprehension's `i` shadows the outer counter only inside
    # the brackets; the loop counter is untouched afterwards.
    flag = "".join([chr(int(hex_m[i:i+2], 16)) for i in range(0, len(hex_m), 2)])
    if "picoCTF" in flag:
        # Found the wrap count and the flag — report both and stop.
        print(i)
        print(flag)
        break
    i += 1
| 123.684211 | 1,019 | 0.930213 | 62 | 2,350 | 35.209677 | 0.548387 | 0.005497 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.9004 | 0.042979 | 2,350 | 18 | 1,020 | 130.555556 | 0.070253 | 0 | 0 | 0 | 0 | 0 | 0.00298 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.066667 | 0 | 0.066667 | 0.133333 | 0 | 0 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
1551c0de9790ca96cb8227a8e99eaa5f1d2e8c8b | 2,954 | py | Python | tests/unit/util/test_video.py | zerofox-oss/deepstar | fe0fe12317975104fa6ff6c058d141f11e6e951d | [
"BSD-3-Clause-Clear"
] | 44 | 2019-08-09T16:14:27.000Z | 2022-02-10T06:54:35.000Z | tests/unit/util/test_video.py | zerofox-oss/deepstar | fe0fe12317975104fa6ff6c058d141f11e6e951d | [
"BSD-3-Clause-Clear"
] | 2 | 2020-09-26T00:05:52.000Z | 2021-03-22T13:27:36.000Z | tests/unit/util/test_video.py | zerofox-oss/deepstar | fe0fe12317975104fa6ff6c058d141f11e6e951d | [
"BSD-3-Clause-Clear"
] | 14 | 2019-08-19T16:47:32.000Z | 2022-03-04T03:57:27.000Z | import os
import unittest
import cv2
from deepstar.util.tempdir import tempdir
from deepstar.util.video import create_one_video_file_from_one_image_file, \
create_one_video_file_from_many_image_files
class TestVideo(unittest.TestCase):
    """
    Unit tests for the video utility module.
    """

    @staticmethod
    def _support_image():
        # Absolute path to the checked-in sample JPEG used by every test.
        return os.path.dirname(os.path.realpath(__file__)) + '/../../support/image_0001.jpg'  # noqa

    def _assert_frame_count(self, video_path, expected):
        # Confirm the produced video opens and holds the expected frame count.
        capture = cv2.VideoCapture(video_path)
        try:
            self.assertTrue(capture.isOpened())
            self.assertEqual(capture.get(cv2.CAP_PROP_FRAME_COUNT), expected)
        finally:
            capture.release()

    def test_create_one_video_file_from_one_image_file(self):
        source_image = self._support_image()

        with tempdir() as workdir:
            out_path = os.path.join(workdir, 'video.mp4')

            self.assertTrue(
                create_one_video_file_from_one_image_file(source_image,
                                                          out_path))
            self._assert_frame_count(out_path, 1)

    def test_create_one_video_file_from_one_image_file_frame_count(self):
        source_image = self._support_image()

        with tempdir() as workdir:
            out_path = os.path.join(workdir, 'video.mp4')

            self.assertTrue(
                create_one_video_file_from_one_image_file(source_image,
                                                          out_path,
                                                          frame_count=5))
            self._assert_frame_count(out_path, 5)

    def test_create_one_video_file_from_one_image_file_fails_to_open_image(self):  # noqa
        with tempdir() as workdir:
            out_path = os.path.join(workdir, 'video.mp4')

            # 'test' is not a readable image path, so creation must fail.
            self.assertFalse(
                create_one_video_file_from_one_image_file('test',
                                                          out_path,
                                                          frame_count=5))

    def test_create_one_video_file_from_many_image_files(self):
        source_image = self._support_image()

        with tempdir() as workdir:
            out_path = os.path.join(workdir, 'video.mp4')

            def image_paths():
                # Yield the same source frame five times.
                for _ in range(0, 5):
                    yield source_image

            self.assertTrue(
                create_one_video_file_from_many_image_files(image_paths, out_path))  # noqa
            self._assert_frame_count(out_path, 5)
| 33.954023 | 106 | 0.567028 | 338 | 2,954 | 4.553254 | 0.183432 | 0.064327 | 0.090968 | 0.116959 | 0.820663 | 0.820663 | 0.818713 | 0.814165 | 0.721897 | 0.721897 | 0 | 0.032108 | 0.34631 | 2,954 | 86 | 107 | 34.348837 | 0.764889 | 0.020311 | 0 | 0.666667 | 0 | 0 | 0.044189 | 0.030271 | 0 | 0 | 0 | 0 | 0.175439 | 1 | 0.087719 | false | 0 | 0.087719 | 0 | 0.192982 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
ec7a30c305681af1a5daa3ed554c0e92947c7973 | 124 | py | Python | site/filters.py | qnub/qnub.github.io | b2bacc8e7c92d0fdffe7f870a1b7c31f24f68553 | [
"MIT"
] | null | null | null | site/filters.py | qnub/qnub.github.io | b2bacc8e7c92d0fdffe7f870a1b7c31f24f68553 | [
"MIT"
] | null | null | null | site/filters.py | qnub/qnub.github.io | b2bacc8e7c92d0fdffe7f870a1b7c31f24f68553 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
def cat_name(cat):
    """Return the display name mapped to *cat*, or '' when unmapped."""
    # Imported lazily so the config module is only loaded when needed.
    from publishconf import CATEGORY_MAP

    name = CATEGORY_MAP.get(cat, '')
    return name
| 15.5 | 40 | 0.653226 | 17 | 124 | 4.588235 | 0.764706 | 0.282051 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010101 | 0.201613 | 124 | 7 | 41 | 17.714286 | 0.777778 | 0.169355 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.333333 | false | 0 | 0.333333 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
ec8707d87b3b7f71792cf9591b9348f7a3122714 | 264 | py | Python | lists_mutation/append_method_adding_list_plus_operator.py | magicalcarpet/the_complete_python_course | 0ac0c5015a93607d7d29258ac0a3fc38dda81bd2 | [
"MIT"
] | null | null | null | lists_mutation/append_method_adding_list_plus_operator.py | magicalcarpet/the_complete_python_course | 0ac0c5015a93607d7d29258ac0a3fc38dda81bd2 | [
"MIT"
] | null | null | null | lists_mutation/append_method_adding_list_plus_operator.py | magicalcarpet/the_complete_python_course | 0ac0c5015a93607d7d29258ac0a3fc38dda81bd2 | [
"MIT"
] | null | null | null | countries = ['United States', 'Canada', 'Japan']
print(countries)
print(len(countries))
countries.append('Japan')
print(countries)
print(len(countries))
countries.append('Iraq')
print(countries)
print(len(countries))
countries.append('Bolivia')
print(countries) | 18.857143 | 48 | 0.757576 | 31 | 264 | 6.451613 | 0.322581 | 0.28 | 0.285 | 0.33 | 0.74 | 0.74 | 0.74 | 0.51 | 0 | 0 | 0 | 0 | 0.068182 | 264 | 14 | 49 | 18.857143 | 0.813008 | 0 | 0 | 0.636364 | 0 | 0 | 0.150943 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.636364 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 6 |
ecb2521b149f1740ae7786fab3341375af08a11f | 31 | py | Python | Using Python to Interact with the Operating System/WEEK 3/Practice Quiz/Solutions.py | manavnarang/Google-IT-Automation-with-Python-Professional-Certificate | ce982870f07cba8200947eda97764fcf8c7dc441 | [
"MIT"
] | 42 | 2020-04-28T09:06:21.000Z | 2022-01-09T01:01:55.000Z | Using Python to Interact with the Operating System/WEEK 3/Practice Quiz/Solutions.py | vaquarkhan/Google-IT-Automation-with-Python-Professional-Certificate | d87dffe924de218f73d61d27689798646824ed6c | [
"MIT"
] | null | null | null | Using Python to Interact with the Operating System/WEEK 3/Practice Quiz/Solutions.py | vaquarkhan/Google-IT-Automation-with-Python-Professional-Certificate | d87dffe924de218f73d61d27689798646824ed6c | [
"MIT"
] | 52 | 2020-05-12T05:29:46.000Z | 2022-01-26T21:24:08.000Z | print("Check the files out!!")
| 15.5 | 30 | 0.677419 | 5 | 31 | 4.2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.129032 | 31 | 1 | 31 | 31 | 0.777778 | 0 | 0 | 0 | 0 | 0 | 0.677419 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 6 |
ecb4a66cfbbf89326eb6bace06b8e3acb86acd7e | 128 | py | Python | run.py | ivanleoncz/taxi-tracker | 506b94845024b9e32d6b43a36efda6cd50f3c227 | [
"MIT"
] | null | null | null | run.py | ivanleoncz/taxi-tracker | 506b94845024b9e32d6b43a36efda6cd50f3c227 | [
"MIT"
] | null | null | null | run.py | ivanleoncz/taxi-tracker | 506b94845024b9e32d6b43a36efda6cd50f3c227 | [
"MIT"
] | null | null | null | from app import app
if __name__ == "__main__":
    # Serve the app over HTTPS using the bundled certificate/key pair.
    certificate = 'app/ssl/taxi-driver.crt'
    private_key = 'app/ssl/taxi-driver.key'
    app.run(ssl_context=(certificate, private_key))
| 25.6 | 79 | 0.695313 | 21 | 128 | 3.809524 | 0.619048 | 0.15 | 0.25 | 0.4 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.117188 | 128 | 4 | 80 | 32 | 0.707965 | 0 | 0 | 0 | 0 | 0 | 0.421875 | 0.359375 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.333333 | 0 | 0.333333 | 0 | 1 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
ecde8dc9ac88c1b5347eb79191538aff4f84b27e | 106 | py | Python | gaiadet/models/detectors/__init__.py | zengming16/GAIA-det | cac6b5601d63aeaa3882cea2256dcb2539fecb34 | [
"Apache-2.0"
] | 149 | 2021-06-21T06:18:16.000Z | 2022-03-23T08:55:23.000Z | gaiadet/models/detectors/__init__.py | zengming16/GAIA-det | cac6b5601d63aeaa3882cea2256dcb2539fecb34 | [
"Apache-2.0"
] | 7 | 2021-07-11T07:52:58.000Z | 2022-03-30T11:41:39.000Z | gaiadet/models/detectors/__init__.py | zengming16/GAIA-det | cac6b5601d63aeaa3882cea2256dcb2539fecb34 | [
"Apache-2.0"
] | 13 | 2021-06-29T06:06:13.000Z | 2022-02-28T01:31:17.000Z | from .dynamic_two_stage import DynamicTwoStageDetector
from .dynamic_faster_rcnn import DynamicFasterRCNN
| 35.333333 | 54 | 0.90566 | 12 | 106 | 7.666667 | 0.75 | 0.23913 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.075472 | 106 | 2 | 55 | 53 | 0.938776 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
ece88b42c1d0635990d34619d47e4d7f99a8309a | 4,830 | py | Python | 3. Python Advanced (September 2021)/3.2 Python OOP (October 2021)/20. Exercise - Testing/03. Hero_Skeleton/hero/test/test_hero.py | kzborisov/SoftUni | ccb2b8850adc79bfb2652a45124c3ff11183412e | [
"MIT"
] | 1 | 2021-02-07T07:51:12.000Z | 2021-02-07T07:51:12.000Z | 3. Python Advanced (September 2021)/3.2 Python OOP (October 2021)/20. Exercise - Testing/03. Hero_Skeleton/hero/test/test_hero.py | kzborisov/softuni | 9c5b45c74fa7d9748e9b3ea65a5ae4e15c142751 | [
"MIT"
] | null | null | null | 3. Python Advanced (September 2021)/3.2 Python OOP (October 2021)/20. Exercise - Testing/03. Hero_Skeleton/hero/test/test_hero.py | kzborisov/softuni | 9c5b45c74fa7d9748e9b3ea65a5ae4e15c142751 | [
"MIT"
] | null | null | null | import unittest
from project.hero import Hero
class TestHero(unittest.TestCase):
    """Unit tests for the Hero class: construction, battles, and display.

    Fixes applied: removed pointless ``f``-prefixes from constant strings
    (lint F541) and factored the heavily duplicated ``Hero(...)``
    construction into the private ``_make_hero`` helper.
    """

    # Default fixture stats shared by every test.
    username = "Hero"
    level = 10
    health = 100
    damage = 10

    def _make_hero(self, username=None, level=None, health=None, damage=None):
        """Build a Hero, falling back to the class-level fixture stats.

        Only ``None`` triggers the fallback, so explicit 0 or negative
        values (degenerate health) are passed through unchanged.
        """
        return Hero(
            self.username if username is None else username,
            self.level if level is None else level,
            self.health if health is None else health,
            self.damage if damage is None else damage,
        )

    def test_hero_initialization(self):
        """The constructor must copy every argument onto the instance."""
        hero = self._make_hero()

        self.assertEqual(self.username, hero.username)
        self.assertEqual(self.level, hero.level)
        self.assertEqual(self.health, hero.health)
        self.assertEqual(self.damage, hero.damage)

    def test_battle__when_username_is_the_same__expect_exception(self):
        """Fighting a hero that shares your username is rejected."""
        hero = self._make_hero()
        enemy = self._make_hero()

        expected = "You cannot fight yourself"
        with self.assertRaises(Exception) as context:
            hero.battle(enemy)

        self.assertEqual(expected, str(context.exception))

    def test_battle__when_hero_health_is_negative__expected_exception(self):
        """A hero with negative health cannot start a battle."""
        hero = self._make_hero(health=-1)
        enemy = self._make_hero(username="enemy")

        expected = "Your health is lower than or equal to 0. You need to rest"
        with self.assertRaises(Exception) as context:
            hero.battle(enemy)

        self.assertEqual(expected, str(context.exception))

    def test_battle__when_hero_health_is_zero__expected_exception(self):
        """A hero with zero health cannot start a battle."""
        hero = self._make_hero(health=0)
        enemy = self._make_hero(username="enemy")

        expected = "Your health is lower than or equal to 0. You need to rest"
        with self.assertRaises(Exception) as context:
            hero.battle(enemy)

        self.assertEqual(expected, str(context.exception))

    def test_battle__when_enemy_health_is_zero__expected_exception(self):
        """An enemy with zero health cannot be fought."""
        enemy_name = "enemy"
        hero = self._make_hero()
        enemy = self._make_hero(username=enemy_name, health=0)

        expected = f"You cannot fight {enemy_name}. He needs to rest"
        with self.assertRaises(Exception) as context:
            hero.battle(enemy)

        self.assertEqual(expected, str(context.exception))

    def test_battle__when_enemy_health_is_negative__expected_exception(self):
        """An enemy with negative health cannot be fought."""
        enemy_name = "enemy"
        hero = self._make_hero()
        enemy = self._make_hero(username=enemy_name, health=-1)

        expected = f"You cannot fight {enemy_name}. He needs to rest"
        with self.assertRaises(Exception) as context:
            hero.battle(enemy)

        self.assertEqual(expected, str(context.exception))

    def test_battle__when_hero_and_enemy_health_is_zero__expected_draw(self):
        """Equal combatants both end at exactly 0 health -> draw."""
        hero = self._make_hero()
        enemy = self._make_hero(username="enemy")
        damage = self.damage * self.level

        actual = hero.battle(enemy)

        self.assertEqual(self.health - damage, hero.health)
        self.assertEqual(self.health - damage, enemy.health)
        self.assertEqual("Draw", actual)

    def test_battle__when_hero_and_enemy_health_is_negative__expected_draw(self):
        """Equal combatants both end below 0 health -> still a draw."""
        hero = self._make_hero(damage=50)
        enemy = self._make_hero(username="enemy", damage=50)
        damage = hero.damage * self.level

        actual = hero.battle(enemy)

        self.assertEqual(self.health - damage, hero.health)
        self.assertEqual(self.health - damage, enemy.health)
        self.assertEqual("Draw", actual)

    def test_battle__when_enemy_health_is_negative__expected_win(self):
        """The surviving hero wins, levels up, and gains health/damage bonuses."""
        hero = self._make_hero(health=1000, damage=20)
        enemy = self._make_hero(username="enemy", health=100, damage=50)

        actual = hero.battle(enemy)

        self.assertEqual("You win", actual)
        self.assertEqual(11, hero.level)
        self.assertEqual(1000 - 10 * 50 + 5, hero.health)
        self.assertEqual(20 + 5, hero.damage)

    def test_battle__when_enemy_health_is_more_than_zero__expected_win(self):
        """When only the enemy survives, the hero loses and the enemy levels up."""
        hero = self._make_hero(health=1000, damage=50)
        enemy = self._make_hero(username="enemy", health=1000, damage=10)

        actual = hero.battle(enemy)

        self.assertEqual("You lose", actual)
        self.assertEqual(11, enemy.level)
        self.assertEqual(1000 - 10 * 50 + 5, enemy.health)
        self.assertEqual(10 + 5, enemy.damage)

    def test_str(self):
        """__str__ must render name, level, health, and damage."""
        hero = self._make_hero()

        expected = f"Hero {self.username}: {self.level} lvl\n" \
                   f"Health: {self.health}\n" \
                   f"Damage: {self.damage}\n"
        self.assertEqual(expected, str(hero))
if __name__ == "__main__":
    # Run the suite when this test module is executed directly.
    unittest.main()
| 39.917355 | 81 | 0.666253 | 620 | 4,830 | 5.008065 | 0.109677 | 0.115942 | 0.066989 | 0.07343 | 0.817391 | 0.793559 | 0.77649 | 0.73752 | 0.734622 | 0.655072 | 0 | 0.020364 | 0.227329 | 4,830 | 120 | 82 | 40.25 | 0.811629 | 0 | 0 | 0.468085 | 0 | 0 | 0.081574 | 0 | 0 | 0 | 0 | 0 | 0.308511 | 1 | 0.117021 | false | 0 | 0.021277 | 0 | 0.191489 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
01da8beb8a0adce4f66ae2d1d0eda4eea3560615 | 153 | py | Python | boa3_test/test_sc/interop_test/stdlib/MemorySearchTooFewArguments.py | hal0x2328/neo3-boa | 6825a3533384cb01660773050719402a9703065b | [
"Apache-2.0"
] | 25 | 2020-07-22T19:37:43.000Z | 2022-03-08T03:23:55.000Z | boa3_test/test_sc/interop_test/stdlib/MemorySearchTooFewArguments.py | hal0x2328/neo3-boa | 6825a3533384cb01660773050719402a9703065b | [
"Apache-2.0"
] | 419 | 2020-04-23T17:48:14.000Z | 2022-03-31T13:17:45.000Z | boa3_test/test_sc/interop_test/stdlib/MemorySearchTooFewArguments.py | hal0x2328/neo3-boa | 6825a3533384cb01660773050719402a9703065b | [
"Apache-2.0"
] | 15 | 2020-05-21T21:54:24.000Z | 2021-11-18T06:17:24.000Z | from typing import Union
from boa3.builtin.interop.stdlib import memory_search
def main(mem: Union[bytes, str]) -> int:
    # Deliberately calls memory_search with only one argument: this stub is a
    # negative-test fixture (see the file name, "TooFewArguments") used to
    # exercise the compiler's too-few-arguments error path, so the missing
    # parameter must NOT be "fixed".
    return memory_search(mem)
| 19.125 | 53 | 0.75817 | 23 | 153 | 4.956522 | 0.73913 | 0.210526 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007692 | 0.150327 | 153 | 7 | 54 | 21.857143 | 0.869231 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.25 | false | 0 | 0.5 | 0.25 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 6 |
bf0f51af6c7bf440ab56f594cf1fadb41ea85af6 | 41 | py | Python | vilmedic/__init__.py | jbdel/vilmedic | 17d462a540a2632811cc2a78edd2861800a33b07 | [
"MIT"
] | 15 | 2021-07-24T10:41:07.000Z | 2022-03-27T14:40:47.000Z | vilmedic/__init__.py | jbdel/vilmedic | 17d462a540a2632811cc2a78edd2861800a33b07 | [
"MIT"
] | null | null | null | vilmedic/__init__.py | jbdel/vilmedic | 17d462a540a2632811cc2a78edd2861800a33b07 | [
"MIT"
] | 2 | 2022-02-22T17:37:22.000Z | 2022-03-20T12:55:40.000Z | from .zoo.modeling_auto import AutoModel
| 20.5 | 40 | 0.853659 | 6 | 41 | 5.666667 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.097561 | 41 | 1 | 41 | 41 | 0.918919 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
bd91d53934122a96828fca7265633c50201170f5 | 16,094 | py | Python | mailslurp_client/api/bulk_actions_controller_api.py | mailslurp/mailslurp-client-python | a1e9fdc6eb06e192909fd57a64813beb32419594 | [
"MIT"
] | 6 | 2020-04-30T07:47:42.000Z | 2022-03-24T20:58:58.000Z | mailslurp_client/api/bulk_actions_controller_api.py | mailslurp/mailslurp-client-python | a1e9fdc6eb06e192909fd57a64813beb32419594 | [
"MIT"
] | 1 | 2020-09-20T19:58:21.000Z | 2020-11-29T16:49:19.000Z | mailslurp_client/api/bulk_actions_controller_api.py | mailslurp/mailslurp-client-python | a1e9fdc6eb06e192909fd57a64813beb32419594 | [
"MIT"
] | 1 | 2019-08-09T14:55:50.000Z | 2019-08-09T14:55:50.000Z | # coding: utf-8
"""
MailSlurp API
MailSlurp is an API for sending and receiving emails from dynamically allocated email addresses. It's designed for developers and QA teams to test applications, process inbound emails, send templated notifications, attachments, and more. ## Resources - [Homepage](https://www.mailslurp.com) - Get an [API KEY](https://app.mailslurp.com/sign-up/) - Generated [SDK Clients](https://www.mailslurp.com/docs/) - [Examples](https://github.com/mailslurp/examples) repository # noqa: E501
The version of the OpenAPI document: 6.5.2
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from mailslurp_client.api_client import ApiClient
from mailslurp_client.exceptions import ( # noqa: F401
ApiTypeError,
ApiValueError
)
class BulkActionsControllerApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def bulk_create_inboxes(self, count, **kwargs): # noqa: E501
"""Bulk create Inboxes (email addresses) # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.bulk_create_inboxes(count, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param int count: Number of inboxes to be created in bulk (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: list[Inbox]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.bulk_create_inboxes_with_http_info(count, **kwargs) # noqa: E501
    def bulk_create_inboxes_with_http_info(self, count, **kwargs):  # noqa: E501
        """Bulk create Inboxes (email addresses)  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.bulk_create_inboxes_with_http_info(count, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param int count: Number of inboxes to be created in bulk (required)
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(list[Inbox], status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Snapshot of the call's arguments (self, count, kwargs); used for
        # keyword validation and to read the optional request settings below.
        local_var_params = locals()

        # Names accepted by this endpoint: its own parameter plus the
        # framework-level request options.
        all_params = [
            'count'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then flatten kwargs into the
        # parameter dict so everything is addressed by name.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method bulk_create_inboxes" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'count' is set
        if self.api_client.client_side_validation and ('count' not in local_var_params or  # noqa: E501
                                                        local_var_params['count'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `count` when calling `bulk_create_inboxes`")  # noqa: E501

        collection_formats = {}

        path_params = {}

        # 'count' travels as a query-string parameter.
        query_params = []
        if 'count' in local_var_params and local_var_params['count'] is not None:  # noqa: E501
            query_params.append(('count', local_var_params['count']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['API_KEY']  # noqa: E501

        return self.api_client.call_api(
            '/bulk/inboxes', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='list[Inbox]',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
def bulk_delete_inboxes(self, ids, **kwargs): # noqa: E501
"""Bulk Delete Inboxes # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.bulk_delete_inboxes(ids, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param list[str] ids: ids (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.bulk_delete_inboxes_with_http_info(ids, **kwargs) # noqa: E501
    def bulk_delete_inboxes_with_http_info(self, ids, **kwargs):  # noqa: E501
        """Bulk Delete Inboxes  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.bulk_delete_inboxes_with_http_info(ids, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param list[str] ids: ids (required)
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Snapshot of the call's arguments (self, ids, kwargs); used for
        # keyword validation and to read the optional request settings below.
        local_var_params = locals()

        # Names accepted by this endpoint: its own parameter plus the
        # framework-level request options.
        all_params = [
            'ids'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments, then flatten kwargs into the
        # parameter dict so everything is addressed by name.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method bulk_delete_inboxes" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'ids' is set
        if self.api_client.client_side_validation and ('ids' not in local_var_params or  # noqa: E501
                                                        local_var_params['ids'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `ids` when calling `bulk_delete_inboxes`")  # noqa: E501

        collection_formats = {}

        path_params = {}

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        # Unlike the create endpoint, the ids are sent as the request body.
        body_params = None
        if 'ids' in local_var_params:
            body_params = local_var_params['ids']
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['API_KEY']  # noqa: E501

        return self.api_client.call_api(
            '/bulk/inboxes', 'DELETE',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type=None,  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
def bulk_send_emails(self, bulk_send_email_options, **kwargs): # noqa: E501
"""Bulk Send Emails # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.bulk_send_emails(bulk_send_email_options, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param BulkSendEmailOptions bulk_send_email_options: bulkSendEmailOptions (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.bulk_send_emails_with_http_info(bulk_send_email_options, **kwargs) # noqa: E501
def bulk_send_emails_with_http_info(self, bulk_send_email_options, **kwargs): # noqa: E501
"""Bulk Send Emails # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.bulk_send_emails_with_http_info(bulk_send_email_options, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param BulkSendEmailOptions bulk_send_email_options: bulkSendEmailOptions (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'bulk_send_email_options'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method bulk_send_emails" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'bulk_send_email_options' is set
if self.api_client.client_side_validation and ('bulk_send_email_options' not in local_var_params or # noqa: E501
local_var_params['bulk_send_email_options'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `bulk_send_email_options` when calling `bulk_send_emails`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'bulk_send_email_options' in local_var_params:
body_params = local_var_params['bulk_send_email_options']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['API_KEY'] # noqa: E501
return self.api_client.call_api(
'/bulk/send', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
| 43.032086 | 487 | 0.594694 | 1,791 | 16,094 | 5.087102 | 0.116695 | 0.037757 | 0.056854 | 0.029635 | 0.85995 | 0.845462 | 0.836242 | 0.816925 | 0.816925 | 0.801559 | 0 | 0.013133 | 0.332919 | 16,094 | 373 | 488 | 43.147453 | 0.835507 | 0.460171 | 0 | 0.64 | 0 | 0 | 0.161169 | 0.049351 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04 | false | 0 | 0.028571 | 0 | 0.108571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
bdb56b6180b74a7055be2545f59a1f748cad4a24 | 178 | py | Python | practice/pid_line.py | jarzab3/smart_city_mdx | 957ecfc35414d2833f2112bf3d6e0d0e366b119a | [
"MIT"
] | 1 | 2019-01-22T17:19:22.000Z | 2019-01-22T17:19:22.000Z | practice/pid_line.py | jarzab3/smart_city_mdx | 957ecfc35414d2833f2112bf3d6e0d0e366b119a | [
"MIT"
] | null | null | null | practice/pid_line.py | jarzab3/smart_city_mdx | 957ecfc35414d2833f2112bf3d6e0d0e366b119a | [
"MIT"
] | null | null | null | from python_asip_client.mirto_robot import MirtoRobot
from python_asip_client.tcp_mirto_robot import TCPMirtoRobot
from python_asip_client.serial_mirto_robot import SerialBoard
| 35.6 | 61 | 0.910112 | 26 | 178 | 5.807692 | 0.461538 | 0.198676 | 0.278146 | 0.397351 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.073034 | 178 | 4 | 62 | 44.5 | 0.915152 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
bdd7f9d5598309beefb2b3ddec22c7a99e68657c | 200 | py | Python | PythonComparisonOperators/comparison_operator.py | theprogrammingthinker/Python-practice | fef11a7fbd5082a0614b01f88a13ea29d68860bf | [
"Unlicense"
] | 1 | 2017-05-02T10:28:36.000Z | 2017-05-02T10:28:36.000Z | PythonComparisonOperators/comparison_operator.py | theprogrammingthinker/Python-practice | fef11a7fbd5082a0614b01f88a13ea29d68860bf | [
"Unlicense"
] | null | null | null | PythonComparisonOperators/comparison_operator.py | theprogrammingthinker/Python-practice | fef11a7fbd5082a0614b01f88a13ea29d68860bf | [
"Unlicense"
] | null | null | null | print(2 == 2)
print(1 == 0)
print(1 == 1.0)
# not equal
print(2 != 1)
print(2 != 1)
print(2 > 1)
print(2 > 4)
print(2 < 4)
print(2 < 1)
print(2 >= 2)
print(2 >= 1)
print(2 <= 2)
print(2 <= 10)
| 9.090909 | 15 | 0.505 | 42 | 200 | 2.404762 | 0.190476 | 0.653465 | 0.346535 | 0.594059 | 0.683168 | 0.60396 | 0.60396 | 0.60396 | 0 | 0 | 0 | 0.18543 | 0.245 | 200 | 21 | 16 | 9.52381 | 0.483444 | 0.045 | 0 | 0.153846 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 6 |
bddc2c96592e5cf51fcffd7182f4e247f21435b7 | 237 | py | Python | parser/team05/proyecto/Retorno.py | webdev188/tytus | 847071edb17b218f51bb969d335a8ec093d13f94 | [
"MIT"
] | 35 | 2020-12-07T03:11:43.000Z | 2021-04-15T17:38:16.000Z | parser/team05/proyecto/Retorno.py | webdev188/tytus | 847071edb17b218f51bb969d335a8ec093d13f94 | [
"MIT"
] | 47 | 2020-12-09T01:29:09.000Z | 2021-01-13T05:37:50.000Z | parser/team05/proyecto/Retorno.py | webdev188/tytus | 847071edb17b218f51bb969d335a8ec093d13f94 | [
"MIT"
] | 556 | 2020-12-07T03:13:31.000Z | 2021-06-17T17:41:10.000Z | class Retorno:
def __init__(self, instruccion, nodo):
self._instruccion = instruccion
self._nodo = nodo
def getInstruccion(self):
return self._instruccion
def getNodo(self):
return self._nodo | 23.7 | 42 | 0.649789 | 25 | 237 | 5.84 | 0.4 | 0.308219 | 0.191781 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.274262 | 237 | 10 | 43 | 23.7 | 0.848837 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.375 | false | 0 | 0 | 0.25 | 0.75 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 6 |
da03f5cf64ee459331054f81550f36350ec81a87 | 40 | py | Python | python/testData/refactoring/rename/renamePackageUpdatesFirstFormImports/before/a.py | truthiswill/intellij-community | fff88cfb0dc168eea18ecb745d3e5b93f57b0b95 | [
"Apache-2.0"
] | 2 | 2019-04-28T07:48:50.000Z | 2020-12-11T14:18:08.000Z | python/testData/refactoring/rename/renamePackageUpdatesFirstFormImports/before/a.py | truthiswill/intellij-community | fff88cfb0dc168eea18ecb745d3e5b93f57b0b95 | [
"Apache-2.0"
] | 173 | 2018-07-05T13:59:39.000Z | 2018-08-09T01:12:03.000Z | python/testData/refactoring/rename/renamePackageUpdatesFirstFormImports/before/a.py | truthiswill/intellij-community | fff88cfb0dc168eea18ecb745d3e5b93f57b0b95 | [
"Apache-2.0"
] | 2 | 2020-03-15T08:57:37.000Z | 2020-04-07T04:48:14.000Z | import f<caret>oo.m1
print(foo.m1.f())
| 10 | 20 | 0.675 | 9 | 40 | 3 | 0.777778 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.055556 | 0.1 | 40 | 3 | 21 | 13.333333 | 0.694444 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.5 | null | null | 0.5 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 6 |
da66ceb9984ff8608ffc522df8f5da9f1c70a466 | 17,584 | py | Python | misc/leaderFollower.py | danalex97/nfsTorrent | 1364d920aca0c1b656cd52ab1107e35801fae83f | [
"MIT"
] | 1 | 2019-03-12T12:34:13.000Z | 2019-03-12T12:34:13.000Z | misc/leaderFollower.py | wade-welles/CacheTorrent | 1364d920aca0c1b656cd52ab1107e35801fae83f | [
"MIT"
] | 38 | 2018-04-11T08:47:07.000Z | 2018-06-20T17:51:11.000Z | misc/leaderFollower.py | wade-welles/CacheTorrent | 1364d920aca0c1b656cd52ab1107e35801fae83f | [
"MIT"
] | 1 | 2019-03-12T12:34:10.000Z | 2019-03-12T12:34:10.000Z | leader = [0,1357131,1357131,1360131,1360131,1360131,1360131,1361631,1361631,1361631,1361631,1361631,1361631,1361631,1363131,1363131,1363131,1363131,1363131,1363131,1363131,1363131,1363131,1363131,1363131,1363131,1363131,1363131,1364631,1364631,1364631,1364631,1364631,1364631,1364631,1364631,1364631,1364631,1364631,1364631,1364631,1364631,1364631,1364631,1364631,1364631,1364631,1364631,1364631,1364631,1364631,1364631,1364631,1365131,1366131,1366131,1366131,1366131,1366131,1366131,1366131,1366131,1366131,1366131,1366131,1366131,1366131,1366131,1366131,1366131,1366131,1366131,1366131,1366131,1366131,1366131,1366131,1366131,1366131,1366131,1366131,1366131,1366131,1366131,1366131,1366131,1366131,1366131,1366131,1366131,1366131,1366131,1366131,1366131,1366131,1366131,1367631,1367631,1367631,1367631,1367631,1367631,1367631,1367631,1367631,1367631,1367631,1367631,1367631,1367631,1367631,1367631,1367631,1367631,1367631,1367631,1367631,1367631,1367631,1367631,1367631,1367631,1367631,1367631,1367631,1367631,1367631,1367631,1367631,1367631,1367631,1367631,1367631,1367631,1367631,1367631,1367631,1367631,1367631,1369131,1369131,1369131,1369131,1369131,1369131,1369131,1369131,1369131,1369131,1369131,1369131,1369131,1369131,1369131,1369131,1369131,1369131,1369131,1369131,1369131,1369131,1369131,1369131,1369131,1369131,1369131,1369131,1369131,1369131,1369131,1369131,1369131,1369131,1369131,1369131,1369131,1369131,1369131,1369131,1369131,1369131,1369131,1370631,1370631,1370631,1370631,1370631,1370631,1370631,1370631,1370631,1370631,1370631,1370631,1370631,1370631,1370631,1370631,1370631,1370631,1370631,1370631,1370631,1370631,1370631,1370631,1370631,1370631,1370631,1370631,1370631,1370631,1370631,1370631,1370631,1370631,1370631,1370631,1370631,1370631,1370631,1370631,1370631,1370631,1370631,1370631,1370631,1370631,1370631,1370631,1370631,1370631,1370631,1370631,1370631,1370631,1370631,1370631,1370631,1370631,1370631,137063
1,1370631,1370631,1370631,1372131,1372131,1372131,1372131,1372131,1372131,1372131,1372131,1372131,1372131,1372131,1372131,1372131,1372131,1372131,1372131,1372131,1372131,1372131,1372131,1372131,1372131,1372131,1372131,1372131,1372131,1372131,1372131,1372131,1372131,1372131,1372131,1372131,1373631,1373631,1373631,1373631,1373631,1373631,1373631,1373631,1373631,1373631,1373631,1373631,1373631,1373631,1373631,1373631,1373631,1373631,1373631,1373631,1373631,1373631,1375131,1375131,1375131,1375131,1375131,1375131,1375131,1375131,1375131,1375131,1376631,1376631,1376631,1376631,1376631,1376631,1376631,1376631,1376631,1376631,1378131,1378131,1378131,1378131,1379631,1381131,1381131,1381131,1381131,1384131,1384131,1384131,1385631,1385631,1385631,1387131,1387131,1388631,1390131,1390131,1391631,1393131,1396131,1397631,1399131,1403631]
follower = [0,1393131,1393131,1417131,1420131,1420131,1429131,1433631,1435131,1436631,1441131,1442631,1444131,1454631,1466631,1466631,1469631,1472631,1474131,1474131,1480131,1486131,1487631,1490631,1493631,1498131,1499631,1499631,1501131,1510131,1514631,1514631,1516131,1517631,1517631,1519131,1520631,1523631,1523631,1523631,1523631,1523631,1525131,1525131,1526631,1528131,1528131,1529631,1529631,1531131,1531131,1532631,1532631,1532631,1534131,1535631,1540131,1540131,1544631,1547631,1547631,1547631,1549131,1550631,1556631,1559631,1559631,1561131,1562631,1562631,1562631,1564131,1564131,1564131,1565631,1565631,1565631,1565631,1573131,1573131,1574631,1576131,1577631,1579131,1580631,1580631,1580631,1582131,1582131,1582131,1583631,1583631,1585131,1586631,1588131,1588131,1588131,1589631,1591131,1591131,1594131,1594131,1594131,1595631,1595631,1595631,1597131,1598631,1601631,1603131,1603131,1604631,1604631,1604631,1607631,1609131,1610631,1612131,1616631,1618131,1618131,1619631,1621131,1622631,1624131,1625631,1625631,1625631,1625631,1627131,1627131,1628631,1628631,1628631,1630131,1631631,1631631,1634631,1634631,1634631,1636131,1636131,1636131,1637631,1637631,1637631,1637631,1639131,1640631,1642131,1643631,1643631,1643631,1643631,1645131,1646631,1648131,1649631,1649631,1649631,1649631,1649631,1651131,1651131,1652631,1654131,1657131,1657131,1657131,1658631,1658631,1658631,1660131,1660131,1661631,1663131,1666131,1666131,1667631,1669131,1670631,1670631,1670631,1672131,1672131,1673631,1675131,1676631,1676631,1678131,1678131,1678131,1678131,1678131,1678131,1679631,1681131,1681131,1682631,1684131,1688631,1690131,1690131,1691631,1693131,1693131,1696131,1697631,1697631,1697631,1699131,1699131,1699131,1700631,1700631,1700631,1702131,1702131,1702131,1702131,1703631,1705131,1706631,1706631,1706631,1706631,1708131,1708131,1709631,1711131,1711131,1715631,1720131,1720131,1721631,1721631,1721631,1721631,1726131,1726131,1726131,1730631,1732131,1735131,1735131,1735131,1736631,1738131,1738131,17
39631,1742631,1742631,1742631,1748631,1750131,1753131,1756131,1756131,1760631,1760631,1766631,1768131,1771131,1772631,1772631,1774131,1774131,1777131,1780131,1783131,1784631,1787631,1787631,1789131,1790631,1795131,1796631,1796631,1798131,1798131,1798131,1799631,1799631,1799631,1801131,1804131,1807131,1808631,1808631,1808631,1810131,1810131,1810131,1813131,1813131,1817631,1823631,1823631,1823631,1825131,1826631,1828131,1828131,1831131,1834131,1834131,1834131,1835631,1841631,1849131,1855131,1856631,1859631,1864131,1867131,1871631,1871631,1876131,1877631,1885131,1889631,1889631,1891131,1897131,1900131,1901631,1904631,1907631,1910631,1913631,1913631,1915131,1915131,1916631,1918131,1919631,1924131,1928631,1930131,1931631,1934631,1936131,1936131,1955631,1955631,1967631,1967631,1973631,1973631,1982631,1987131,1991631,2023131,2035131,2056131]
leader2 = [0,1240667,1240667,1242167,1242167,1242167,1242167,1242167,1242167,1242167,1242667,1242667,1242667,1242667,1242667,1242667,1242667,1243667,1243667,1243667,1243667,1243667,1243667,1243667,1243667,1243667,1243667,1244167,1245167,1245167,1245167,1245167,1245167,1245167,1245167,1245167,1245167,1245167,1245167,1245167,1245167,1245167,1245167,1245167,1245167,1245167,1245167,1245167,1245167,1245167,1245167,1245167,1245167,1245167,1245167,1245167,1245167,1245167,1245167,1245167,1245167,1245167,1245167,1245167,1245167,1245167,1245167,1245167,1245167,1245167,1245167,1245167,1245167,1245167,1245167,1245167,1245167,1245167,1245167,1245167,1245167,1245167,1245167,1245167,1245167,1245167,1245167,1245167,1246667,1246667,1246667,1246667,1246667,1246667,1246667,1246667,1246667,1246667,1246667,1246667,1246667,1246667,1246667,1246667,1246667,1246667,1246667,1246667,1246667,1246667,1246667,1246667,1246667,1246667,1246667,1246667,1246667,1246667,1246667,1246667,1246667,1246667,1246667,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1249667,1249667,1249667,1249667,1249667,1249667,1249667,1249667,1249667,1249667,1249667,1249667,1250167,1250167,1250167,1250167,1250167,1250167,1250167,1250167,1250167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251667,1251667,1252667,1252667,1252667,1252667,1252798,1254167,1255667,1258667,1260167,1260167]
follower2 = [0,1242167,1243667,1244167,1245167,1245167,1245167,1245167,1245167,1245167,1245167,1245167,1245167,1245167,1245167,1245167,1246667,1246667,1246667,1246667,1246667,1246667,1246667,1246667,1246667,1246667,1246667,1246667,1246667,1246667,1246667,1246667,1246667,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1248167,1249667,1249667,1249667,1249667,1249667,1249667,1249667,1249667,1249667,1249667,1249667,1249667,1249667,1249667,1249667,1249667,1249667,1249667,1249667,1249667,1249667,1249667,1249667,1249667,1249667,1249667,1249667,1249667,1249667,1249667,1249667,1249667,1249667,1249667,1249667,1249667,1249667,1249667,1249667,1249667,1249667,1249667,1249667,1249667,1249667,1249667,1249667,1249667,1249667,1249667,1249667,1249667,1249667,1249667,1249667,1249667,1249667,1249667,1249667,1249667,1249667,1249667,1249667,1249667,1249878,1250167,1250167,1250167,1250167,1250167,1250167,1250167,1250167,1250167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1
251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1251167,1252667,1252667,1252667,1252667,1252667,1252667,1252667,1252667,1252667,1252667,1252667,1252667,1252667,1252667,1252667,1252667,1252667,1252667,1252667,1252667,1252667,1252667,1252667,1252667,1252667,1252667,1252667,1252667,1252667,1252667,1252667,1252667,1252667,1252667,1252667,1252667,1252667,1252667,1252667,1252667,1252667,1252667,1252667,1252667,1252667,1252667,1252667,1252667,1252667,1252667,1252667,1252667,1252667,1254167,1254167,1254167,1254167,1254167,1254167,1254167,1254167,1254167,1254167,1254167,1254167,1254167,1254167,1254167,1254167,1254167,1254167,1254167,1254167,1254167,1254167,1254167,1254167,1254167,1254167,1254167,1254167,1254167,1254167,1255667,1255667,1255667,1255667,1255667,1255667,1255667,1257167,1257167,1257167,1257167,1257167,1257167,1260167,1263167,1266167,1267667,1269167,1270667,1270667,1272167,1272167,1273667,1273667,1275167,1275167,1278167,1279667,1281167,1285667,1285667,1287167,1299167,1303667,1306667,1308167,1314167,1320167,1321691,1321699,1341167,1347167,1347167,1348682,1359167,1362167,1365167,1375667,1441679]
leader3 = [0,584232,608232,648732,651732,825732,825732,840732,848232,851232,851232,858732,861732,866232,866232,870732,872232,876732,879732,884232,884232,884232,887232,888732,888732,891732,891732,893232,893232,894732,896232,897732,897732,897732,900732,900732,900732,900732,902232,903732,903732,903732,905232,905232,905232,909732,911232,912732,912732,912732,912732,912732,914232,914232,914232,915732,915732,915732,915732,917232,917232,917232,917232,917232,918732,920232,920232,921732,921732,921732,923232,923232,923232,923232,924732,924732,924732,924732,926232,926232,926232,926232,927732,927732,927732,927732,927732,929232,929232,929232,929232,929232,930732,930732,930732,930732,932232,933732,935232,935232,935232,935232,936732,936732,936732,938232,938232,938232,938232,938232,939732,939732,939732,939732,939732,941232,941232,941232,942732,942732,942732,942732,942732,942732,942732,942732,944232,945732,945732,945732,945732,945732,945732,945732,947232,947232,947232,947232,947232,947232,947232,947232,948732,948732,948732,948732,948732,948732,950232,950232,950232,950232,950232,950232,950298,951732,951732,951732,951732,951732,951732,951732,951732,953232,953232,953232,953232,953232,953232,954732,954732,954732,954732,954732,954732,954732,956232,956232,956232,956232,956232,956232,957732,957732,957732,957732,959232,959232,959232,959232,959232,959232,959232,959232,959232,960732,960732,960732,960732,960732,960732,962232,962232,962232,962232,962232,962232,962232,962232,963732,963732,963732,963732,963732,963732,963732,963732,963732,963732,965232,965232,965232,965232,965232,965232,965232,965232,965232,966732,966732,966732,966732,966732,968232,968232,968232,968232,968232,968232,968232,969732,969732,969732,969732,971232,971232,971232,971232,971232,971232,971232,971232,971232,972732,972732,972732,972732,972732,974232,974232,974232,974232,975732,975732,975732,975732,975732,977232,977232,977232,977232,978732,978732,978732,978732,978732,980232,980232,980232,980232,980232,980232,980232,980232,980232
,980232,980232,980232,981732,981732,981732,981732,981732,981732,983232,983232,983232,983232,984732,984732,984732,986232,986232,986232,986232,986232,986232,986232,986232,987732,987732,987732,989232,989232,989232,990732,990732,992232,992232,993732,993732,993732,993732,995232,995232,995232,995232,995232,996732,996732,999732,999732,1001232,1002732,1004232,1004232,1005732,1005732,1007232,1007232,1008732,1010232,1011732,1014732,1014732,1016232,1019232]
follower3 = [0,1110732,1119732,1119732,1122732,1136232,1139232,1139232,1142232,1143732,1145232,1145232,1146732,1152732,1152732,1154232,1154232,1157232,1157232,1158732,1158732,1163232,1172232,1173732,1175232,1176732,1181232,1181232,1181232,1184232,1184232,1187232,1190232,1191732,1193232,1194732,1194732,1196232,1196232,1200732,1200732,1200732,1203732,1203732,1206732,1206732,1206732,1206732,1208232,1209732,1209732,1211232,1211232,1211232,1211232,1212732,1212732,1214232,1214232,1214232,1214232,1214232,1214232,1215732,1215732,1215732,1215732,1217232,1217232,1217232,1217232,1217232,1218732,1218732,1218732,1218732,1220232,1220232,1221732,1221732,1224732,1224732,1224732,1224732,1226232,1227732,1227732,1227732,1227732,1229232,1229232,1229232,1229232,1230732,1232232,1233732,1233732,1233732,1233732,1238232,1238232,1238232,1239732,1241232,1241232,1242732,1242732,1242732,1242732,1242732,1244232,1245732,1245732,1247232,1247232,1248732,1248732,1250232,1250232,1250232,1250232,1250232,1250232,1251732,1251732,1251732,1251732,1253232,1254732,1254732,1254732,1257732,1257732,1257732,1259232,1259232,1259232,1259232,1259232,1260732,1260732,1262232,1262232,1262232,1263732,1263732,1265232,1265232,1268232,1268232,1268232,1268232,1268232,1269732,1269732,1269732,1271232,1272732,1274232,1274232,1274232,1275732,1275732,1275732,1277232,1277232,1278732,1278732,1278732,1278732,1278732,1278732,1281732,1281732,1284732,1286232,1286232,1287732,1287732,1289232,1290732,1290732,1290732,1292232,1292232,1296732,1296732,1296732,1298232,1299732,1299732,1299732,1299732,1299732,1299732,1299732,1301232,1301232,1302732,1302732,1304232,1305732,1305732,1305732,1305732,1307232,1307232,1308732,1311732,1311732,1313232,1314732,1316232,1316232,1317732,1317732,1317732,1319232,1319232,1319232,1320732,1320732,1320732,1320732,1322232,1322232,1322232,1323732,1326732,1326732,1326732,1326732,1328232,1329732,1329732,1331232,1331232,1332732,1332732,1332732,1334232,1334232,1335732,1335732,1337232,1338732,1338732,1338732,1340232,1
340232,1340232,1341732,1343232,1343232,1343232,1343232,1344732,1344732,1346232,1346232,1347732,1349232,1349232,1350732,1353732,1355232,1356732,1359732,1359732,1361232,1361232,1362732,1364232,1364232,1364232,1364232,1365732,1365732,1367232,1367232,1368732,1368732,1371732,1374732,1376232,1376232,1376232,1377732,1377732,1379232,1379232,1380732,1380732,1382232,1382232,1383732,1388232,1391232,1392732,1392732,1395732,1395732,1398732,1400232,1400232,1401732,1401732,1401732,1403232,1404732,1404732,1404732,1406232,1406232,1407732,1409232,1409232,1410732,1412232,1413732,1415232,1415232,1415232,1421232,1421232,1424232,1424232,1425732,1427232,1427232,1427232,1431732,1437732,1439232,1439275,1442232,1452732,1461732,1466232,1466232,1470732,1472232,1473732,1475232,1475232,1490263,1511232,1514232,1517232,1520232,1526232,1553232,1562232,1625232]
def sparse(y):
return list(y[:-10:10])
def hmm(x):
return [float(i) / float(len(x)) * 100 for i in range(len(x))]
from plot import *
plots = [
LeaderPlot()
.plot(sparse(leader)[1:], sparse(hmm(leader))[1:], '+', fillstyle='none', color='black', label="Leader CDF")
.plot(sparse(follower)[1:], sparse(hmm(follower))[1:], '^', fillstyle='none', color='black', label="Follower CDF")
.save("plots/leaderFollower.png"),
# Asym
LeaderPlot()
.plot(sparse(leader2)[1:], sparse(hmm(leader2))[1:], '+', fillstyle='none', color='black', label="Leader CDF")
.plot(sparse(follower2)[1:], sparse(hmm(follower2))[1:], '^', fillstyle='none', color='black', label="Follower CDF")
.save("plots/leaderFollower2.png"),
# Bigseed
LeaderPlot()
.plot(sparse(leader3)[1:], sparse(hmm(leader3))[1:], '+', fillstyle='none', color='black', label="Leader CDF")
.plot(sparse(follower3)[1:], sparse(hmm(follower3))[1:], '^', fillstyle='none', color='black', label="Follower CDF")
.save("plots/leaderFollower3.png"),
]
| 517.176471 | 3,703 | 0.85629 | 2,244 | 17,584 | 6.709893 | 0.269608 | 0.144119 | 0.213389 | 0.2808 | 0.537757 | 0.509464 | 0.484824 | 0.468885 | 0.458126 | 0.440659 | 0 | 0.825558 | 0.008929 | 17,584 | 33 | 3,704 | 532.848485 | 0.038446 | 0.000682 | 0 | 0.12 | 0 | 0 | 0.011384 | 0.004212 | 0 | 0 | 0 | 0 | 0 | 1 | 0.08 | false | 0 | 0.04 | 0.08 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
da6c637f1b37355f05c71e72b2be500df154bdec | 323 | py | Python | skywalker/__init__.py | dgerosa/plottingstuff | 9c52d289cddc01b00da6689fca4b20635da028d5 | [
"MIT"
] | 2 | 2018-08-17T00:59:50.000Z | 2020-03-27T21:09:46.000Z | skywalker/__init__.py | dgerosa/plottingstuff | 9c52d289cddc01b00da6689fca4b20635da028d5 | [
"MIT"
] | null | null | null | skywalker/__init__.py | dgerosa/plottingstuff | 9c52d289cddc01b00da6689fca4b20635da028d5 | [
"MIT"
] | null | null | null | from .skywalker import __name__
from .skywalker import __version__
from .skywalker import __description__
from .skywalker import __license__
from .skywalker import __author__
from .skywalker import __author_email__
from .skywalker import __url__
from .skywalker import __doc__
from .skywalker import *
from .test import *
| 26.916667 | 39 | 0.835913 | 39 | 323 | 6.076923 | 0.307692 | 0.493671 | 0.721519 | 0.21097 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.126935 | 323 | 11 | 40 | 29.363636 | 0.840426 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
16fdca397b9c6435ec02f333e8439d7be2deddba | 27 | py | Python | src/schnetpack/md/parsers/__init__.py | sxie22/schnetpack | a421e7c121c7bdb2838fb30f887812110ecfa3c6 | [
"MIT"
] | null | null | null | src/schnetpack/md/parsers/__init__.py | sxie22/schnetpack | a421e7c121c7bdb2838fb30f887812110ecfa3c6 | [
"MIT"
] | null | null | null | src/schnetpack/md/parsers/__init__.py | sxie22/schnetpack | a421e7c121c7bdb2838fb30f887812110ecfa3c6 | [
"MIT"
] | 1 | 2022-02-10T17:39:11.000Z | 2022-02-10T17:39:11.000Z | from .orca_parser import *
| 13.5 | 26 | 0.777778 | 4 | 27 | 5 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.148148 | 27 | 1 | 27 | 27 | 0.869565 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
e5306a81bdfaa94441b4fb79275af84e6f3a5407 | 9,070 | py | Python | gmm_fit3.py | stlucas44/direct_gmm | b5523d835f21a35089cfd6975caf2a6f07c43b78 | [
"MIT"
] | 13 | 2019-04-14T21:09:08.000Z | 2020-04-08T22:10:24.000Z | gmm_fit3.py | stlucas44/direct_gmm | b5523d835f21a35089cfd6975caf2a6f07c43b78 | [
"MIT"
] | null | null | null | gmm_fit3.py | stlucas44/direct_gmm | b5523d835f21a35089cfd6975caf2a6f07c43b78 | [
"MIT"
] | 4 | 2019-04-14T00:58:08.000Z | 2020-11-16T17:21:47.000Z | import numpy as np
from scipy.stats import multivariate_normal as mvn_pdf
import matplotlib.pyplot as plt
from cluster import MiniBatchKMeans
from mixture import GaussianMixture
import pymesh
from scipy.special import logsumexp
# Low-resolution Stanford bunny mesh; its faces define the triangle soup the
# losses below integrate over.
mesh0 = pymesh.load_mesh("bunny/bun_zipper_res4.ply")
#mesh3 = pymesh.load_mesh("bunny/bun_zipper_res4_pds.ply")
#mesh4 = pymesh.load_mesh("bunny/bun_zipper_res4_25k_pds.ply")
# Resampled variant of the same bunny (presumably a surface point sampling,
# "sds" -- TODO confirm); only its vertices are used, as a point cloud.
mesh4 = pymesh.load_mesh("bunny/bun_zipper_res4_sds.ply")
def get_centroids(mesh):
    """Return per-face centroids and areas of a triangle mesh.

    ``mesh`` must expose ``vertices`` (V, 3) and ``faces`` (F, 3) integer
    index arrays. Returns a pair ``(centroids, areas)`` of shapes (F, 3)
    and (F,).
    """
    # Gather the three corner coordinates of every face: (F, 3 corners, 3 dims).
    corners = mesh.vertices[mesh.faces.reshape(-1), :].reshape(
        (mesh.faces.shape[0], 3, -1))
    centroids = corners.sum(1) / 3.0
    # Two edge vectors from the first corner span each triangle; half the
    # magnitude of their cross product is the triangle's area.
    ab = corners[:, 1, :] - corners[:, 0, :]
    ac = corners[:, 2, :] - corners[:, 0, :]
    areas = 0.5 * np.linalg.norm(np.cross(ab, ac), axis=1)
    return centroids, areas
# Per-face centroids and areas of the bunny (areas unused at this point).
com,a = get_centroids(mesh0)
# (F, 3, 3) triangle-corner array; the loss functions below read this
# module-level name.
face_vert = mesh0.vertices[mesh0.faces.reshape(-1),:].reshape((mesh0.faces.shape[0],3,-1))
#gm3 = GaussianMixture(100,init_params='kmeans'); gm3.set_triangles(face_vert); gm3.fit(com); gm3.set_triangles(None)
# Fit a single-component GMM (project-local mixture.GaussianMixture) to the
# resampled point cloud's vertices.
gm3 = GaussianMixture(1,init_params='kmeans',tol=1e-4,max_iter=100); gm3.fit(mesh4.vertices)
def tri_loss(gmm, faces_and_verts):
    """Mean GMM log-likelihood of a triangle soup, integrated per face.

    For each face the Gaussian quadratic form is averaged in closed form
    over the triangle: the centroid term ``dev' S^-1 dev`` plus a 1/12
    second-moment correction built from the corner coordinates.

    Parameters
    ----------
    gmm : fitted mixture exposing ``means_``, ``covariances_``,
        ``precisions_`` and ``weights_`` (one row/matrix per component).
    faces_and_verts : (F, 3, 3) array; ``faces_and_verts[f, v, :]`` is
        corner ``v`` of face ``f``.

    Returns
    -------
    float
        Mean over faces of ``log sum_k pi_k * E_triangle[N(x; mu_k, S_k)]``.
    """
    # Bug fix: the original body read the module-level global `face_vert`
    # instead of the `faces_and_verts` argument, so the parameter was only
    # partially used. Dead computations (component responsibilities via
    # mvn_pdf, `areas`, `total`) that never affected the return value have
    # been removed.
    centroids = faces_and_verts.mean(1)
    A = faces_and_verts[:, 0, :]
    B = faces_and_verts[:, 1, :]
    C = faces_and_verts[:, 2, :]
    # Per-face, per-component log of (weight * expected density).
    log_terms = np.zeros((faces_and_verts.shape[0], gmm.weights_.shape[0]))
    for i, (mu, s, si, pi) in enumerate(
            zip(gmm.means_, gmm.covariances_, gmm.precisions_, gmm.weights_)):
        dev = centroids - mu
        # Gaussian log normalization constant (d = 3 dimensions).
        const = -0.5 * np.log(2 * np.pi) * 3 - 0.5 * np.log(np.linalg.det(s))
        t1 = (dev.dot(si) * dev).sum(1)
        # Second-moment correction of the quadratic form over the triangle.
        t2 = (A.dot(si) * A + B.dot(si) * B + C.dot(si) * C
              - 3 * centroids.dot(si) * centroids).sum(1)
        log_terms[:, i] = const - 0.5 * (t1 + (1.0 / 12.0) * t2) + np.log(pi)
    return logsumexp(log_terms, axis=1).mean()
def tri_loss_lb(gmm, faces_and_verts):
    """Area-weighted lower-bound variant of the per-triangle GMM loss.

    Each component log-density (centroid value plus the exact 1/12
    second-moment correction) is weighted by the triangle's area; the total
    is normalized by the whole surface area.

    Fix: centroids and areas are now computed from the ``faces_and_verts``
    argument; the original accidentally closed over the module-level
    ``face_vert``.  The dead responsibility-weight computation (its use,
    ``thing*weights``, was commented out) has been removed.

    :param gmm: fitted sklearn-style GaussianMixture.
    :param faces_and_verts: (n_faces, 3, 3) array of triangle vertices.
    :return: scalar area-normalized sum of weighted component log-densities.
    """
    centroids = faces_and_verts.mean(1)
    # Edge vectors AB, AC -> triangle area = |AB x AC| / 2.
    ab_ac = faces_and_verts[:, 1:3, :] - faces_and_verts[:, 0:1, :]
    areas = np.linalg.norm(np.cross(ab_ac[:, 0, :], ab_ac[:, 1, :]), axis=1) / 2.0
    A = faces_and_verts[:, 0, :]
    B = faces_and_verts[:, 1, :]
    C = faces_and_verts[:, 2, :]
    log_comp = np.zeros((faces_and_verts.shape[0], gmm.weights_.shape[0]))
    for i, (mu, s, si, pi) in enumerate(zip(gmm.means_, gmm.covariances_,
                                            gmm.precisions_, gmm.weights_)):
        dev = centroids - mu
        res = -0.5 * np.log(2 * np.pi) * 3
        res -= 0.5 * np.log(np.linalg.det(s))
        t1 = (dev.dot(si) * dev).sum(1)
        # Second moment of the triangle about its centroid.
        t2 = (A.dot(si) * A + B.dot(si) * B + C.dot(si) * C
              - 3 * centroids.dot(si) * centroids).sum(1)
        res = res - 0.5 * (t1 + (1.0 / 12.0) * t2)
        log_comp[:, i] = (res + np.log(pi)) * areas
    return log_comp.sum(axis=1).sum() / areas.sum()
def pt_loss(gmm, points):
    """Mean GMM log-likelihood of a point set.

    Dead code removed relative to the original (unused ``total`` accumulator,
    duplicated ``res = 0.0``, manual index counter).

    :param gmm: fitted sklearn-style GaussianMixture exposing ``means_``,
        ``covariances_``, ``precisions_`` and ``weights_``.
    :param points: (n, 3) array of 3D points.
    :return: mean over points of log sum_k pi_k N(p; mu_k, Sigma_k).
    """
    log_comp = np.zeros((points.shape[0], gmm.weights_.shape[0]))
    for i, (mu, s, si, pi) in enumerate(zip(gmm.means_, gmm.covariances_,
                                            gmm.precisions_, gmm.weights_)):
        dev = points - mu
        # Gaussian log-density: normalization + Mahalanobis distance.
        res = -0.5 * np.log(2 * np.pi) * 3
        res -= 0.5 * np.log(np.linalg.det(s))
        res = res - 0.5 * (dev.dot(si) * dev).sum(1)
        log_comp[:, i] = res + np.log(pi)
    return logsumexp(log_comp, axis=1).mean()
def pt_loss_lb(gmm, points):
    """Lower-bound variant of the point loss: sum (not logsumexp) of the
    per-component weighted log-densities, averaged over points.

    The original also computed normalized responsibilities via
    ``mvn_pdf.pdf`` but their use (``thing*weights``) was commented out, so
    that dead computation has been removed; the returned value is unchanged.

    :param gmm: fitted sklearn-style GaussianMixture.
    :param points: (n, 3) array of 3D points.
    :return: mean over points of sum_k [log N_k(p) + log pi_k].
    """
    log_comp = np.zeros((points.shape[0], gmm.weights_.shape[0]))
    for i, (mu, s, si, pi) in enumerate(zip(gmm.means_, gmm.covariances_,
                                            gmm.precisions_, gmm.weights_)):
        dev = points - mu
        res = -0.5 * np.log(2 * np.pi) * 3
        res -= 0.5 * np.log(np.linalg.det(s))
        res = res - 0.5 * (dev.dot(si) * dev).sum(1)
        log_comp[:, i] = res + np.log(pi)
    return log_comp.sum(axis=1).mean()
def com_loss(gmm, points, areas):
    """Area-weighted centroid loss: logsumexp over components of the
    per-component log-density scaled by each point's relative area.

    Dead code removed relative to the original (unused ``total``, duplicated
    ``res = 0.0``, manual index counter).

    :param gmm: fitted sklearn-style GaussianMixture.
    :param points: (n, 3) array of face centroids.
    :param areas: (n,) array of face areas (weights, normalized by the mean).
    :return: mean over points of logsumexp_k [(log N_k + log pi_k) * w].
    """
    weight = areas / areas.mean()
    log_comp = np.zeros((points.shape[0], gmm.weights_.shape[0]))
    for i, (mu, s, si, pi) in enumerate(zip(gmm.means_, gmm.covariances_,
                                            gmm.precisions_, gmm.weights_)):
        dev = points - mu
        res = -0.5 * np.log(2 * np.pi) * 3
        res -= 0.5 * np.log(np.linalg.det(s))
        res = res - 0.5 * (dev.dot(si) * dev).sum(1)
        log_comp[:, i] = (res + np.log(pi)) * weight
    return logsumexp(log_comp, axis=1).mean()
def com_loss_lb(gmm, points, areas):
    """Lower-bound variant of :func:`com_loss`: plain sum over components
    instead of logsumexp, averaged over points.

    Dead code removed relative to the original (unused ``total``, duplicated
    ``res = 0.0``, manual index counter).

    :param gmm: fitted sklearn-style GaussianMixture.
    :param points: (n, 3) array of face centroids.
    :param areas: (n,) array of face areas (weights, normalized by the mean).
    :return: mean over points of sum_k [(log N_k + log pi_k) * w].
    """
    weight = areas / areas.mean()
    log_comp = np.zeros((points.shape[0], gmm.weights_.shape[0]))
    for i, (mu, s, si, pi) in enumerate(zip(gmm.means_, gmm.covariances_,
                                            gmm.precisions_, gmm.weights_)):
        dev = points - mu
        res = -0.5 * np.log(2 * np.pi) * 3
        res -= 0.5 * np.log(np.linalg.det(s))
        res = res - 0.5 * (dev.dot(si) * dev).sum(1)
        log_comp[:, i] = (res + np.log(pi)) * weight
    return log_comp.sum(axis=1).mean()
# Select which loss family to evaluate: the *_lb (area-weighted lower-bound)
# variants or the plain logsumexp losses.
if True:
    tl = tri_loss_lb
    cl = com_loss_lb
    pl = pt_loss_lb
    print("OMG")
else:
    tl = tri_loss
    cl = com_loss
    pl = pt_loss
# Report the three losses on mesh0's faces / centroids against the GMM gm3.
print("tri\t",tl(gm3,face_vert),'\t',0)
print("mpt\t",pl(gm3,com),'\t',0)
print('com\t',cl(gm3,com,a),'\t',0)
#print("ptLB\t",pt_loss_lb(gm3,com))
#print("spt\t",gm3.score(com))
#print("sp\t",gm3._estimate_weighted_log_prob(com).sum())
# Subsample increasing numbers of mesh4 vertices (log-spaced sizes) and print
# mean/std of the point loss over 10 random draws, to see how quickly the
# estimate stabilizes with sample size.
for pn in np.logspace(1,np.log10(mesh4.vertices.shape[0]*.95),10):
    scores = []
    for itern in range(10):
        ptsn = np.random.choice(range(mesh4.vertices.shape[0]),int(pn),replace=False)
        scores.append(pl(gm3,mesh4.vertices[ptsn,:]))
        #scores.append(gm3._estimate_weighted_log_prob(mesh4.vertices[ptsn,:]).sum()/pn)
    scores = np.array(scores)
    print(ptsn.shape[0],'\t',scores.mean(),'\t',scores.std())
#print(" ",gm3.score(mesh4.vertices))
#print(" ",gm3._estimate_weighted_log_prob(mesh4.vertices).sum()/mesh4.vertices.shape[0])
| 36.28 | 117 | 0.593054 | 1,493 | 9,070 | 3.496986 | 0.108506 | 0.022984 | 0.017238 | 0.016089 | 0.767861 | 0.755602 | 0.743919 | 0.711741 | 0.711741 | 0.711741 | 0 | 0.043496 | 0.209151 | 9,070 | 249 | 118 | 36.425703 | 0.684372 | 0.260198 | 0 | 0.713415 | 0 | 0 | 0.013277 | 0.008147 | 0 | 0 | 0 | 0 | 0 | 1 | 0.042683 | false | 0 | 0.042683 | 0 | 0.128049 | 0.030488 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
e54a7586196132392254687a371512fbba6370e8 | 143 | py | Python | sanrr/__init__.py | ddfabbro/SANRR | aa5b71b1e8ac1e0471828922ff50e098d550a157 | [
"MIT"
] | 1 | 2019-01-18T02:53:12.000Z | 2019-01-18T02:53:12.000Z | sanrr/__init__.py | ddfabbro/SANRR | aa5b71b1e8ac1e0471828922ff50e098d550a157 | [
"MIT"
] | null | null | null | sanrr/__init__.py | ddfabbro/SANRR | aa5b71b1e8ac1e0471828922ff50e098d550a157 | [
"MIT"
] | null | null | null | from sanrr.download_data import create_lfw_db, create_fei_db, save_files
from sanrr.metamodel import MyKriging
from sanrr.register import SANRR | 47.666667 | 72 | 0.874126 | 23 | 143 | 5.173913 | 0.608696 | 0.226891 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.090909 | 143 | 3 | 73 | 47.666667 | 0.915385 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
e54fa83884441021ce4f8d5b0576c772667a1c58 | 1,066 | py | Python | models/datasets.py | Jerrypiglet/Total3DUnderstanding | 655d00a988c839af3b73f8ab890c3f70c1500147 | [
"MIT"
] | 288 | 2020-06-27T16:13:35.000Z | 2022-03-31T12:47:42.000Z | models/datasets.py | Jerrypiglet/Total3DUnderstanding | 655d00a988c839af3b73f8ab890c3f70c1500147 | [
"MIT"
] | 38 | 2020-07-03T09:19:24.000Z | 2022-03-17T12:32:56.000Z | models/datasets.py | Jerrypiglet/Total3DUnderstanding | 655d00a988c839af3b73f8ab890c3f70c1500147 | [
"MIT"
] | 40 | 2020-06-28T03:21:01.000Z | 2022-03-29T10:17:20.000Z | # Base data of networks
# author: ynie
# date: Feb, 2020
import os
from torch.utils.data import Dataset
import json
class SUNRGBD(Dataset):
def __init__(self, config, mode):
'''
initiate SUNRGBD dataset for data loading
:param config: config file
:param mode: train/val/test mode
'''
self.config = config
self.mode = mode
split_file = os.path.join(config['data']['split'], mode + '.json')
with open(split_file) as file:
self.split = json.load(file)
def __len__(self):
return len(self.split)
class PIX3D(Dataset):
def __init__(self, config, mode):
'''
initiate PIX3D dataset for data loading
:param config: config file
:param mode: train/val/test mode
'''
self.config = config
self.mode = mode
split_file = os.path.join(config['data']['split'], mode + '.json')
with open(split_file) as file:
self.split = json.load(file)
def __len__(self):
return len(self.split) | 27.333333 | 74 | 0.592871 | 135 | 1,066 | 4.533333 | 0.303704 | 0.065359 | 0.045752 | 0.058824 | 0.800654 | 0.800654 | 0.800654 | 0.683007 | 0.683007 | 0.683007 | 0 | 0.007979 | 0.294559 | 1,066 | 39 | 75 | 27.333333 | 0.805851 | 0.237336 | 0 | 0.761905 | 0 | 0 | 0.038199 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.190476 | false | 0 | 0.142857 | 0.095238 | 0.52381 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 6 |
e56dfe0402cd897bebabb77a08d06f2ce191dcfa | 43 | py | Python | HelloWorld.py | debajyoti-iitkgp/Hello-World | fb77b47681a635c71ccee0e0d2d269102941c337 | [
"MIT"
] | null | null | null | HelloWorld.py | debajyoti-iitkgp/Hello-World | fb77b47681a635c71ccee0e0d2d269102941c337 | [
"MIT"
] | null | null | null | HelloWorld.py | debajyoti-iitkgp/Hello-World | fb77b47681a635c71ccee0e0d2d269102941c337 | [
"MIT"
] | null | null | null | #printing hello world
print('Hello World')
| 14.333333 | 21 | 0.767442 | 6 | 43 | 5.5 | 0.666667 | 0.606061 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.116279 | 43 | 2 | 22 | 21.5 | 0.868421 | 0.465116 | 0 | 0 | 0 | 0 | 0.5 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 6 |
e5bf7fa6d8b7753273fe876864ab8a7931dc8d74 | 44 | py | Python | dpmhm/datasets/xjtu/__init__.py | yanncalec/dpmhm | 0a242bc8add0ba1463bb2b63b2c15abb80b83fa7 | [
"MIT"
] | null | null | null | dpmhm/datasets/xjtu/__init__.py | yanncalec/dpmhm | 0a242bc8add0ba1463bb2b63b2c15abb80b83fa7 | [
"MIT"
] | null | null | null | dpmhm/datasets/xjtu/__init__.py | yanncalec/dpmhm | 0a242bc8add0ba1463bb2b63b2c15abb80b83fa7 | [
"MIT"
] | null | null | null | """xjtu dataset."""
from .xjtu import Xjtu
| 11 | 22 | 0.659091 | 6 | 44 | 4.833333 | 0.666667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.159091 | 44 | 3 | 23 | 14.666667 | 0.783784 | 0.295455 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
e5d4339e9f2cad35d466847ae465cbfce99f5150 | 19,448 | py | Python | telaPrincipal.py | jhontavares/NOVO-SISTEMA-ERP | a44f9ff77b6ff3fa57d326847e8e81a797064170 | [
"MIT"
] | null | null | null | telaPrincipal.py | jhontavares/NOVO-SISTEMA-ERP | a44f9ff77b6ff3fa57d326847e8e81a797064170 | [
"MIT"
] | null | null | null | telaPrincipal.py | jhontavares/NOVO-SISTEMA-ERP | a44f9ff77b6ff3fa57d326847e8e81a797064170 | [
"MIT"
] | null | null | null | from tkinter import ttk
from tkinter import *
import sqlite3
class Product(object):
    """Main window of the ERP system.

    Builds a home screen with one button per business module; each button
    opens a Toplevel window for that module via the corresponding method.
    All widget positions are fixed pixel coordinates for an 803x500 layout.
    """
    #db_name = 'database.db'

    def __init__(self, window):
        """Build the home screen: header bars, module buttons, footer buttons.

        :param window: the Tk root window this application runs in.
        """
        self.wind = window
        self.wind.title("NOME DO SISTEMA")
        self.wind.geometry("803x500+150+120")
        self.wind.configure(bg='#FFFF00')
        # Top status bar: user name, company name and current date.
        self.frame_02 = Frame(self.wind, width=799, height=26, bg='#000000', bd=2, relief='groove')
        self.frame_02.place(x=2, y=1)
        # NOTE(review): the attribute ``lbl_01_02`` is rebound three times,
        # so only the last (date) label stays reachable afterwards.
        self.lbl_01_02 = Label(self.frame_02, text='Usuário', font='Georgia 12 bold', bg='#000000', fg='white')
        self.lbl_01_02.place(x=2, y=1)
        self.lbl_01_02 = Label(self.frame_02, text='NOME DA EMPRESA', font='Georgia 12 bold', bg='#000000', fg='white')
        self.lbl_01_02.place(x=300, y=1)
        self.lbl_01_02 = Label(self.frame_02, text="'Dia' de 'Mês' de 'Ano'", font='Georgia 12 bold', bg='#000000', fg='white')
        self.lbl_01_02.place(x=550, y=1)
        # Second bar with the name of the current screen.
        self.frame_01 = Frame(self.wind, width=799, height=39, bg='#000000', bd=2, relief='groove')
        self.frame_01.place(x=2, y=40)
        self.lbl_01_01 = Label(self.frame_01, text="HOME", font='Georgia 12 bold', bg='#000000', fg='white')
        self.lbl_01_01.place(x=370, y=8)
        # Module buttons of the system (2 rows x 4 columns grid).
        self.frame_03 = Frame(self.wind, width=200, height=150, bg='#4F4F4F', bd=2, relief='groove')
        self.frame_03.place(x=2, y=100)
        self.btn_03 = Button(self.frame_03, text='Administrativo', font='Georgia 15 bold', width=13, bg='pink', height=5, command=self.administrativo)
        self.btn_03.pack()
        self.frame_04 = Frame(self.wind, width=200, height=150, bg='#4F4F4F', bd=2, relief='groove')
        self.frame_04.place(x=203, y=100)
        self.btn_04 = Button(self.frame_04, text='Comercial', font='Georgia 15 bold', width=13, height=5, command=self.comercial)
        self.btn_04.pack()
        self.frame_05 = Frame(self.wind, width=200, height=150, bg='#4F4F4F', bd=2, relief='groove')
        self.frame_05.place(x=404, y=100)
        self.btn_05 = Button(self.frame_05, text='Financeiro', font='Georgia 15 bold', width=13, height=5, command=self.financeiro)
        self.btn_05.pack()
        self.frame_06 = Frame(self.wind, width=200, height=150, bg='#4F4F4F', bd=2, relief='groove')
        self.frame_06.place(x=605, y=100)
        self.btn_06 = Button(self.frame_06, text='Logística', font='Georgia 15 bold', width=13, height=5, command=self.logistica)
        self.btn_06.pack()
        self.frame_07 = Frame(self.wind, width=200, height=150, bg='#4F4F4F', bd=2, relief='groove')
        self.frame_07.place(x=2, y=248)
        self.btn_07 = Button(self.frame_07, text='Transporte', font='Georgia 15 bold', width=13, height=5, command=self.transporte)
        self.btn_07.pack()
        # NOTE(review): the next three buttons have no ``command`` yet.
        self.frame_08 = Frame(self.wind, width=200, height=150, bg='#4F4F4F', bd=2, relief='groove')
        self.frame_08.place(x=203, y=248)
        self.btn_08 = Button(self.frame_08, text='Fiscal', font='Georgia 15 bold', width=13, height=5)
        self.btn_08.pack()
        self.frame_09 = Frame(self.wind, width=200, height=150, bg='Chocolate', bd=2, relief='groove')
        self.frame_09.place(x=404, y=248)
        self.btn_09 = Button(self.frame_09, text='Contábil', font='Georgia 15 bold', width=13, height=5)
        self.btn_09.pack()
        self.frame_10 = Frame(self.wind, width=200, height=150, bg='#4F4F4F', bd=2, relief='groove')
        self.frame_10.place(x=605, y=248)
        self.btn_10 = Button(self.frame_10, text='Pessoal', font='Georgia 15 bold', width=13, height=5)
        self.btn_10.pack()
        # Settings button
        self.frame_11 = Frame(self.wind, width=130, height=50, bg='#4F4F4F', bd=2, relief='groove')
        self.frame_11.place(x=25, y=433)
        self.btn_11 = Button(self.frame_11, text='Configuração', width=18, height=2)
        self.btn_11.pack()
        # Close-application button
        self.frame_12 = Frame(self.wind, width=130, height=50, bg='#4F4F4F', bd=2, relief='groove')
        self.frame_12.place(x=166, y=433)
        self.btn_12 = Button(self.frame_12, text='Fechar', width=18, height=2)
        self.btn_12.pack()

    # Module screens
    def administrativo(self):
        """Open the Administrative module window (header, three panels,
        footer buttons)."""
        #self.botaocadastro = PhotoImage('./img/cadastro.gif')
        self.tela_administrativo = Toplevel()
        self.tela_administrativo.title('NOME DO SISTEMA')
        self.tela_administrativo.geometry("803x500+190+120")
        self.tela_administrativo.configure(bg='#FFFF00')
        self.frame_adm_01 = Frame(self.tela_administrativo, width=799, height=26, bg='#000000', bd=2, relief='flat')
        self.frame_adm_01.place(x=2, y=2)
        self.lbl_m1_01 = Label(self.frame_adm_01, text="Usuário", font='Georgia 12 bold', bg='#000000', fg='white')
        self.lbl_m1_01.place(x=2, y=1)
        self.lbl_m1_03 = Label(self.frame_adm_01, text="'Dia' de 'Mês' de 'Ano'", font='Georgia 12 bold', bg='#000000', fg='white')
        self.lbl_m1_03.place(x=600, y=1)
        self.frame_adm_02 = Frame(self.tela_administrativo, width=799, height=39, bg='#000000', bd=2, relief='flat')
        self.frame_adm_02.place(x=2, y=44)
        self.lbl_m1_04 = Label(self.frame_adm_02, text="ADMINISTRATIVO", font='Georgia 16 bold', bg='#000000', fg='white')
        self.lbl_m1_04.place(x=291, y=2)
        # Registration panel
        self.frame_adm_03 = Frame(self.tela_administrativo, width=190, height=300, bg='black')
        self.frame_adm_03.place(x=20, y=106)
        self.lbl_m1_05 = Label(self.frame_adm_03, text='Cadastros', bg='blue', width=15, font='Georgia 14 bold', fg='white')
        self.lbl_m1_05.place(x=2, y=2)
        self.btn_cad_fin_01 = Button(self.frame_adm_03, width=18, height=14, bg='green')
        self.btn_cad_fin_01.place(x=1, y=33)
        self.frame_adm_04 = Frame(self.tela_administrativo, width=190, height=300)
        self.frame_adm_04.place(x=305, y=106)
        self.lbl_m1_06 = Label(self.frame_adm_04, text='Movimentação', bg='blue', width=15, font='Georgia 14 bold', fg='white')
        self.lbl_m1_06.place(x=2, y=2)
        self.frame_adm_05 = Frame(self.tela_administrativo, width=190, height=300)
        self.frame_adm_05.place(x=592, y=106)
        self.lbl_m1_07 = Label(self.frame_adm_05, text='Visualização', bg='blue', width=15, font='Georgia 14 bold', fg='white')
        self.lbl_m1_07.place(x=2, y=2)
        # Footer: settings / manual / close
        self.frame_adm_06 = Frame(self.tela_administrativo, width=130, height=50, bg='#000000', bd=2, relief='groove')
        self.frame_adm_06.place(x=25, y=437)
        self.btn_m1_01 = Button(self.frame_adm_06, text='Configuração', width=18, height=2)
        self.btn_m1_01.pack()
        self.frame_adm_07 = Frame(self.tela_administrativo, width=130, height=50, bg='#000000', bd=2, relief='groove')
        self.frame_adm_07.place(x=166, y=437)
        self.btn_m1_02 = Button(self.frame_adm_07, text='Manual', width=18, height=2)
        self.btn_m1_02.pack()
        self.frame_adm_08 = Frame(self.tela_administrativo, width=130, height=50, bg='#000000', bd=2, relief='groove')
        self.frame_adm_08.place(x=640, y=437)
        self.btn_m1_03 = Button(self.frame_adm_08, text='Fechar', width=18, height=2)
        self.btn_m1_03.pack()
        self.tela_administrativo.mainloop()

    def comercial(self):
        """Open the Commercial module window (same layout as administrativo)."""
        self.tela_Comercial = Toplevel()
        self.tela_Comercial.title('NOME DO SISTEMA')
        self.tela_Comercial.geometry("803x500+190+120")
        self.tela_Comercial.configure(bg='#FFFF00')
        self.frame_com_01 = Frame(self.tela_Comercial, width=799, height=26, bg='#000000', bd=2, relief='flat')
        self.frame_com_01.place(x=2, y=2)
        self.lbl_m1_01 = Label(self.frame_com_01, text="Usuário", font='Georgia 12 bold', bg='#000000', fg='white')
        self.lbl_m1_01.place(x=2, y=1)
        self.lbl_m1_02 = Label(self.frame_com_01, text="NOME DA EMPRESA", font='Georgia 12 bold', bg='#000000', fg='white')
        self.lbl_m1_02.place(x=300, y=1)
        self.lbl_m1_03 = Label(self.frame_com_01, text="'Dia' de 'Mês' de 'Ano'", font='Georgia 12 bold', bg='#000000', fg='white')
        self.lbl_m1_03.place(x=600, y=1)
        self.frame_com_02 = Frame(self.tela_Comercial, width=799, height=39, bg='#000000', bd=2, relief='flat')
        self.frame_com_02.place(x=2, y=44)
        self.lbl_m1_04 = Label(self.frame_com_02, text="COMERCIAL", font='Georgia 16 bold', bg='#000000', fg='white')
        self.lbl_m1_04.place(x=291, y=2)
        self.frame_com_03 = Frame(self.tela_Comercial, width=190, height=300)
        self.frame_com_03.place(x=20, y=106)
        self.lbl_m1_05 = Label(self.frame_com_03, text='Cadastros', bg='blue', width=15, font='Georgia 14 bold', fg='white')
        self.lbl_m1_05.place(x=2, y=2)
        self.frame_com_04 = Frame(self.tela_Comercial, width=190, height=300)
        self.frame_com_04.place(x=305, y=106)
        self.lbl_m1_06 = Label(self.frame_com_04, text='Movimentação', bg='blue', width=15, font='Georgia 14 bold', fg='white')
        self.lbl_m1_06.place(x=2, y=2)
        self.frame_com_05 = Frame(self.tela_Comercial, width=190, height=300)
        self.frame_com_05.place(x=592, y=106)
        self.lbl_m1_07 = Label(self.frame_com_05, text='Visualização', bg='blue', width=15, font='Georgia 14 bold', fg='white')
        self.lbl_m1_07.place(x=2, y=2)
        self.frame_com_06 = Frame(self.tela_Comercial, width=130, height=50, bg='#000000', bd=2, relief='groove')
        self.frame_com_06.place(x=25, y=437)
        self.btn_m1_01 = Button(self.frame_com_06, text='Configuração', width=18, height=2)
        self.btn_m1_01.pack()
        self.frame_com_07 = Frame(self.tela_Comercial, width=130, height=50, bg='#000000', bd=2, relief='groove')
        self.frame_com_07.place(x=166, y=437)
        self.btn_m1_02 = Button(self.frame_com_07, text='Manual', width=18, height=2)
        self.btn_m1_02.pack()
        self.frame_com_08 = Frame(self.tela_Comercial, width=130, height=50, bg='#000000', bd=2, relief='groove')
        self.frame_com_08.place(x=640, y=437)
        self.btn_m1_03 = Button(self.frame_com_08, text='Fechar', width=18, height=2)
        self.btn_m1_03.pack()
        self.tela_Comercial.mainloop()

    # Financial module screen
    def financeiro(self):
        """Open the Financial module window."""
        self.tela_Financeiro = Toplevel()
        self.tela_Financeiro.title('NOME DO SISTEMA')
        self.tela_Financeiro.geometry("803x500+190+120")
        self.tela_Financeiro.configure(bg='#FFFF00')
        self.frame_fin_01 = Frame(self.tela_Financeiro, width=799, height=26, bg='#000000', bd=2, relief='flat')
        self.frame_fin_01.place(x=2, y=2)
        self.lbl_m1_01 = Label(self.frame_fin_01, text="Usuário", font='Georgia 12 bold', bg='#000000', fg='white')
        self.lbl_m1_01.place(x=2, y=1)
        self.lbl_m1_02 = Label(self.frame_fin_01, text="NOME DA EMPRESA", font='Georgia 12 bold', bg='#000000', fg='white')
        self.lbl_m1_02.place(x=300, y=1)
        self.lbl_m1_03 = Label(self.frame_fin_01, text="'Dia' de 'Mês' de 'Ano'", font='Georgia 12 bold', bg='#000000', fg='white')
        self.lbl_m1_03.place(x=600, y=1)
        self.frame_fin_02 = Frame(self.tela_Financeiro, width=799, height=39, bg='#000000', bd=2, relief='flat')
        self.frame_fin_02.place(x=2, y=44)
        # NOTE(review): title text says "COMERCIAL" — looks like a
        # copy-paste slip; probably should read "FINANCEIRO". Confirm and fix.
        self.lbl_m1_04 = Label(self.frame_fin_02, text="COMERCIAL", font='Georgia 16 bold', bg='#000000', fg='white')
        self.lbl_m1_04.place(x=291, y=2)
        self.frame_fin_03 = Frame(self.tela_Financeiro, width=190, height=300)
        self.frame_fin_03.place(x=20, y=106)
        self.lbl_m1_05 = Label(self.frame_fin_03, text='Cadastros',bg='blue', width=15, font='Georgia 14 bold', fg='white')
        self.lbl_m1_05.place(x=2, y=2)
        self.frame_fin_04 = Frame(self.tela_Financeiro, width=190, height=300)
        self.frame_fin_04.place(x=305, y=106)
        self.lbl_m1_06 = Label(self.frame_fin_04, text='Movimentação', bg='blue', width=15, font='Georgia 14 bold', fg='white')
        self.lbl_m1_06.place(x=2, y=2)
        self.frame_fin_05 = Frame(self.tela_Financeiro, width=190, height=300)
        self.frame_fin_05.place(x=592, y=106)
        self.lbl_m1_07 = Label(self.frame_fin_05, text='Visualização', bg='blue', width=15, font='Georgia 14 bold', fg='white')
        self.lbl_m1_07.place(x=2, y=2)
        self.frame_fin_06 = Frame(self.tela_Financeiro, width=130, height=50, bg='#000000', bd=2, relief='groove')
        self.frame_fin_06.place(x=25, y=437)
        self.btn_m1_01 = Button(self.frame_fin_06, text='Configuração', width=18, height=2)
        self.btn_m1_01.pack()
        self.frame_fin_07 = Frame(self.tela_Financeiro, width=130, height=50, bg='#000000', bd=2, relief='groove')
        self.frame_fin_07.place(x=166, y=437)
        self.btn_m1_02 = Button(self.frame_fin_07, text='Manual', width=18, height=2)
        self.btn_m1_02.pack()
        self.frame_fin_08 = Frame(self.tela_Financeiro, width=130, height=50, bg='#000000', bd=2, relief='groove')
        self.frame_fin_08.place(x=640, y=437)
        self.btn_m1_03 = Button(self.frame_fin_08, text='Fechar', width=18, height=2)
        self.btn_m1_03.pack()
        self.tela_Financeiro.mainloop()

    # Logistics module screen
    def logistica(self):
        """Open the Logistics module window."""
        self.tela_Logistica = Toplevel()
        self.tela_Logistica.title('NOME DO SISTEMA')
        self.tela_Logistica.geometry("803x500+190+120")
        self.tela_Logistica.configure(bg='#FFFF00')
        self.frame_log_01 = Frame(self.tela_Logistica, width=799, height=26, bg='#000000', bd=2, relief='flat')
        self.frame_log_01.place(x=2, y=2)
        self.lbl_m1_01 = Label(self.frame_log_01, text="Usuário", font='Georgia 12 bold', bg='#000000', fg='white')
        self.lbl_m1_01.place(x=2, y=1)
        self.lbl_m1_02 = Label(self.frame_log_01, text="NOME DA EMPRESA", font='Georgia 12 bold', bg='#000000', fg='white')
        self.lbl_m1_02.place(x=300, y=1)
        self.lbl_m1_03 = Label(self.frame_log_01, text="'Dia' de 'Mês' de 'Ano'", font='Georgia 12 bold', bg='#000000', fg='white')
        self.lbl_m1_03.place(x=600, y=1)
        self.frame_log_02 = Frame(self.tela_Logistica, width=799, height=39, bg='#000000', bd=2, relief='flat')
        self.frame_log_02.place(x=2, y=44)
        self.lbl_m1_04 = Label(self.frame_log_02, text="LOGÍSTICA", font='Georgia 16 bold', bg='#000000', fg='white')
        self.lbl_m1_04.place(x=291, y=2)
        self.frame_log_03 = Frame(self.tela_Logistica, width=190, height=300)
        self.frame_log_03.place(x=20, y=106)
        self.lbl_m1_05 = Label(self.frame_log_03, text='Cadastros',bg='blue', width=15, font='Georgia 14 bold', fg='white')
        self.lbl_m1_05.place(x=2, y=2)
        self.frame_log_04 = Frame(self.tela_Logistica, width=190, height=300)
        self.frame_log_04.place(x=305, y=106)
        self.lbl_m1_06 = Label(self.frame_log_04, text='Movimentação', bg='blue', width=15, font='Georgia 14 bold', fg='white')
        self.lbl_m1_06.place(x=2, y=2)
        self.frame_log_05 = Frame(self.tela_Logistica, width=190, height=300)
        self.frame_log_05.place(x=592, y=106)
        self.lbl_m1_07 = Label(self.frame_log_05, text='Visualização', bg='blue', width=15, font='Georgia 14 bold', fg='white')
        self.lbl_m1_07.place(x=2, y=2)
        self.frame_log_06 = Frame(self.tela_Logistica, width=130, height=50, bg='#000000', bd=2, relief='groove')
        self.frame_log_06.place(x=25, y=437)
        self.btn_m1_01 = Button(self.frame_log_06, text='Configuração', width=18, height=2)
        self.btn_m1_01.pack()
        self.frame_log_07 = Frame(self.tela_Logistica, width=130, height=50, bg='#000000', bd=2, relief='groove')
        self.frame_log_07.place(x=166, y=437)
        self.btn_m1_02 = Button(self.frame_log_07, text='Manual', width=18, height=2)
        self.btn_m1_02.pack()
        self.frame_log_08 = Frame(self.tela_Logistica, width=130, height=50, bg='#000000', bd=2, relief='groove')
        self.frame_log_08.place(x=640, y=437)
        self.btn_m1_03 = Button(self.frame_log_08, text='Fechar', width=18, height=2)
        self.btn_m1_03.pack()
        self.tela_Logistica.mainloop()

    # Transport module screen
    def transporte(self):
        """Open the Transport module window."""
        self.tela_Transporte = Toplevel()
        self.tela_Transporte.title('NOME DO SISTEMA')
        self.tela_Transporte.geometry("803x500+190+120")
        self.tela_Transporte.configure(bg='#FFFF00')
        self.frame_tra_01 = Frame(self.tela_Transporte, width=799, height=26, bg='#000000', bd=2, relief='flat')
        self.frame_tra_01.place(x=2, y=2)
        self.lbl_m1_01 = Label(self.frame_tra_01, text="Usuário", font='Georgia 12 bold', bg='#000000', fg='white')
        self.lbl_m1_01.place(x=2, y=1)
        self.lbl_m1_02 = Label(self.frame_tra_01, text="NOME DA EMPRESA", font='Georgia 12 bold', bg='#000000',
                               fg='white')
        self.lbl_m1_02.place(x=300, y=1)
        self.lbl_m1_03 = Label(self.frame_tra_01, text="'Dia' de 'Mês' de 'Ano'", font='Georgia 12 bold',
                               bg='#000000', fg='white')
        self.lbl_m1_03.place(x=600, y=1)
        self.frame_tra_02 = Frame(self.tela_Transporte, width=799, height=39, bg='#000000', bd=2, relief='flat')
        self.frame_tra_02.place(x=2, y=44)
        self.lbl_m1_04 = Label(self.frame_tra_02, text="TRANSPORTE", font='Georgia 16 bold', bg='#000000',
                               fg='white')
        self.lbl_m1_04.place(x=291, y=2)
        self.frame_tra_03 = Frame(self.tela_Transporte, width=190, height=300)
        self.frame_tra_03.place(x=20, y=106)
        self.lbl_m1_05 = Label(self.frame_tra_03, text='Cadastros', bg='blue', width=15, font='Georgia 14 bold',
                               fg='white')
        self.lbl_m1_05.place(x=2, y=2)
        self.frame_tra_04 = Frame(self.tela_Transporte, width=190, height=300)
        self.frame_tra_04.place(x=305, y=106)
        self.lbl_m1_06 = Label(self.frame_tra_04, text='Movimentação', bg='blue', width=15, font='Georgia 14 bold',
                               fg='white')
        self.lbl_m1_06.place(x=2, y=2)
        self.frame_tra_05 = Frame(self.tela_Transporte, width=190, height=300)
        self.frame_tra_05.place(x=592, y=106)
        self.lbl_m1_07 = Label(self.frame_tra_05, text='Visualização', bg='blue', width=15, font='Georgia 14 bold',
                               fg='white')
        self.lbl_m1_07.place(x=2, y=2)
        self.frame_tra_06 = Frame(self.tela_Transporte, width=130, height=50, bg='#000000', bd=2, relief='groove')
        self.frame_tra_06.place(x=25, y=437)
        self.btn_m1_01 = Button(self.frame_tra_06, text='Configuração', width=18, height=2)
        self.btn_m1_01.pack()
        self.frame_tra_07 = Frame(self.tela_Transporte, width=130, height=50, bg='#000000', bd=2, relief='groove')
        self.frame_tra_07.place(x=166, y=437)
        self.btn_m1_02 = Button(self.frame_tra_07, text='Manual', width=18, height=2)
        self.btn_m1_02.pack()
        self.frame_tra_08 = Frame(self.tela_Transporte, width=130, height=50, bg='#000000', bd=2, relief='groove')
        self.frame_tra_08.place(x=640, y=437)
        self.btn_m1_03 = Button(self.frame_tra_08, text='Fechar', width=18, height=2)
        self.btn_m1_03.pack()
        self.tela_Transporte.mainloop()
if __name__ == '__main__':
    # Create the Tk root window, build the application and enter the event loop.
    window = Tk()
    application = Product(window)
    window.mainloop()
e5da2c1c0ca3a26c2885a057cffe52eb642e5443 | 103 | py | Python | finrl_meta/env_execution_optimizing/order_execution_qlib/trade/network/__init__.py | eitin-infant/FinRL-Meta | 4c94011e58425796e7e2e5c1bf848afd65c828d6 | [
"MIT"
] | 214 | 2021-11-08T17:06:11.000Z | 2022-03-31T18:29:48.000Z | finrl_meta/env_execution_optimizing/order_execution_qlib/trade/network/__init__.py | eitin-infant/FinRL-Meta | 4c94011e58425796e7e2e5c1bf848afd65c828d6 | [
"MIT"
] | 51 | 2021-11-14T19:11:02.000Z | 2022-03-30T20:23:08.000Z | finrl_meta/env_execution_optimizing/order_execution_qlib/trade/network/__init__.py | eitin-infant/FinRL-Meta | 4c94011e58425796e7e2e5c1bf848afd65c828d6 | [
"MIT"
] | 110 | 2021-11-03T07:41:40.000Z | 2022-03-31T03:23:38.000Z | from .ppo import *
from .qmodel import *
from .teacher import *
from .util import *
from .opd import *
| 17.166667 | 22 | 0.708738 | 15 | 103 | 4.866667 | 0.466667 | 0.547945 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.194175 | 103 | 5 | 23 | 20.6 | 0.879518 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
e5ed9b0530188c6e9643cced845aa34fc26cd354 | 224 | py | Python | jupyter_matlab_vnc_proxy/resources/matlab_launcher.py | mathworks/jupyter-matlab-vnc-proxy | 0a946b3007c450e7fa79c5069174f87a8b3f6b09 | [
"BSD-2-Clause"
] | 10 | 2020-12-21T17:58:06.000Z | 2022-01-06T20:41:54.000Z | jupyter_matlab_vnc_proxy/resources/matlab_launcher.py | mathworks/jupyter-matlab-vnc-proxy | 0a946b3007c450e7fa79c5069174f87a8b3f6b09 | [
"BSD-2-Clause"
] | null | null | null | jupyter_matlab_vnc_proxy/resources/matlab_launcher.py | mathworks/jupyter-matlab-vnc-proxy | 0a946b3007c450e7fa79c5069174f87a8b3f6b09 | [
"BSD-2-Clause"
] | 3 | 2020-12-15T11:13:15.000Z | 2021-09-13T14:42:23.000Z | # Copyright 2020 The MathWorks, Inc.
from os import environ
import subprocess
if "MLM_LICENSE_FILE" in environ:
subprocess.check_call("matlab")
else :
subprocess.check_call(["matlab", "-desktop", "-licmode", "online"])
| 24.888889 | 69 | 0.741071 | 29 | 224 | 5.586207 | 0.758621 | 0.185185 | 0.234568 | 0.308642 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.020408 | 0.125 | 224 | 8 | 70 | 28 | 0.806122 | 0.151786 | 0 | 0 | 0 | 0 | 0.265957 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.333333 | 0 | 0.333333 | 0 | 1 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
e5ee0279145e8728df8b605801d61fbf95cd9f9a | 4,079 | py | Python | models/AUG.py | badeok0716/FINAL-PROJECT-AI-LECTURE | 5bbf5f58f8f8caa7a22898c5f809ce2327395c9b | [
"MIT"
] | null | null | null | models/AUG.py | badeok0716/FINAL-PROJECT-AI-LECTURE | 5bbf5f58f8f8caa7a22898c5f809ce2327395c9b | [
"MIT"
] | null | null | null | models/AUG.py | badeok0716/FINAL-PROJECT-AI-LECTURE | 5bbf5f58f8f8caa7a22898c5f809ce2327395c9b | [
"MIT"
] | null | null | null | import torch
import torch.nn.functional as F
import random
def AUGMENT(x, aug='', diff=False):
if aug != 'noaug':
for p in policy.split('_'):
for f in AUGMENT_FNS[p]:
x = f(x, diff=diff)
x = x.contiguous()
return x
def mask(x, prob=0.1, diff=False):
    """Replace a fraction `prob` of positions in every row with mask id 4658.

    Position 0 is never masked (kept as the initial token). When `diff` is
    True, `x` is a one-hot [batch, seq_len, vocab] tensor and the masking is
    expressed with differentiable arithmetic; otherwise `x` is an integer id
    tensor [batch, seq_len] modified in place.
    """
    if diff:
        batch, seq_len, vocab = x.shape
        keep = torch.ones(batch, seq_len, dtype=x.dtype, device=x.device)
        fill = torch.zeros(batch, seq_len, vocab, dtype=x.dtype, device=x.device)
        n_masked = int(seq_len * prob)
        candidates = list(range(1, seq_len))  # prevent initial token from masking
        for b in range(batch):
            for pos in random.sample(candidates, n_masked):
                keep[b][pos] = 0
                fill[b][pos][4658] = 1
        x = x * keep.unsqueeze(-1) + fill * (1 - keep.unsqueeze(-1))
    else:
        if x.requires_grad:
            print("x has requires grad. something wrong!")
            x = x.detach()
        batch, seq_len = x.shape
        n_masked = int(seq_len * prob)
        candidates = list(range(1, seq_len))  # prevent initial token from masking
        for b in range(batch):
            for pos in random.sample(candidates, n_masked):
                x[b][pos] = 4658
    return x
def rand(x, prob=0.1, diff=False):
    """Replace a fraction `prob` of positions in every row with random token
    ids drawn uniformly from [1, 4657].

    Position 0 is never replaced (kept as the initial token). When `diff` is
    True, `x` is a one-hot [batch, seq_len, vocab] tensor and the replacement
    is expressed with differentiable arithmetic; otherwise `x` is an integer
    id tensor [batch, seq_len] modified in place.
    """
    if diff:
        batch, seq_len, vocab = x.shape
        keep = torch.ones(batch, seq_len, dtype=x.dtype, device=x.device)
        fill = torch.zeros(batch, seq_len, vocab, dtype=x.dtype, device=x.device)
        n_masked = int(seq_len * prob)
        candidates = list(range(1, seq_len))  # prevent initial token from masking
        tokens = list(range(1, 4658))
        for b in range(batch):
            for pos in random.sample(candidates, n_masked):
                keep[b][pos] = 0
                fill[b][pos][random.choice(tokens)] = 1
        x = x * keep.unsqueeze(-1) + fill * (1 - keep.unsqueeze(-1))
    else:
        if x.requires_grad:
            print("x has requires grad. something wrong!")
            x = x.detach()
        batch, seq_len = x.shape
        n_masked = int(seq_len * prob)
        candidates = list(range(1, seq_len))  # prevent initial token from masking
        tokens = list(range(1, 4658))
        for b in range(batch):
            for pos in random.sample(candidates, n_masked):
                x[b][pos] = random.choice(tokens)
    return x
def swap(x, diff=False):
    """Swap the values at two randomly chosen positions (never position 0)
    of every batch row.

    Fixes over the original:
      * removed `num_mask = int(seq_len * prob)` — `prob` was never a
        parameter of this function, so both branches raised NameError;
      * the diff branch now actually exchanges the two positions (it used to
        write each position's own value back, a no-op);
      * the non-diff branch clones the temporary (plain tensor indexing
        returns a view, so the old code undid its own swap) and the function
        now returns x.

    When `diff` is True, `x` is a one-hot [batch, seq_len, vocab] tensor and
    the swap is expressed with differentiable masking; otherwise `x` is an
    integer id tensor [batch, seq_len] modified in place.
    """
    if diff:
        x_detach = x.detach()
        batch, seq_len, vocab = x.shape
        mask = torch.ones(batch, seq_len, dtype=x.dtype, device=x.device)
        mask_tokens = torch.zeros(batch, seq_len, vocab, dtype=x.dtype, device=x.device)
        idx_list = [int(i) for i in range(1, seq_len)]  # prevent initial token from swapping
        for bidx in range(batch):
            i1, i2 = random.sample(idx_list, 2)
            mask[bidx][i1] = 0
            mask[bidx][i2] = 0
            mask_tokens[bidx][i1] = x_detach[bidx][i2]  # exchanged, not copied back
            mask_tokens[bidx][i2] = x_detach[bidx][i1]
        x = x * mask.unsqueeze(-1) + mask_tokens * (1 - mask.unsqueeze(-1))
    else:
        if x.requires_grad:
            print("x has requires grad. something wrong!")
            x = x.detach()
        batch, seq_len = x.shape
        idx_list = [int(i) for i in range(1, seq_len)]  # prevent initial token from swapping
        for bidx in range(batch):
            i1, i2 = random.sample(idx_list, 2)
            tmp = x[bidx][i1].clone()  # clone: plain indexing yields a view
            x[bidx][i1] = x[bidx][i2]
            x[bidx][i2] = tmp
    return x
AUGMENT_FNS = {
'mask': [mask],
'rand': [rand],
'swap' : [swap],
} | 40.386139 | 91 | 0.566806 | 612 | 4,079 | 3.648693 | 0.111111 | 0.064487 | 0.059113 | 0.049261 | 0.847291 | 0.835647 | 0.816838 | 0.816838 | 0.816838 | 0.816838 | 0 | 0.024416 | 0.307183 | 4,079 | 101 | 92 | 40.386139 | 0.765747 | 0.052709 | 0 | 0.697917 | 0 | 0 | 0.033437 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.041667 | false | 0 | 0.03125 | 0 | 0.104167 | 0.03125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
f9242b68f7491d47f1097ea7c5163bb5f86b9c0e | 146 | py | Python | lpipe/testing/__init__.py | anton-chekanov/lpipe | acc2c18150584e2e330eb0fbce889ea0ec77cd62 | [
"Apache-2.0"
] | null | null | null | lpipe/testing/__init__.py | anton-chekanov/lpipe | acc2c18150584e2e330eb0fbce889ea0ec77cd62 | [
"Apache-2.0"
] | null | null | null | lpipe/testing/__init__.py | anton-chekanov/lpipe | acc2c18150584e2e330eb0fbce889ea0ec77cd62 | [
"Apache-2.0"
] | null | null | null | from .awslambda import *
from .dynamodb import *
from .kinesis import *
from .s3 import *
from .sqs import *
from .utils import *
# flake8: noqa
| 16.222222 | 24 | 0.712329 | 20 | 146 | 5.2 | 0.5 | 0.480769 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.016949 | 0.191781 | 146 | 8 | 25 | 18.25 | 0.864407 | 0.082192 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
00851f92a820c7ae1ca517ae7c493628404cd0e4 | 24 | py | Python | icoshift3/__init__.py | Sour-Smelno/icoshift_py3 | 1a4b6947bfa61fb66682e04372d92865bd517637 | [
"BSD-2-Clause-FreeBSD"
] | 13 | 2015-02-03T22:22:41.000Z | 2022-02-09T10:21:26.000Z | icoshift3/__init__.py | Sour-Smelno/icoshift_py3 | 1a4b6947bfa61fb66682e04372d92865bd517637 | [
"BSD-2-Clause-FreeBSD"
] | 2 | 2016-02-05T12:07:17.000Z | 2020-12-02T15:41:41.000Z | icoshift3/__init__.py | Sour-Smelno/icoshift_py3 | 1a4b6947bfa61fb66682e04372d92865bd517637 | [
"BSD-2-Clause-FreeBSD"
] | 10 | 2016-09-12T16:19:12.000Z | 2021-09-03T06:35:20.000Z | from .icoshift import *
| 12 | 23 | 0.75 | 3 | 24 | 6 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.166667 | 24 | 1 | 24 | 24 | 0.9 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
9703d54c13447bc8a11679c3c592a213bcd6eec3 | 28 | py | Python | Charm/models/__init__.py | TanyaAdams1/Charm | cc6dd64d01f8cb4cf0eb92dadefcb7575d75ec9d | [
"BSD-3-Clause"
] | 17 | 2018-04-23T20:17:58.000Z | 2021-04-12T19:28:40.000Z | Charm/models/__init__.py | TanyaAdams1/Charm | cc6dd64d01f8cb4cf0eb92dadefcb7575d75ec9d | [
"BSD-3-Clause"
] | 52 | 2019-08-29T00:39:11.000Z | 2021-01-02T22:49:41.000Z | Charm/models/__init__.py | TanyaAdams1/Charm | cc6dd64d01f8cb4cf0eb92dadefcb7575d75ec9d | [
"BSD-3-Clause"
] | 3 | 2018-04-19T19:24:38.000Z | 2020-11-06T00:33:53.000Z | from .distributions import * | 28 | 28 | 0.821429 | 3 | 28 | 7.666667 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.107143 | 28 | 1 | 28 | 28 | 0.92 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
972a40940661deb5fcf836f6e6848b59739928c9 | 33 | py | Python | GameLauncher.py | YEETER1234321/PyhtonGame | 2ab1796d089bbeb2077ddff3f2aea6d8d4d22968 | [
"MIT"
] | null | null | null | GameLauncher.py | YEETER1234321/PyhtonGame | 2ab1796d089bbeb2077ddff3f2aea6d8d4d22968 | [
"MIT"
] | null | null | null | GameLauncher.py | YEETER1234321/PyhtonGame | 2ab1796d089bbeb2077ddff3f2aea6d8d4d22968 | [
"MIT"
] | null | null | null | import game
from sys import exit
| 11 | 20 | 0.818182 | 6 | 33 | 4.5 | 0.833333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.181818 | 33 | 2 | 21 | 16.5 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
975c7734da90e07557e858e066a9a86562e855d1 | 234 | py | Python | emojigg/errors.py | NextChai/emojigg | 0583d789b3ea1ff26d49275901107cb6529eff16 | [
"MIT"
] | null | null | null | emojigg/errors.py | NextChai/emojigg | 0583d789b3ea1ff26d49275901107cb6529eff16 | [
"MIT"
] | null | null | null | emojigg/errors.py | NextChai/emojigg | 0583d789b3ea1ff26d49275901107cb6529eff16 | [
"MIT"
] | null | null | null |
class NotImplemented(Exception):
    """Raised when a requested feature has not been implemented yet.

    NOTE(review): this name shadows the builtin ``NotImplemented`` constant
    within the module; kept because callers import it under this name.

    Attributes:
        message: human-readable description, also returned by str(exc).
        args: any extra positional context supplied by the raiser.
    """

    def __init__(self, message, *args):
        self.args = args
        self.message = message

    def __str__(self) -> str:
        return self.message
class WrongType(Exception):
pass | 21.272727 | 39 | 0.619658 | 25 | 234 | 5.48 | 0.48 | 0.240876 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.286325 | 234 | 11 | 40 | 21.272727 | 0.820359 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.25 | false | 0.125 | 0 | 0.125 | 0.625 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | 0 | 6 |
976246577d663752a224d60d76f4b1655877f09a | 70 | py | Python | server/app/utils/__init__.py | WagnerJM/sentinel | d978ebac1b2ede79f6bdd3c48167a278acf46654 | [
"MIT"
] | null | null | null | server/app/utils/__init__.py | WagnerJM/sentinel | d978ebac1b2ede79f6bdd3c48167a278acf46654 | [
"MIT"
] | null | null | null | server/app/utils/__init__.py | WagnerJM/sentinel | d978ebac1b2ede79f6bdd3c48167a278acf46654 | [
"MIT"
] | null | null | null | from uuid import UUID
def str2uuid(string):
    """Parse a canonical UUID string into a ``uuid.UUID`` instance."""
    return UUID(hex=string)
| 11.666667 | 23 | 0.728571 | 10 | 70 | 5.1 | 0.7 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.017857 | 0.2 | 70 | 5 | 24 | 14 | 0.892857 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.333333 | false | 0 | 0.333333 | 0.333333 | 1 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 1 | 1 | 0 | 0 | 6 |
9765e82f5ab2d7b995d95f9ab82525eae4a7554f | 274 | py | Python | Codewars/5kyu/simple-css-selector-comparison/Python/test.py | RevansChen/online-judge | ad1b07fee7bd3c49418becccda904e17505f3018 | [
"MIT"
] | 7 | 2017-09-20T16:40:39.000Z | 2021-08-31T18:15:08.000Z | Codewars/5kyu/simple-css-selector-comparison/Python/test.py | RevansChen/online-judge | ad1b07fee7bd3c49418becccda904e17505f3018 | [
"MIT"
] | null | null | null | Codewars/5kyu/simple-css-selector-comparison/Python/test.py | RevansChen/online-judge | ad1b07fee7bd3c49418becccda904e17505f3018 | [
"MIT"
] | null | null | null | # Python - 2.7.6
# Codewars sample tests for the CSS-selector-specificity kata: compare(a, b)
# returns whichever selector has the higher specificity.
# NOTE(review): from the cases below, ties (e.g. '.big' vs '.small') appear
# to return the SECOND argument — confirm against the kata description.
Test.describe('Do some testing')
Test.assert_equals(compare('body p', 'div'), 'body p')
Test.assert_equals(compare('.class', '#id'), '#id')
Test.assert_equals(compare('div.big', '.small'), 'div.big')
Test.assert_equals(compare('.big', '.small'), '.small')
| 34.25 | 59 | 0.664234 | 41 | 274 | 4.341463 | 0.463415 | 0.224719 | 0.359551 | 0.516854 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011905 | 0.080292 | 274 | 7 | 60 | 39.142857 | 0.694444 | 0.051095 | 0 | 0 | 0 | 0 | 0.302326 | 0 | 0 | 0 | 0 | 0 | 0.8 | 1 | 0 | true | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
979027af717f3e116840e7ebf4fb0d584a31cd8c | 28,881 | py | Python | models/Unet.py | hemanths933/Segmentation_Unet | 701585b31df7e4159e2fdbe56aaca99d9a4a8ea9 | [
"MIT"
] | null | null | null | models/Unet.py | hemanths933/Segmentation_Unet | 701585b31df7e4159e2fdbe56aaca99d9a4a8ea9 | [
"MIT"
] | null | null | null | models/Unet.py | hemanths933/Segmentation_Unet | 701585b31df7e4159e2fdbe56aaca99d9a4a8ea9 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import tensorflow as tf
import numpy as np
from models.Model import Model
from losses.Pixelwise_weighted_loss import Pixelwise_weighted_loss
from metrics.IOU import IOU
from tensorflow.python.keras import layers
from tensorflow.python.keras import losses
from tensorflow.python.keras import models
from tensorflow.python.keras import backend as K
import math
class Unet(Model):
def __init__(self):
print("unet init")
Model.__init__(self)
self.learning_rate = tf.train.exponential_decay(0.0001, tf.Variable(0, trainable=False),
10, 0.8, staircase=True)
self.loss = Pixelwise_weighted_loss().compute_loss
self.optimizer = tf.train.GradientDescentOptimizer(self.learning_rate)
self.metric = IOU()
def crop(self,tensor, reference):
# print(tensor.shape.as_list()[1]-reference.shape.as_list()[1])
if ((tensor.shape.as_list()[1] - reference.shape.as_list()[1]) % 2 == 0):
offset_x = (tensor.shape.as_list()[1] - reference.shape.as_list()[1]) // 2
else:
offset_x = ((tensor.shape.as_list()[1] - reference.shape.as_list()[1]) // 2) + 1
# print(tensor.shape.as_list()[2]-reference.shape.as_list()[2])
if ((tensor.shape.as_list()[2] - reference.shape.as_list()[2]) % 2 == 0):
offset_y = (tensor.shape.as_list()[2] - reference.shape.as_list()[2]) // 2
else:
offset_y = ((tensor.shape.as_list()[2] - reference.shape.as_list()[2]) // 2) + 1
offset = [0, offset_x, offset_y, 0]
# print("offset is ",offset)
cropped_tensor = tf.slice(tensor, offset, [-1, reference.shape.as_list()[1], reference.shape.as_list()[2], -1])
return cropped_tensor
def concat(self,tensor,reference):
cropped = self.crop(tensor,reference)
return tf.concat([cropped,reference],axis=-1)
def network(self,images,reuse = tf.AUTO_REUSE):
with tf.variable_scope("weights", reuse=reuse):
input_layer = tf.reshape(images,[-1,572,572,3])
print(input_layer)
W1 = tf.get_variable(name='W1',shape=[3,3,3,64],initializer=tf.initializers.random_normal(mean=0,stddev=math.sqrt(2/576)))
conv1 = tf.nn.conv2d(input_layer,W1,padding="VALID",strides=[1,1,1,1])
conv1 = tf.nn.relu(conv1)
#conv1 = tf.layers.conv2d(inputs=input_layer,filters=W1,kernel_size=[3, 3],padding="valid",activation=tf.nn.relu)
print(conv1)
W2 = tf.get_variable(name='W2', shape=[3, 3, 64, 64], initializer=tf.initializers.random_normal(mean=0,stddev=math.sqrt(2/576)))
conv2 = tf.nn.conv2d(conv1,W2,padding="VALID",strides=[1,1,1,1])
conv2 = tf.nn.relu(conv2)
#conv2 = tf.layers.conv2d(inputs=conv1,filters=W2,kernel_size=[3, 3],padding="valid",activation=tf.nn.relu)#,name = 'conv_merge_4'
print(conv2)
pool1 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)
print(pool1)
W3 = tf.get_variable(name='W3', shape=[3, 3, 64, 128], initializer=tf.initializers.random_normal(mean=0,stddev=math.sqrt(2/576)))
conv3 = tf.nn.conv2d(pool1,W3,padding="VALID",strides=[1,1,1,1])
conv3 = tf.nn.relu(conv3)
#conv3 = tf.layers.conv2d(inputs=pool1,filters=W3,kernel_size=[3, 3],padding="valid",activation=tf.nn.relu)
print(conv3)
W4 = tf.get_variable(name='W4', shape=[3, 3, 128, 128], initializer=tf.initializers.random_normal(mean=0,stddev=math.sqrt(2/576)))
conv4 = tf.nn.conv2d(conv3,W4,padding="VALID",strides=[1,1,1,1])
conv4 = tf.nn.relu(conv4)
#conv4 = tf.layers.conv2d(inputs=conv3,filters=W4,kernel_size=[3, 3],padding="valid",activation=tf.nn.relu)#,name = 'conv_merge_3'
print(conv4)
pool2 = tf.layers.max_pooling2d(inputs=conv4, pool_size=[2, 2], strides=2)
print(pool2)
W5 = tf.get_variable(name='W5', shape=[3, 3, 128, 256], initializer=tf.initializers.random_normal(mean=0,stddev=math.sqrt(2/576)))
conv5 = tf.nn.conv2d(pool2,W5,padding="VALID",strides=[1,1,1,1])
conv5 = tf.nn.relu(conv5)
#conv5 = tf.layers.conv2d(inputs=pool2,filters=W5,kernel_size=[3, 3],padding="valid",activation=tf.nn.relu)
print(conv5)
W6 = tf.get_variable(name='W6', shape=[3, 3, 256, 256], initializer=tf.initializers.random_normal(mean=0,stddev=math.sqrt(2/576)))
conv6 = tf.nn.conv2d(conv5,W6,padding="VALID",strides=[1,1,1,1])
conv6 = tf.nn.relu(conv6)
#conv6 = tf.layers.conv2d(inputs=conv5,filters=W6,kernel_size=[3, 3],padding="valid",activation=tf.nn.relu)#,name = 'conv_merge_2'
print(conv6)
pool3 = tf.layers.max_pooling2d(inputs=conv6, pool_size=[2, 2], strides=2)
print(pool3)
W7 = tf.get_variable(name='W7', shape=[3, 3, 256, 512], initializer=tf.initializers.random_normal(mean=0,stddev=math.sqrt(2/576)))
conv7 = tf.nn.conv2d(pool3,W7,padding="VALID",strides=[1,1,1,1])
conv7 = tf.nn.relu(conv7)
#conv7 = tf.layers.conv2d(inputs=pool3,filters=W7,kernel_size=[3, 3],padding="valid",activation=tf.nn.relu)
print(conv7)
W8 = tf.get_variable(name='W8', shape=[3, 3, 512, 512], initializer=tf.initializers.random_normal(mean=0,stddev=math.sqrt(2/576)))
conv8 = tf.nn.conv2d(conv7,W8,padding="VALID",strides=[1,1,1,1])
conv8 = tf.nn.relu(conv8)
#conv8 = tf.layers.conv2d(inputs=conv7,filters=W8,kernel_size=[3, 3],padding="valid",activation=tf.nn.relu)#,name = 'conv_merge_1'
print(conv8)
pool4 = tf.layers.max_pooling2d(inputs=conv8, pool_size=[2, 2], strides=2,name = 'p4')
print(pool4)
W9 = tf.get_variable(name='W9', shape=[3, 3, 512, 1024], initializer=tf.initializers.random_normal(mean=0,stddev=math.sqrt(2/576)))
conv9 = tf.nn.conv2d(pool4,W9,padding="VALID",strides=[1,1,1,1])
conv9 = tf.nn.relu(conv9)
#conv9 = tf.layers.conv2d(inputs=pool4,filters=W9,kernel_size=[3, 3],padding="valid",activation=tf.nn.relu)
print(conv9)
W10 = tf.get_variable(name='W10', shape=[3, 3, 1024, 1024], initializer=tf.initializers.random_normal(mean=0,stddev=math.sqrt(2/576)))
conv10 = tf.nn.conv2d(conv9,W10,padding="VALID",strides=[1,1,1,1])
conv10 = tf.nn.relu(conv10)
#conv10 = tf.layers.conv2d(inputs=conv9,filters=W10,kernel_size=[3, 3],padding="valid",activation=tf.nn.relu)
print(conv10)
W11 = tf.get_variable(name='W11', shape=[2, 2, 512, 1024], initializer=tf.initializers.random_normal(mean=0,stddev=math.sqrt(2/576)))
deconv1 = tf.nn.conv2d_transpose(conv10,W11,strides = [1,2,2,1],padding='VALID',output_shape=[1,56,56,512])
deconv1 = tf.nn.relu(deconv1)
#deconv1 = tf.layers.conv2d_transpose(inputs=conv10,filters=W11,kernel_size=[2, 2],strides = (2,2),padding='valid',activation=tf.nn.relu)
print(deconv1)
concat1 = self.concat(conv8,deconv1)
print("concat1:",concat1)
W12 = tf.get_variable(name='W12', shape=[3, 3, 1024, 512], initializer=tf.initializers.random_normal(mean=0,stddev=math.sqrt(2/576)))
conv11 = tf.nn.conv2d(concat1,W12,padding="VALID",strides=[1,1,1,1])
conv11 = tf.nn.relu(conv11)
#conv11 = tf.layers.conv2d(inputs=concat1,filters=W12,kernel_size=[3, 3],padding="valid",activation=tf.nn.relu)
print(conv11)
W13 = tf.get_variable(name='W13', shape=[3, 3, 512, 512], initializer=tf.initializers.random_normal(mean=0,stddev=math.sqrt(2/576)))
conv12 = tf.nn.conv2d(conv11,W13,padding="VALID",strides=[1,1,1,1])
conv12 = tf.nn.relu(conv12)
#conv12 = tf.layers.conv2d(inputs=conv11,filters=W13,kernel_size=[3, 3],padding="valid",activation=tf.nn.relu)
print(conv12)
W14 = tf.get_variable(name='W14', shape=[2, 2, 256, 512], initializer=tf.initializers.random_normal(mean=0,stddev=math.sqrt(2/576)))
deconv2 = tf.nn.conv2d_transpose(conv12,W14,strides = [1,2,2,1],padding='VALID',output_shape=[1,104,104,256])
deconv2 = tf.nn.relu(deconv2)
#deconv2 = tf.layers.conv2d_transpose(inputs=conv12,filters=W14,kernel_size=[2, 2],strides = (2,2),padding='valid',activation=tf.nn.relu)
print(deconv2)
concat2 = self.concat(conv6,deconv2)
print("concat2:",concat2)
W15 = tf.get_variable(name='W15', shape=[3, 3, 512, 256], initializer=tf.initializers.random_normal(mean=0,stddev=math.sqrt(2/576)))
conv13 = tf.nn.conv2d(concat2,W15,padding="VALID",strides=[1,1,1,1])
conv13 = tf.nn.relu(conv13)
#conv13 = tf.layers.conv2d(inputs=concat2,filters=W15,kernel_size=[3, 3],padding="valid",activation=tf.nn.relu)
print(conv13)
W16 = tf.get_variable(name='W16', shape=[3, 3, 256, 256], initializer=tf.initializers.random_normal(mean=0,stddev=math.sqrt(2/576)))
conv14 = tf.nn.conv2d(conv13,W16,padding="VALID",strides=[1,1,1,1])
conv14 = tf.nn.relu(conv14)
#conv14 = tf.layers.conv2d(inputs=conv13,filters=W16,kernel_size=[3, 3],padding="valid",activation=tf.nn.relu)
print(conv14)
W17 = tf.get_variable(name='W17', shape=[2, 2, 128, 256], initializer=tf.initializers.random_normal(mean=0,stddev=math.sqrt(2/576)))
deconv3 = tf.nn.conv2d_transpose(conv14,W17,strides = [1,2,2,1],padding='VALID',output_shape=[1,200,200,128])
deconv3 = tf.nn.relu(deconv3)
#deconv3 = tf.layers.conv2d_transpose(inputs=conv14,filters=W17,kernel_size=[2, 2],strides = (2,2),padding='valid',activation=tf.nn.relu)
print(deconv3)
concat3 = self.concat(conv4,deconv3)
print("concat3:",concat3)
W18 = tf.get_variable(name='W18', shape=[3, 3, 256, 128], initializer=tf.initializers.random_normal(mean=0,stddev=math.sqrt(2/576)))
conv15 = tf.nn.conv2d(concat3,W18,padding="VALID",strides=[1,1,1,1])
conv15 = tf.nn.relu(conv15)
#conv15 = tf.layers.conv2d(inputs=concat3,filters=W18,kernel_size=[3, 3],padding="valid",activation=tf.nn.relu)
print(conv15)
W19 = tf.get_variable(name='W19', shape=[3, 3, 128, 128], initializer=tf.initializers.random_normal(mean=0,stddev=math.sqrt(2/576)))
conv16 = tf.nn.conv2d(conv15,W19,padding="VALID",strides=[1,1,1,1])
conv16 = tf.nn.relu(conv16)
#conv16 = tf.layers.conv2d(inputs=conv15,filters=W19,kernel_size=[3, 3],padding="valid",activation=tf.nn.relu)
print(conv16)
W20 = tf.get_variable(name='W20', shape=[2, 2, 64, 128], initializer=tf.initializers.random_normal(mean=0,stddev=math.sqrt(2/576)))
deconv4 = tf.nn.conv2d_transpose(conv16,W20,strides = [1,2,2,1],padding='VALID',output_shape=[1,392,392,64])
deconv4 = tf.nn.relu(deconv4)
#deconv4 = tf.layers.conv2d_transpose(inputs=conv16,filters=W20,kernel_size=[2, 2],strides = (2,2),padding='valid',activation=tf.nn.relu)
print(deconv4)
concat4 = self.concat(conv2,deconv4)
print("concat4:",concat4)
W21 = tf.get_variable(name='W21', shape=[3, 3, 128, 64], initializer=tf.initializers.random_normal(mean=0,stddev=math.sqrt(2/576)))
conv17 = tf.nn.conv2d(concat4,W21,padding="VALID",strides=[1,1,1,1])
conv17 = tf.nn.relu(conv17)
#conv17 = tf.layers.conv2d(inputs=concat4,filters=W21,kernel_size=[3, 3],padding="valid",activation=tf.nn.relu)
print(conv17)
W22 = tf.get_variable(name='W22', shape=[3, 3, 64, 64], initializer=tf.initializers.random_normal(mean=0,stddev=math.sqrt(2/576)))
conv18 = tf.nn.conv2d(conv17,W22,padding="VALID",strides=[1,1,1,1])
conv18 = tf.nn.relu(conv18)
#conv18 = tf.layers.conv2d(inputs=conv17,filters=W22,kernel_size=[3, 3],padding="valid",activation=tf.nn.relu)
print(conv18)
W23 = tf.get_variable(name='W23', shape=[1, 1, 64, 1], initializer=tf.initializers.random_normal(mean=0,stddev=math.sqrt(2/576)))
output = tf.nn.conv2d(conv18,W23,padding="VALID",strides=[1,1,1,1])
#output = tf.layers.conv2d(inputs=conv18,filters=W23,kernel_size=[1, 1],padding="valid",activation=tf.nn.sigmoid)
print(output)
return output
def network_visualize(self,images,reuse=tf.AUTO_REUSE):
with tf.variable_scope("weights", reuse=reuse):
summarylist = []
input_layer = tf.reshape(images,[-1,572,572,3])
input_image_summary = tf.summary.image('input_image_summary',tf.reshape(input_layer,[input_layer.shape[3],input_layer.shape[1],input_layer.shape[2],3]),1)
summarylist.append(input_image_summary)
print(input_layer)
W1 = tf.get_variable(name='W1',shape=[3,3,3,64],initializer=tf.contrib.layers.xavier_initializer())
conv1 = tf.nn.conv2d(input_layer,W1,padding="VALID",strides=[1,1,1,1])
conv1 = tf.nn.relu(conv1)
#conv1 = tf.layers.conv2d(inputs=input_layer,filters=W1,kernel_size=[3, 3],padding="valid",activation=tf.nn.relu)
conv1_summary = tf.summary.image('conv1_summary',tf.reshape(conv1,[conv1.shape[3],conv1.shape[1],conv1.shape[2],1]),64)
#summarylist.append(conv1_summary)
print(conv1)
W2 = tf.get_variable(name='W2', shape=[3, 3, 64, 64], initializer=tf.contrib.layers.xavier_initializer())
conv2 = tf.nn.conv2d(conv1,W2,padding="VALID",strides=[1,1,1,1])
conv2 = tf.nn.relu(conv2)
conv2_summary = tf.summary.image('conv2_summary',tf.reshape(conv2,[conv2.shape[3],conv2.shape[1],conv2.shape[2],1]),64)
#summarylist.append(conv2_summary)
#conv2 = tf.layers.conv2d(inputs=conv1,filters=W2,kernel_size=[3, 3],padding="valid",activation=tf.nn.relu)#,name = 'conv_merge_4'
print(conv2)
pool1 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)
pool1_summary = tf.summary.image('pool1_summary',tf.reshape(pool1,[pool1.shape[3],pool1.shape[1],pool1.shape[2],1]),64)
#summarylist.append(pool1_summary)
print(pool1)
W3 = tf.get_variable(name='W3', shape=[3, 3, 64, 128], initializer=tf.contrib.layers.xavier_initializer())
conv3 = tf.nn.conv2d(pool1,W3,padding="VALID",strides=[1,1,1,1])
conv3 = tf.nn.relu(conv3)
conv3_summary = tf.summary.image('conv3_summary',tf.reshape(conv3,[conv3.shape[3],conv3.shape[1],conv3.shape[2],1]),128)
#summarylist.append(conv3_summary)
#conv3 = tf.layers.conv2d(inputs=pool1,filters=W3,kernel_size=[3, 3],padding="valid",activation=tf.nn.relu)
print(conv3)
W4 = tf.get_variable(name='W4', shape=[3, 3, 128, 128], initializer=tf.contrib.layers.xavier_initializer())
conv4 = tf.nn.conv2d(conv3,W4,padding="VALID",strides=[1,1,1,1])
conv4 = tf.nn.relu(conv4)
conv4_summary = tf.summary.image('conv4_summary',tf.reshape(conv4,[conv4.shape[3],conv4.shape[1],conv4.shape[2],1]),128)
#summarylist.append(conv4_summary)
#conv4 = tf.layers.conv2d(inputs=conv3,filters=W4,kernel_size=[3, 3],padding="valid",activation=tf.nn.relu)#,name = 'conv_merge_3'
print(conv4)
pool2 = tf.layers.max_pooling2d(inputs=conv4, pool_size=[2, 2], strides=2)
pool2_summary = tf.summary.image('pool2_summary',tf.reshape(pool2,[pool2.shape[3],pool2.shape[1],pool2.shape[2],1]),128)
#summarylist.append(pool2_summary)
print(pool2)
W5 = tf.get_variable(name='W5', shape=[3, 3, 128, 256], initializer=tf.contrib.layers.xavier_initializer())
conv5 = tf.nn.conv2d(pool2,W5,padding="VALID",strides=[1,1,1,1])
conv5 = tf.nn.relu(conv5)
conv5_summary = tf.summary.image('conv5_summary',tf.reshape(conv5,[conv5.shape[3],conv5.shape[1],conv5.shape[2],1]),256)
#summarylist.append(conv5_summary)
#conv5 = tf.layers.conv2d(inputs=pool2,filters=W5,kernel_size=[3, 3],padding="valid",activation=tf.nn.relu)
print(conv5)
W6 = tf.get_variable(name='W6', shape=[3, 3, 256, 256], initializer=tf.contrib.layers.xavier_initializer())
conv6 = tf.nn.conv2d(conv5,W6,padding="VALID",strides=[1,1,1,1])
conv6 = tf.nn.relu(conv6)
conv6_summary = tf.summary.image('conv6_summary',tf.reshape(conv6,[conv6.shape[3],conv6.shape[1],conv6.shape[2],1]),256)
#summarylist.append(conv6_summary)
#conv6 = tf.layers.conv2d(inputs=conv5,filters=W6,kernel_size=[3, 3],padding="valid",activation=tf.nn.relu)#,name = 'conv_merge_2'
print(conv6)
pool3 = tf.layers.max_pooling2d(inputs=conv6, pool_size=[2, 2], strides=2)
pool3_summary = tf.summary.image('pool3_summary',tf.reshape(pool3,[pool3.shape[3],pool3.shape[1],pool3.shape[2],1]),256)
#summarylist.append(pool3_summary)
print(pool3)
W7 = tf.get_variable(name='W7', shape=[3, 3, 256, 512], initializer=tf.contrib.layers.xavier_initializer())
conv7 = tf.nn.conv2d(pool3,W7,padding="VALID",strides=[1,1,1,1])
conv7 = tf.nn.relu(conv7)
conv7_summary = tf.summary.image('conv7_summary',tf.reshape(conv7,[conv7.shape[3],conv7.shape[1],conv7.shape[2],1]),512)
#summarylist.append(conv7_summary)
#conv7 = tf.layers.conv2d(inputs=pool3,filters=W7,kernel_size=[3, 3],padding="valid",activation=tf.nn.relu)
print(conv7)
W8 = tf.get_variable(name='W8', shape=[3, 3, 512, 512], initializer=tf.contrib.layers.xavier_initializer())
conv8 = tf.nn.conv2d(conv7,W8,padding="VALID",strides=[1,1,1,1])
conv8 = tf.nn.relu(conv8)
conv8_summary = tf.summary.image('conv8_summary',tf.reshape(conv8,[conv8.shape[3],conv8.shape[1],conv8.shape[2],1]),512)
#summarylist.append(conv8_summary)
#conv8 = tf.layers.conv2d(inputs=conv7,filters=W8,kernel_size=[3, 3],padding="valid",activation=tf.nn.relu)#,name = 'conv_merge_1'
print(conv8)
pool4 = tf.layers.max_pooling2d(inputs=conv8, pool_size=[2, 2], strides=2,name = 'p4')
pool4_summary = tf.summary.image('pool4_summary',tf.reshape(pool4,[pool4.shape[3],pool4.shape[1],pool4.shape[2],1]),512)
#summarylist.append(pool4_summary)
print(pool4)
W9 = tf.get_variable(name='W9', shape=[3, 3, 512, 1024], initializer=tf.contrib.layers.xavier_initializer())
conv9 = tf.nn.conv2d(pool4,W9,padding="VALID",strides=[1,1,1,1])
conv9 = tf.nn.relu(conv9)
conv9_summary = tf.summary.image('conv9_summary',tf.reshape(conv9,[conv9.shape[3],conv9.shape[1],conv9.shape[2],1]),1024)
#summarylist.append(conv9_summary)
#conv9 = tf.layers.conv2d(inputs=pool4,filters=W9,kernel_size=[3, 3],padding="valid",activation=tf.nn.relu)
print(conv9)
W10 = tf.get_variable(name='W10', shape=[3, 3, 1024, 1024], initializer=tf.contrib.layers.xavier_initializer())
conv10 = tf.nn.conv2d(conv9,W10,padding="VALID",strides=[1,1,1,1])
conv10 = tf.nn.relu(conv10)
conv10_summary =tf.summary.image('conv10_summary',tf.reshape(conv10,[conv10.shape[3],conv10.shape[1],conv10.shape[2],1]),1024)
#summarylist.append(conv10_summary)
#conv10 = tf.layers.conv2d(inputs=conv9,filters=W10,kernel_size=[3, 3],padding="valid",activation=tf.nn.relu)
print(conv10)
W11 = tf.get_variable(name='W11', shape=[2, 2, 512, 1024], initializer=tf.contrib.layers.xavier_initializer())
deconv1 = tf.nn.conv2d_transpose(conv10,W11,strides = [1,2,2,1],padding='VALID',output_shape=[1,56,56,512])
deconv1 = tf.nn.relu(deconv1)
#deconv1 = tf.layers.conv2d_transpose(inputs=conv10,filters=W11,kernel_size=[2, 2],strides = (2,2),padding='valid',activation=tf.nn.relu)
print(deconv1)
concat1 = self.concat(conv8,deconv1)
print("concat1:",concat1)
W12 = tf.get_variable(name='W12', shape=[3, 3, 1024, 512], initializer=tf.contrib.layers.xavier_initializer())
conv11 = tf.nn.conv2d(concat1,W12,padding="VALID",strides=[1,1,1,1])
conv11 = tf.nn.relu(conv11)
#conv11 = tf.layers.conv2d(inputs=concat1,filters=W12,kernel_size=[3, 3],padding="valid",activation=tf.nn.relu)
print(conv11)
W13 = tf.get_variable(name='W13', shape=[3, 3, 512, 512], initializer=tf.initializers.random_normal(mean=0,stddev=math.sqrt(2/576)))
conv12 = tf.nn.conv2d(conv11,W13,padding="VALID",strides=[1,1,1,1])
conv12 = tf.nn.relu(conv12)
conv12_summary = tf.summary.image('conv12_summary',tf.reshape(conv12,[conv12.shape[3],conv12.shape[1],conv12.shape[2],1]),512)
#summarylist.append(conv12_summary)
#conv12 = tf.layers.conv2d(inputs=conv11,filters=W13,kernel_size=[3, 3],padding="valid",activation=tf.nn.relu)
print(conv12)
W14 = tf.get_variable(name='W14', shape=[2, 2, 256, 512], initializer=tf.contrib.layers.xavier_initializer())
deconv2 = tf.nn.conv2d_transpose(conv12,W14,strides = [1,2,2,1],padding='VALID',output_shape=[1,104,104,256])
deconv2 = tf.nn.relu(deconv2)
#deconv2 = tf.layers.conv2d_transpose(inputs=conv12,filters=W14,kernel_size=[2, 2],strides = (2,2),padding='valid',activation=tf.nn.relu)
print(deconv2)
concat2 = self.concat(conv6,deconv2)
print("concat2:",concat2)
W15 = tf.get_variable(name='W15', shape=[3, 3, 512, 256], initializer=tf.contrib.layers.xavier_initializer())
conv13 = tf.nn.conv2d(concat2,W15,padding="VALID",strides=[1,1,1,1])
conv13 = tf.nn.relu(conv13)
#conv13 = tf.layers.conv2d(inputs=concat2,filters=W15,kernel_size=[3, 3],padding="valid",activation=tf.nn.relu)
print(conv13)
W16 = tf.get_variable(name='W16', shape=[3, 3, 256, 256], initializer=tf.contrib.layers.xavier_initializer())
conv14 = tf.nn.conv2d(conv13,W16,padding="VALID",strides=[1,1,1,1])
conv14 = tf.nn.relu(conv14)
conv14_summary = tf.summary.image('conv14_summary',tf.reshape(conv14,[conv14.shape[3],conv14.shape[1],conv14.shape[2],1]),512)
#summarylist.append(conv14_summary)
#conv14 = tf.layers.conv2d(inputs=conv13,filters=W16,kernel_size=[3, 3],padding="valid",activation=tf.nn.relu)
print(conv14)
W17 = tf.get_variable(name='W17', shape=[2, 2, 128, 256], initializer=tf.contrib.layers.xavier_initializer())
deconv3 = tf.nn.conv2d_transpose(conv14,W17,strides = [1,2,2,1],padding='VALID',output_shape=[1,200,200,128])
deconv3 = tf.nn.relu(deconv3)
#deconv3 = tf.layers.conv2d_transpose(inputs=conv14,filters=W17,kernel_size=[2, 2],strides = (2,2),padding='valid',activation=tf.nn.relu)
print(deconv3)
concat3 = self.concat(conv4,deconv3)
print("concat3:",concat3)
W18 = tf.get_variable(name='W18', shape=[3, 3, 256, 128], initializer=tf.contrib.layers.xavier_initializer())
conv15 = tf.nn.conv2d(concat3,W18,padding="VALID",strides=[1,1,1,1])
conv15 = tf.nn.relu(conv15)
#conv15 = tf.layers.conv2d(inputs=concat3,filters=W18,kernel_size=[3, 3],padding="valid",activation=tf.nn.relu)
print(conv15)
W19 = tf.get_variable(name='W19', shape=[3, 3, 128, 128], initializer=tf.contrib.layers.xavier_initializer())
conv16 = tf.nn.conv2d(conv15,W19,padding="VALID",strides=[1,1,1,1])
conv16 = tf.nn.relu(conv16)
conv16_summary = tf.summary.image('conv16_summary',tf.reshape(conv16,[conv16.shape[3],conv16.shape[1],conv16.shape[2],1]),512)
#summarylist.append(conv16_summary)
#conv16 = tf.layers.conv2d(inputs=conv15,filters=W19,kernel_size=[3, 3],padding="valid",activation=tf.nn.relu)
print(conv16)
W20 = tf.get_variable(name='W20', shape=[2, 2, 64, 128], initializer=tf.contrib.layers.xavier_initializer())
deconv4 = tf.nn.conv2d_transpose(conv16,W20,strides = [1,2,2,1],padding='VALID',output_shape=[1,392,392,64])
deconv4 = tf.nn.relu(deconv4)
#deconv4 = tf.layers.conv2d_transpose(inputs=conv16,filters=W20,kernel_size=[2, 2],strides = (2,2),padding='valid',activation=tf.nn.relu)
print(deconv4)
concat4 = self.concat(conv2,deconv4)
print("concat4:",concat4)
W21 = tf.get_variable(name='W21', shape=[3, 3, 128, 64], initializer=tf.contrib.layers.xavier_initializer())
conv17 = tf.nn.conv2d(concat4,W21,padding="VALID",strides=[1,1,1,1])
conv17 = tf.nn.relu(conv17)
#conv17 = tf.layers.conv2d(inputs=concat4,filters=W21,kernel_size=[3, 3],padding="valid",activation=tf.nn.relu)
print(conv17)
W22 = tf.get_variable(name='W22', shape=[3, 3, 64, 64], initializer=tf.contrib.layers.xavier_initializer())
conv18 = tf.nn.conv2d(conv17,W22,padding="VALID",strides=[1,1,1,1])
conv18 = tf.nn.relu(conv18)
#conv18 = tf.layers.conv2d(inputs=conv17,filters=W22,kernel_size=[3, 3],padding="valid",activation=tf.nn.relu)
print(conv18)
W23 = tf.get_variable(name='W23', shape=[1, 1, 64, 1], initializer=tf.contrib.layers.xavier_initializer())
output = tf.nn.conv2d(conv18,W23,padding="VALID",strides=[1,1,1,1])
output_image_summary = tf.summary.image('output_image_summary',tf.reshape(output,[output.shape[3],output.shape[1],output.shape[2],1]),512)
summarylist.append(output_image_summary)
#output = tf.layers.conv2d(inputs=conv18,filters=W23,kernel_size=[1, 1],padding="valid",activation=tf.nn.sigmoid)
print(output)
return output,summarylist
def network_keras(self):
input_layer = layers.Input(shape=[572,572,3])
conv1 = layers.Conv2D(filters=64,kernel_size=[3, 3],padding="valid",activation=tf.nn.relu)(input_layer)
conv2 = layers.Conv2D(filters=64, kernel_size=[3, 3], padding="valid", activation=tf.nn.relu)(conv1)
pool1 = layers.MaxPool2D(pool_size=[2, 2], strides=2)(conv2)
conv3 = layers.Conv2D(filters=128, kernel_size=[3, 3], padding="valid", activation=tf.nn.relu)(pool1)
conv4 = layers.Conv2D(filters=128, kernel_size=[3, 3], padding="valid", activation=tf.nn.relu)(conv3)
pool2 = layers.MaxPool2D(pool_size=[2, 2], strides=2)(conv4)
conv5 = layers.Conv2D(filters=256, kernel_size=[3, 3], padding="valid", activation=tf.nn.relu)(pool2)
conv6 = layers.Conv2D(filters=256, kernel_size=[3, 3], padding="valid", activation=tf.nn.relu)(conv5)
pool3 = layers.MaxPool2D(pool_size=[2, 2], strides=2)(conv6)
conv7 = layers.Conv2D(filters=512, kernel_size=[3, 3], padding="valid", activation=tf.nn.relu)(pool3)
conv8 = layers.Conv2D(filters=512, kernel_size=[3, 3], padding="valid", activation=tf.nn.relu)(conv7)
pool4 = layers.MaxPool2D(pool_size=[2, 2], strides=2)(conv8)
conv9 = layers.Conv2D(filters=1024, kernel_size=[3, 3], padding="valid", activation=tf.nn.relu)(pool4)
conv10 = layers.Conv2D(filters=1024, kernel_size=[3, 3], padding="valid", activation=tf.nn.relu)(conv9)
deconv1 = layers.Conv2DTranspose(filters=512,kernel_size=[2, 2],strides = (2,2),padding='valid',activation=tf.nn.relu)(conv10)
concat1 = layers.Lambda(self.concat,arguments={'reference':deconv1})(conv8)
#concat1 = self.concat(conv8, deconv1)
conv11 = layers.Conv2D(filters=512, kernel_size=[3, 3], padding="valid", activation=tf.nn.relu)(concat1)
conv12 = layers.Conv2D(filters=512, kernel_size=[3, 3], padding="valid", activation=tf.nn.relu)(conv11)
deconv2 = layers.Conv2DTranspose(filters=256, kernel_size=[2, 2], strides=(2, 2), padding='valid',activation=tf.nn.relu)(conv12)
concat2 = layers.Lambda(self.concat,arguments={'reference':deconv2})(conv6)
#concat2 = self.concat(conv6, deconv2)
conv13 = layers.Conv2D(filters=256, kernel_size=[3, 3], padding="valid", activation=tf.nn.relu)(concat2)
conv14 = layers.Conv2D(filters=256, kernel_size=[3, 3], padding="valid", activation=tf.nn.relu)(conv13)
deconv3 = layers.Conv2DTranspose(filters=128, kernel_size=[2, 2], strides=(2, 2), padding='valid',activation=tf.nn.relu)(conv14)
concat3 = layers.Lambda(self.concat,arguments={'reference':deconv3})(conv4)
#concat3 = self.concat(conv4, deconv3)
conv15 = layers.Conv2D(filters=128, kernel_size=[3, 3], padding="valid", activation=tf.nn.relu)(concat3)
conv16 = layers.Conv2D(filters=128, kernel_size=[3, 3], padding="valid", activation=tf.nn.relu)(conv15)
deconv4 = layers.Conv2DTranspose(filters=64, kernel_size=[2, 2], strides=(2, 2), padding='valid',
activation=tf.nn.relu)(conv16)
concat4 = layers.Lambda(self.concat,arguments={'reference':deconv4})(conv2)
#concat4 = self.concat(conv2, deconv4)
conv17 = layers.Conv2D(filters=64, kernel_size=[3, 3], padding="valid", activation=tf.nn.relu)(concat4)
conv18 = layers.Conv2D(filters=64, kernel_size=[3, 3], padding="valid", activation=tf.nn.relu)(conv17)
output_layer = layers.Conv2D(filters=1, kernel_size=[1, 1], padding="valid", activation=tf.nn.sigmoid)(conv18)
model = models.Model(inputs = input_layer,outputs=output_layer)
return model
if __name__ == '__main__':
    # Build the U-Net wrapper and start training.
    net = Unet()
    net.train()
8ae5d9b404e6f6d284da150f6a3c364d8b9363b2 | 25 | py | Python | quad_sim_python/disp/__init__.py | ricardodeazambuja/quad_sim_python | f4afe76399b5325cd9136158b3f52c2b4d5170e5 | [
"MIT"
] | null | null | null | quad_sim_python/disp/__init__.py | ricardodeazambuja/quad_sim_python | f4afe76399b5325cd9136158b3f52c2b4d5170e5 | [
"MIT"
] | null | null | null | quad_sim_python/disp/__init__.py | ricardodeazambuja/quad_sim_python | f4afe76399b5325cd9136158b3f52c2b4d5170e5 | [
"MIT"
] | null | null | null | from .animation import *
| 12.5 | 24 | 0.76 | 3 | 25 | 6.333333 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.16 | 25 | 1 | 25 | 25 | 0.904762 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
c10ac39259a258dc16c7bb3248ebfc8c16d818e3 | 679 | py | Python | dataset/script/cafe24product_check_prd_img_cnt.py | jireh-father/tensorflow-triplet-loss | c8a3b3efbf4c68f63d58ee3bedaa8e42451f6a80 | [
"MIT"
] | null | null | null | dataset/script/cafe24product_check_prd_img_cnt.py | jireh-father/tensorflow-triplet-loss | c8a3b3efbf4c68f63d58ee3bedaa8e42451f6a80 | [
"MIT"
] | null | null | null | dataset/script/cafe24product_check_prd_img_cnt.py | jireh-father/tensorflow-triplet-loss | c8a3b3efbf4c68f63d58ee3bedaa8e42451f6a80 | [
"MIT"
] | null | null | null | import os, glob
# image_path = "D:/data/fashion/image_retrieval/cafe24product/dataset_train"
#
# prd_list = glob.glob(os.path.join(image_path, "*"))
#
# cnt_list = {}
# for prd_dir in prd_list:
# cnt = len(glob.glob(os.path.join(prd_dir, "*.jpg")))
# if cnt not in cnt_list:
# cnt_list[cnt] = 0
# cnt_list[cnt] += 1
# print(cnt_list)
# Tally how many product folders contain each possible count of *.jpg images.
image_path = "D:/data/fashion/image_retrieval/cafe24product/dataset_test/query"

prd_list = glob.glob(os.path.join(image_path, "*"))

cnt_list = {}
for prd_dir in prd_list:
    num_images = len(glob.glob(os.path.join(prd_dir, "*.jpg")))
    # Histogram: key = image count, value = number of folders with that count.
    cnt_list[num_images] = cnt_list.get(num_images, 0) + 1
print(cnt_list)
| 26.115385 | 79 | 0.659794 | 112 | 679 | 3.767857 | 0.25 | 0.165877 | 0.14218 | 0.132701 | 0.938389 | 0.938389 | 0.938389 | 0.938389 | 0.938389 | 0.677725 | 0 | 0.01426 | 0.173785 | 679 | 25 | 80 | 27.16 | 0.737968 | 0.463918 | 0 | 0 | 0 | 0 | 0.198864 | 0.181818 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.1 | 0 | 0.1 | 0.1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
c10d9d9d88e837fec3f75950a8fbc28ddee287ce | 61 | py | Python | cirrus/cli/components/functions/__init__.py | kave/cirrus-geo | cc10a6df55c5e124d87663e9c32b53b871216a62 | [
"Apache-2.0"
] | null | null | null | cirrus/cli/components/functions/__init__.py | kave/cirrus-geo | cc10a6df55c5e124d87663e9c32b53b871216a62 | [
"Apache-2.0"
] | null | null | null | cirrus/cli/components/functions/__init__.py | kave/cirrus-geo | cc10a6df55c5e124d87663e9c32b53b871216a62 | [
"Apache-2.0"
] | null | null | null | from ..base import Lambda
class Function(Lambda):
    """Cloud-function component backed by Lambda; adds no behaviour beyond the base class."""
    pass
| 10.166667 | 25 | 0.704918 | 8 | 61 | 5.375 | 0.875 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.213115 | 61 | 5 | 26 | 12.2 | 0.895833 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0.333333 | 0.333333 | 0 | 0.666667 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 6 |
c1822f1ade741e1f10bb047c3bc3c4d0149796d2 | 225 | py | Python | tcellmatch/models/layers/__init__.py | theislab/tcellmatch | ddd344e44147f97f35d6a4e7c3c7677981fd177e | [
"BSD-3-Clause"
] | 25 | 2019-08-14T22:39:40.000Z | 2022-03-02T15:42:35.000Z | tcellmatch/models/layers/__init__.py | theislab/tcellmatch | ddd344e44147f97f35d6a4e7c3c7677981fd177e | [
"BSD-3-Clause"
] | 2 | 2021-07-13T23:40:14.000Z | 2021-12-18T10:08:37.000Z | tcellmatch/models/layers/__init__.py | theislab/tcellmatch | ddd344e44147f97f35d6a4e7c3c7677981fd177e | [
"BSD-3-Clause"
] | 4 | 2020-02-21T20:43:41.000Z | 2022-03-21T14:38:58.000Z | from .layer_aa_embedding import LayerAaEmbedding
from .layer_attention import LayerMultiheadSelfAttention
from .layer_conv import LayerConv
from .layer_inception import LayerInception
from .layer_stack import build_layer_set
| 37.5 | 56 | 0.888889 | 28 | 225 | 6.857143 | 0.535714 | 0.234375 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.088889 | 225 | 5 | 57 | 45 | 0.936585 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
c1c3d352319f759c0de58f3e0858ba58fa2b29e6 | 44 | py | Python | applications/losses/__init__.py | khoehlein/fV-SRN | 601f3e952b090df92e875c233c2c9ca646523948 | [
"MIT"
] | null | null | null | applications/losses/__init__.py | khoehlein/fV-SRN | 601f3e952b090df92e875c233c2c9ca646523948 | [
"MIT"
] | null | null | null | applications/losses/__init__.py | khoehlein/fV-SRN | 601f3e952b090df92e875c233c2c9ca646523948 | [
"MIT"
] | null | null | null |
from .lossbuilder import LossBuilder
| 8.8 | 37 | 0.727273 | 4 | 44 | 8 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.25 | 44 | 4 | 38 | 11 | 0.969697 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
a9f7c005913eb9de5f4cb0b01703a241f9cae891 | 2,296 | py | Python | epytope/Data/pssms/smm/mat/B_44_03_9.py | christopher-mohr/epytope | 8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd | [
"BSD-3-Clause"
] | 7 | 2021-02-01T18:11:28.000Z | 2022-01-31T19:14:07.000Z | epytope/Data/pssms/smm/mat/B_44_03_9.py | christopher-mohr/epytope | 8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd | [
"BSD-3-Clause"
] | 22 | 2021-01-02T15:25:23.000Z | 2022-03-14T11:32:53.000Z | epytope/Data/pssms/smm/mat/B_44_03_9.py | christopher-mohr/epytope | 8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd | [
"BSD-3-Clause"
] | 4 | 2021-05-28T08:50:38.000Z | 2022-03-14T11:45:32.000Z | B_44_03_9 = {0: {'A': -0.284, 'C': -0.093, 'E': -0.17, 'D': 0.057, 'G': 0.046, 'F': 0.151, 'I': 0.154, 'H': 0.093, 'K': 0.176, 'M': -0.47, 'L': 0.155, 'N': -0.047, 'Q': -0.105, 'P': 0.332, 'S': -0.244, 'R': 0.117, 'T': 0.193, 'W': -0.004, 'V': 0.127, 'Y': -0.185}, 1: {'A': 0.137, 'C': 0.0, 'E': -1.644, 'D': -0.154, 'G': 0.142, 'F': 0.001, 'I': -0.003, 'H': 0.054, 'K': 0.252, 'M': -0.164, 'L': 0.057, 'N': -0.072, 'Q': -0.196, 'P': 0.36, 'S': 0.056, 'R': 0.296, 'T': 0.19, 'W': 0.0, 'V': 0.322, 'Y': 0.368}, 2: {'A': -0.072, 'C': 0.147, 'E': 0.237, 'D': 0.709, 'G': 0.145, 'F': -0.191, 'I': -0.38, 'H': -0.017, 'K': 0.602, 'M': -0.549, 'L': -0.14, 'N': -0.435, 'Q': 0.199, 'P': 0.768, 'S': -0.086, 'R': 0.19, 'T': -0.112, 'W': -0.563, 'V': -0.231, 'Y': -0.221}, 3: {'A': -0.034, 'C': -0.017, 'E': 0.0, 'D': -0.054, 'G': 0.008, 'F': 0.016, 'I': -0.038, 'H': -0.03, 'K': 0.104, 'M': 0.038, 'L': 0.009, 'N': -0.005, 'Q': -0.075, 'P': 0.081, 'S': -0.044, 'R': 0.018, 'T': -0.012, 'W': 0.008, 'V': -0.005, 'Y': 0.032}, 4: {'A': -0.156, 'C': -0.057, 'E': 0.086, 'D': 0.105, 'G': 0.17, 'F': -0.109, 'I': -0.063, 'H': 0.029, 'K': 0.237, 'M': -0.052, 'L': -0.215, 'N': 0.002, 'Q': 0.194, 'P': 0.065, 'S': -0.109, 'R': 0.199, 'T': 0.087, 'W': -0.208, 'V': -0.106, 'Y': -0.097}, 5: {'A': -0.317, 'C': 0.042, 'E': 0.242, 'D': 0.089, 'G': -0.119, 'F': -0.181, 'I': -0.094, 'H': -0.048, 'K': 0.11, 'M': 0.061, 'L': 0.007, 'N': 0.0, 'Q': -0.001, 'P': 0.154, 'S': 0.02, 'R': 0.151, 'T': -0.088, 'W': 0.053, 'V': -0.079, 'Y': -0.001}, 6: {'A': -0.01, 'C': 0.002, 'E': 0.157, 'D': 0.169, 'G': 0.224, 'F': -0.123, 'I': -0.039, 'H': -0.075, 'K': -0.075, 'M': 0.072, 'L': -0.291, 'N': 0.136, 'Q': -0.055, 'P': 0.011, 'S': 0.036, 'R': -0.093, 'T': -0.015, 'W': -0.147, 'V': 0.104, 'Y': 0.011}, 7: {'A': -0.017, 'C': 0.004, 'E': 0.005, 'D': 0.015, 'G': -0.001, 'F': -0.006, 'I': 0.035, 'H': -0.009, 'K': -0.02, 'M': 0.013, 'L': -0.001, 'N': 0.008, 'Q': 
0.022, 'P': 0.029, 'S': 0.001, 'R': -0.01, 'T': -0.005, 'W': -0.046, 'V': 0.004, 'Y': -0.023}, 8: {'A': 0.288, 'C': -0.137, 'E': 0.48, 'D': 0.408, 'G': 0.251, 'F': -0.662, 'I': -0.359, 'H': 0.124, 'K': 0.294, 'M': -0.221, 'L': -0.183, 'N': 0.361, 'Q': 0.65, 'P': 0.0, 'S': 0.174, 'R': 0.217, 'T': 0.553, 'W': -1.301, 'V': -0.02, 'Y': -0.915}, -1: {'con': 4.85249}} | 2,296 | 2,296 | 0.392422 | 557 | 2,296 | 1.612208 | 0.290844 | 0.020045 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.371295 | 0.162456 | 2,296 | 1 | 2,296 | 2,296 | 0.095684 | 0 | 0 | 0 | 0 | 0 | 0.079669 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
e706236965078c9cd5120168bce51646c37b19b8 | 46 | py | Python | zendesk/__init__.py | optixx/zendesk | 7a4439f1c5b46913acad6b3153266d52f011c11e | [
"MIT"
] | 31 | 2015-01-02T01:44:18.000Z | 2021-06-10T16:29:54.000Z | zendesk/__init__.py | optixx/zendesk | 7a4439f1c5b46913acad6b3153266d52f011c11e | [
"MIT"
] | 1 | 2015-04-08T07:54:50.000Z | 2015-04-09T14:29:38.000Z | zendesk/__init__.py | optixx/zendesk | 7a4439f1c5b46913acad6b3153266d52f011c11e | [
"MIT"
] | 23 | 2015-01-12T23:42:34.000Z | 2021-09-08T11:20:12.000Z | from zendesk import *
from endpoints import *
| 15.333333 | 23 | 0.782609 | 6 | 46 | 6 | 0.666667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.173913 | 46 | 2 | 24 | 23 | 0.947368 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
e795665bdb6d9a6a25571990aa478dec4d964eb5 | 11,650 | py | Python | lsq_quantizer/utils/lsq_network.py | yashbhalgat/QualcommAI-MicroNet-submission-MixNet | ea43bb6b08f2fd00a51742d62795f90fa386741f | [
"MIT"
] | 29 | 2019-11-07T02:52:03.000Z | 2022-02-19T03:45:22.000Z | lsq_quantizer/utils/lsq_network.py | yashbhalgat/QualcommAI-MicroNet-submission-nanoWRN | 8e4a2a253e68dc67eed91a5d3bb764afda5f32c8 | [
"MIT"
] | 1 | 2022-03-23T12:01:54.000Z | 2022-03-23T12:16:19.000Z | lsq_quantizer/utils/lsq_network.py | yashbhalgat/QualcommAI-MicroNet-submission-nanoWRN | 8e4a2a253e68dc67eed91a5d3bb764afda5f32c8 | [
"MIT"
] | 7 | 2019-12-18T02:05:35.000Z | 2021-02-03T03:44:50.000Z | import math
import torch
import torch.nn as nn
from .lsq_module import Conv2d
from .lsq_module import Linear
from .lsq_module import LsqActivation
def _make_layer(block, in_channels, planes, nblocks, stride=1, constr_activation=None):
layers = list()
downsample = stride != 1 or in_channels != planes * block.expansion
layers.append(block(in_channels, planes, stride, downsample, constr_activation))
in_channels = planes * block.expansion
for i in range(1, nblocks):
layers.append(block(in_channels, planes, constr_activation=constr_activation))
return nn.Sequential(*layers), planes * block.expansion
class _Identity(nn.Module):
    """No-op module returning its input unchanged (stand-in for an activation)."""
    def forward(self, x):
        """Return ``x`` unmodified."""
        return x
class BasicBlock(nn.Module):
    """Two-conv post-activation residual block, optionally LSQ-quantized.

    When ``constr_activation`` is not None, activations are LsqActivation
    modules; otherwise plain in-place ReLU.  ``downsample=True`` adds a
    strided 1x1 projection (conv + BN) on the shortcut path.
    """
    expansion = 1

    def __init__(self, in_channels, planes, stride=1, downsample=False, constr_activation=None):
        super(BasicBlock, self).__init__()
        self.quan_activation = constr_activation is not None

        def activation():
            # Quantized activation when a constraint is configured, else ReLU.
            return LsqActivation(constr_activation) if self.quan_activation else nn.ReLU(inplace=True)

        self.conv1 = Conv2d(in_channels, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(num_features=planes)
        self.activation1 = activation()
        self.conv2 = Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(num_features=planes)
        self.activation2 = activation()
        if downsample:
            # Projection shortcut: 1x1 conv + BN matches the main path's shape.
            self.downsample = nn.Sequential(
                Conv2d(in_channels, planes, kernel_size=1, stride=stride, padding=0, bias=False),
                nn.BatchNorm2d(num_features=planes),
            )
        else:
            self.downsample = None

    def forward(self, x):
        """conv-bn-act, conv-bn, add shortcut, final activation."""
        shortcut = x if self.downsample is None else self.downsample(x)
        y = self.activation1(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        y += shortcut
        return self.activation2(y)
class PreActivationBlock(nn.Module):
    """Pre-activation residual block (BN -> act -> conv ordering), optionally LSQ-quantized.

    The projection shortcut, when enabled, is also pre-activated; note its
    activation is LsqActivation when quantizing but an identity (not ReLU)
    otherwise.
    """
    expansion = 1

    def __init__(self, in_channels, planes, stride=1, downsample=False, constr_activation=None):
        super(PreActivationBlock, self).__init__()
        self.quan_activation = constr_activation is not None

        def activation():
            # Quantized activation when a constraint is configured, else ReLU.
            return LsqActivation(constr_activation) if self.quan_activation else nn.ReLU(inplace=True)

        self.bn1 = nn.BatchNorm2d(num_features=in_channels)
        self.activation1 = activation()
        self.conv1 = Conv2d(in_channels, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(num_features=planes)
        self.activation2 = activation()
        self.conv2 = Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        if downsample:
            # Pre-activated projection shortcut; identity (no ReLU) when unquantized.
            shortcut_act = LsqActivation(constr_activation) if self.quan_activation else _Identity()
            self.downsample = nn.Sequential(
                nn.BatchNorm2d(num_features=in_channels),
                shortcut_act,
                Conv2d(in_channels, planes, kernel_size=1, stride=stride, padding=0, bias=False),
            )
        else:
            self.downsample = None

    def forward(self, x):
        """bn-act-conv twice, then add the (possibly projected) shortcut."""
        shortcut = x if self.downsample is None else self.downsample(x)
        y = self.conv1(self.activation1(self.bn1(x)))
        y = self.conv2(self.activation2(self.bn2(y)))
        y += shortcut
        return y
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck residual block (post-activation), optionally LSQ-quantized.

    Output channel count is ``planes * 4``.  ``downsample`` previously
    defaulted to ``None`` while the sibling blocks use ``False``; it is only
    ever truth-tested, so the default is normalized to ``False`` here for
    consistency (behaviorally identical and backward compatible).
    """
    expansion = 4

    def __init__(self, in_channels, planes, stride=1, downsample=False, constr_activation=None):
        super(Bottleneck, self).__init__()
        self.quan_activation = constr_activation is not None

        def activation():
            # Quantized activation when a constraint is configured, else ReLU.
            return LsqActivation(constr_activation) if self.quan_activation else nn.ReLU(inplace=True)

        self.conv1 = Conv2d(in_channels, planes, kernel_size=1, stride=1, padding=0, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.activation1 = activation()
        self.conv2 = Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.activation2 = activation()
        self.conv3 = Conv2d(planes, planes * 4, kernel_size=1, stride=1, padding=0, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.activation3 = activation()
        if downsample:
            # Projection shortcut to the expanded (planes * 4) channel count.
            self.downsample = nn.Sequential(
                Conv2d(in_channels, planes * 4, kernel_size=1, stride=stride, padding=0, bias=False),
                nn.BatchNorm2d(num_features=planes * 4),
            )
        else:
            self.downsample = None

    def forward(self, x):
        """Three conv stages, add the shortcut, then the final activation."""
        shortcut = x if self.downsample is None else self.downsample(x)
        y = self.activation1(self.bn1(self.conv1(x)))
        y = self.activation2(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))
        y += shortcut
        return self.activation3(y)
class Resnet20(nn.Module):
    """CIFAR-style 20-layer ResNet (3 stages of 3 blocks) with a 100-way classifier.

    ``quan_first`` / ``quan_last`` swap the stem conv and final fc for their
    quantized counterparts, each preceded by an input activation quantizer
    (or an identity when ``constr_activation`` is None).
    """

    def __init__(self, block, quan_first=False, quan_last=False, constr_activation=None):
        super(Resnet20, self).__init__()
        self.quan_first = quan_first
        self.quan_last = quan_last
        self.quan_activation = constr_activation is not None

        # Stem: 3x3 conv to 16 channels (quantized variant when quan_first).
        if quan_first:
            self.first_act = LsqActivation(constr_activation) if self.quan_activation else _Identity()
            self.conv1 = Conv2d(in_channels=3, out_channels=16, kernel_size=3, stride=1, padding=1, bias=False)
        else:
            self.conv1 = nn.Conv2d(in_channels=3, out_channels=16, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(num_features=16)
        self.activation1 = LsqActivation(constr_activation) if self.quan_activation else nn.ReLU(inplace=True)

        # Residual stages: 16 -> 32 -> 64 channels, downsampling twice.
        in_channels = 16
        self.layer1, in_channels = _make_layer(block, in_channels, planes=16, nblocks=3,
                                               stride=1, constr_activation=constr_activation)
        self.layer2, in_channels = _make_layer(block, in_channels, planes=32, nblocks=3,
                                               stride=2, constr_activation=constr_activation)
        self.layer3, in_channels = _make_layer(block, in_channels, planes=64, nblocks=3,
                                               stride=2, constr_activation=constr_activation)
        self.avgpool = nn.AvgPool2d(kernel_size=8, stride=1)

        # Classifier head, optionally quantized.
        if quan_last:
            self.last_act = LsqActivation(constr_activation) if self.quan_activation else _Identity()
            self.fc = Linear(in_features=64, out_features=100, bias=True)
        else:
            self.fc = nn.Linear(in_features=64, out_features=100, bias=True)
        self._init_weight()

    def _init_weight(self):
        """He-style normal init for convs; BN weights to 1, biases to 0."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / fan_out))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def forward(self, x):
        """Stem, three residual stages, 8x8 average pool, flatten, classify."""
        if self.quan_first:
            x = self.first_act(x)
        x = self.activation1(self.bn1(self.conv1(x)))
        for stage in (self.layer1, self.layer2, self.layer3):
            x = stage(x)
        x = torch.flatten(self.avgpool(x), start_dim=1)
        if self.quan_last:
            x = self.last_act(x)
        return self.fc(x)
class ResNet(nn.Module):
    """ImageNet-style ResNet with optional LSQ activation quantization.

    ``layers`` lists the block count for each of the four stages.
    ``quan_first`` / ``quan_last`` swap the stem conv and the classifier for
    quantized variants, each preceded by an input activation quantizer (or an
    identity when ``constr_activation`` is None).
    """

    def __init__(self, block, layers, num_classes=1000, quan_first=False, quan_last=False, constr_activation=None):
        super(ResNet, self).__init__()
        self.quan_first = quan_first
        self.quan_last = quan_last
        self.quan_activation = constr_activation is not None
        self.constr_activation = constr_activation

        # Stem: 7x7/2 conv (quantized when requested), BN, activation, 3x3/2 max-pool.
        if self.quan_first:
            self.first_act = LsqActivation(constr_activation) if self.quan_activation else _Identity()
            self.conv1 = Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        else:
            self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        if self.quan_activation:
            self.activation1 = LsqActivation(constr_activation)
        else:
            self.activation1 = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

        # Residual stages: planes 64 -> 128 -> 256 -> 512, downsampling thrice.
        in_channels = 64
        self.layer1, in_channels = _make_layer(block, in_channels, planes=64, nblocks=layers[0],
                                               stride=1, constr_activation=constr_activation)
        self.layer2, in_channels = _make_layer(block, in_channels, planes=128, nblocks=layers[1],
                                               stride=2, constr_activation=constr_activation)
        self.layer3, in_channels = _make_layer(block, in_channels, planes=256, nblocks=layers[2],
                                               stride=2, constr_activation=constr_activation)
        self.layer4, in_channels = _make_layer(block, in_channels, planes=512, nblocks=layers[3],
                                               stride=2, constr_activation=constr_activation)
        self.avgpool = nn.AvgPool2d(7, stride=1)

        # Classifier head, optionally quantized.
        if self.quan_last:
            self.last_act = LsqActivation(constr_activation) if self.quan_activation else _Identity()
            self.fc = Linear(512 * block.expansion, num_classes)
        else:
            self.fc = nn.Linear(512 * block.expansion, num_classes)
        self._init_weight()

    def _init_weight(self):
        """He-style normal init for convs; BN weights to 1, biases to 0."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / fan_out))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def forward(self, x):
        """Stem, four residual stages, 7x7 average pool, flatten, classify."""
        if self.quan_first:
            x = self.first_act(x)
        x = self.maxpool(self.activation1(self.bn1(self.conv1(x))))
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            x = stage(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        if self.quan_last:
            x = self.last_act(x)
        return self.fc(x)
def resnet18(quan_first=False, quan_last=False, constr_activation=None, preactivation=False):
    """Build an 18-layer LSQ ResNet; ``preactivation`` selects pre-activation blocks."""
    chosen_block = PreActivationBlock if preactivation else BasicBlock
    return ResNet(chosen_block, [2, 2, 2, 2], quan_first=quan_first, quan_last=quan_last,
                  constr_activation=constr_activation)
def resnet20(quan_first=False, quan_last=False, constr_activation=None, preactivation=False):
    """Build the CIFAR-style 20-layer LSQ ResNet (100-way classifier)."""
    chosen_block = PreActivationBlock if preactivation else BasicBlock
    return Resnet20(chosen_block, quan_first, quan_last, constr_activation)
def resnet50(quan_first=False, quan_last=False, constr_activation=None, preactivation=False):
    """Build a 50-layer LSQ ResNet.

    Note: ``preactivation`` is accepted for signature parity with the other
    factories but has no effect — Bottleneck blocks are always used.
    """
    return ResNet(Bottleneck, [3, 4, 6, 3], quan_first=quan_first, quan_last=quan_last,
                  constr_activation=constr_activation)
| 42.518248 | 120 | 0.643605 | 1,497 | 11,650 | 4.829659 | 0.082164 | 0.115076 | 0.046473 | 0.0426 | 0.818257 | 0.770263 | 0.720747 | 0.71065 | 0.705118 | 0.6787 | 0 | 0.030516 | 0.254592 | 11,650 | 273 | 121 | 42.673993 | 0.80205 | 0 | 0 | 0.538117 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076233 | false | 0 | 0.026906 | 0.004484 | 0.188341 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
99c3e437189981d2d550be0a20ae67512eeb4d68 | 29 | py | Python | Exercicios/ex021.py | Dobravoski/Exercicios-Python | e7169e1ee6954a7bc9216063845611107a13759f | [
"MIT"
] | null | null | null | Exercicios/ex021.py | Dobravoski/Exercicios-Python | e7169e1ee6954a7bc9216063845611107a13759f | [
"MIT"
] | null | null | null | Exercicios/ex021.py | Dobravoski/Exercicios-Python | e7169e1ee6954a7bc9216063845611107a13759f | [
"MIT"
] | null | null | null | print('IMPOSSIVEL DE FAZER')
| 14.5 | 28 | 0.758621 | 4 | 29 | 5.5 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.103448 | 29 | 1 | 29 | 29 | 0.846154 | 0 | 0 | 0 | 0 | 0 | 0.655172 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 6 |
99f01c363bd879b385c9ff64f058d5a220657e01 | 45 | py | Python | docker/confluent-kafka/verify.py | arnarayanan/dockerfiles | 32a6299c3d22276b1df41dca5a0001246bdfd9d2 | [
"MIT"
] | 48 | 2018-12-12T12:18:09.000Z | 2022-03-05T02:23:42.000Z | docker/confluent-kafka/verify.py | arnarayanan/dockerfiles | 32a6299c3d22276b1df41dca5a0001246bdfd9d2 | [
"MIT"
] | 7,201 | 2018-12-24T17:14:17.000Z | 2022-03-31T13:39:12.000Z | docker/confluent-kafka/verify.py | HeyLaurelTestOrg/dockerfiles | 7cadb7a10c1307bfdcdb93ef6e890b56ccb1223a | [
"MIT"
] | 94 | 2018-12-17T10:59:21.000Z | 2022-03-29T12:59:30.000Z | import confluent_kafka
print("all is good")
| 11.25 | 22 | 0.777778 | 7 | 45 | 4.857143 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.133333 | 45 | 3 | 23 | 15 | 0.871795 | 0 | 0 | 0 | 0 | 0 | 0.244444 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.5 | 0 | 0.5 | 0.5 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 1 | 0 | 6 |
8201978a45387ba39226b2d97aaed78eb0c4d793 | 78 | py | Python | beginner/chapter_1/exam_1_7.py | Bokji24Dev/CodeStudy | 4c0fc852e6f472d082e9836c59ad22d229f74d87 | [
"MIT"
] | null | null | null | beginner/chapter_1/exam_1_7.py | Bokji24Dev/CodeStudy | 4c0fc852e6f472d082e9836c59ad22d229f74d87 | [
"MIT"
] | null | null | null | beginner/chapter_1/exam_1_7.py | Bokji24Dev/CodeStudy | 4c0fc852e6f472d082e9836c59ad22d229f74d87 | [
"MIT"
] | null | null | null | # -*- coding:utf-8 -*-
# Print a counter before and after incrementing it.
counter = 1
print(counter)
counter += 1
print(counter)
| 11.142857 | 22 | 0.602564 | 12 | 78 | 3.916667 | 0.5 | 0.255319 | 0.468085 | 0.680851 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.047619 | 0.192308 | 78 | 6 | 23 | 13 | 0.698413 | 0.25641 | 0 | 0.5 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.5 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 6 |
821fc14587204d69dad0bfd037e28ee443e2e51d | 22 | py | Python | datadict/jupyter/__init__.py | 177arc/pandas-datadict | 743d76ea8b71e9b94de83c44d008e6f80ddd232b | [
"MIT"
] | 2 | 2019-10-21T19:32:54.000Z | 2020-12-19T06:14:58.000Z | datadict/jupyter/__init__.py | 177arc/pandas-datadict | 743d76ea8b71e9b94de83c44d008e6f80ddd232b | [
"MIT"
] | 6 | 2019-11-03T17:46:45.000Z | 2021-01-03T17:11:45.000Z | datadict/jupyter/__init__.py | 177arc/pandas-datadict | 743d76ea8b71e9b94de83c44d008e6f80ddd232b | [
"MIT"
] | null | null | null | from .jupyter import * | 22 | 22 | 0.772727 | 3 | 22 | 5.666667 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.136364 | 22 | 1 | 22 | 22 | 0.894737 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
822a353a5d78275fd1d3cc2da0423ae308cfe110 | 136 | py | Python | app/views.py | Kalenai/timestamp-microservice | 326677ba55a51aa9cc173707154a3d3d5d688182 | [
"MIT"
] | 1 | 2018-05-10T14:04:58.000Z | 2018-05-10T14:04:58.000Z | app/views.py | Kalenai/timestamp-microservice | 326677ba55a51aa9cc173707154a3d3d5d688182 | [
"MIT"
] | null | null | null | app/views.py | Kalenai/timestamp-microservice | 326677ba55a51aa9cc173707154a3d3d5d688182 | [
"MIT"
] | null | null | null | from flask import Flask, render_template
from app import app
@app.route('/')
def homepage():
    """Serve the root URL by rendering the index.html template."""
    return render_template('index.html')
| 17 | 40 | 0.735294 | 19 | 136 | 5.157895 | 0.631579 | 0.285714 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.147059 | 136 | 7 | 41 | 19.428571 | 0.844828 | 0 | 0 | 0 | 0 | 0 | 0.080882 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | true | 0 | 0.4 | 0.2 | 0.8 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 0 | 0 | 0 | 6 |
413537614acd8bd0b26bcfca50df834bd1a40a4d | 42 | py | Python | yelp_reviews_scraper/__init__.py | meta-scraper/yelp-reviews-scraper-python | 0e0a200380b4c28bac10feb4b315b634e1926ce1 | [
"MIT"
] | null | null | null | yelp_reviews_scraper/__init__.py | meta-scraper/yelp-reviews-scraper-python | 0e0a200380b4c28bac10feb4b315b634e1926ce1 | [
"MIT"
] | null | null | null | yelp_reviews_scraper/__init__.py | meta-scraper/yelp-reviews-scraper-python | 0e0a200380b4c28bac10feb4b315b634e1926ce1 | [
"MIT"
] | null | null | null | from yelp_reviews_scraper.client import *
| 21 | 41 | 0.857143 | 6 | 42 | 5.666667 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.095238 | 42 | 1 | 42 | 42 | 0.894737 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
417e201e8f73f5074d3a83d93de75fbf1c42b019 | 216 | py | Python | colossalai/zero/shard_utils/__init__.py | oikosohn/ColossalAI | fc5101f24c9a2ad8e7e16cb81e1ef7646a1061fd | [
"Apache-2.0"
] | null | null | null | colossalai/zero/shard_utils/__init__.py | oikosohn/ColossalAI | fc5101f24c9a2ad8e7e16cb81e1ef7646a1061fd | [
"Apache-2.0"
] | null | null | null | colossalai/zero/shard_utils/__init__.py | oikosohn/ColossalAI | fc5101f24c9a2ad8e7e16cb81e1ef7646a1061fd | [
"Apache-2.0"
] | null | null | null | from colossalai.zero.shard_utils.base_shard_strategy import BaseShardStrategy
from colossalai.zero.shard_utils.tensor_shard_strategy import TensorShardStrategy
__all__ = ['BaseShardStrategy', 'TensorShardStrategy']
| 43.2 | 81 | 0.875 | 23 | 216 | 7.782609 | 0.521739 | 0.156425 | 0.201117 | 0.256983 | 0.312849 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.060185 | 216 | 4 | 82 | 54 | 0.881773 | 0 | 0 | 0 | 0 | 0 | 0.166667 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.666667 | 0 | 0.666667 | 0 | 1 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
41820c5ea72b589f23aa10f3bcc6dcc1843969f1 | 4,665 | py | Python | algorithms/agglomerate.py | matinraayai/ibex | 7792d1299a04da360faa1cd8a16a4c5a3990b48c | [
"MIT"
] | 3 | 2018-08-10T21:11:09.000Z | 2019-07-26T13:47:24.000Z | algorithms/agglomerate.py | matinraayai/ibex | 7792d1299a04da360faa1cd8a16a4c5a3990b48c | [
"MIT"
] | null | null | null | algorithms/agglomerate.py | matinraayai/ibex | 7792d1299a04da360faa1cd8a16a4c5a3990b48c | [
"MIT"
] | 6 | 2018-03-05T20:14:11.000Z | 2020-07-23T18:39:16.000Z | from ibex.transforms import seg2seg, seg2gold
from ibex.data_structures import UnionFind
from ibex.utilities import dataIO
import numpy as np
import os
import struct
def Agglomerate(prefix, model_prefix, threshold=0.5):
    """Merge segments whose multicut edge weight exceeds a threshold.

    Reads the segmentation for ``prefix``, unions labels joined by graph
    edges with weight above ``threshold``, writes the merged segmentation
    to a temporary H5 file, and runs an external comparison tool against
    the gold standard.

    :param prefix: dataset prefix used to locate segmentation/gold files.
    :param model_prefix: prefix identifying the multicut graph file.
    :param threshold: edge weights strictly above this value trigger a merge.
    """
    # read the segmentation data
    segmentation = dataIO.ReadSegmentationData(prefix)
    # get the multicut filename (with graph weights)
    multicut_filename = 'multicut/{}-{}.graph'.format(model_prefix, prefix)
    # get the maximum segmentation value
    max_value = np.amax(segmentation) + 1
    # create union find data structure
    union_find = [UnionFind.UnionFindElement(iv) for iv in range(max_value)]
    # read in all of the labels and merge the result
    with open(multicut_filename, 'rb') as fd:
        # read the number of vertices and edges (two unsigned 64-bit ints)
        nvertices, nedges, = struct.unpack('QQ', fd.read(16))
        # read in all of the edges
        for ie in range(nedges):
            # read in both labels
            label_one, label_two, = struct.unpack('QQ', fd.read(16))
            # skip over the reduced labels
            fd.read(16)
            # read in the edge weight (one 64-bit double)
            edge_weight, = struct.unpack('d', fd.read(8))
            # merge label one and label two in the union find data structure
            if (edge_weight > threshold):
                UnionFind.Union(union_find[label_one], union_find[label_two])
    # create a mapping from old labels to union-find representatives
    mapping = np.zeros(max_value, dtype=np.int64)
    # update the segmentation
    for iv in range(max_value):
        label = UnionFind.Find(union_find[iv]).label
        mapping[iv] = label
    # update the labels
    agglomerated_segmentation = seg2seg.MapLabels(segmentation, mapping)
    gold_filename = 'gold/{}_gold.h5'.format(prefix)
    # TODO fix this code temporary filename
    agglomeration_filename = 'multicut/{}-agglomerate.h5'.format(prefix)
    # temporary - write h5 file
    dataIO.WriteH5File(agglomerated_segmentation, agglomeration_filename, 'stack')
    import time
    start_time = time.time()
    # print() call form so the module parses under both Python 2 and 3
    print('Agglomeration - {}:'.format(threshold))
    # create the command line
    command = '~/software/PixelPred2Seg/comparestacks --stack1 {} --stackbase {} --dilate1 1 --dilatebase 1 --relabel1 --relabelbase --filtersize 100 --anisotropic'.format(agglomeration_filename, gold_filename)
    # execute the command
    # NOTE(review): filenames are interpolated into a shell string; prefix is
    # assumed trusted -- confirm before exposing this to external input.
    os.system(command)
    print(time.time() - start_time)
def MergeGroundTruth(prefix, model_prefix):
    """Merge segments that map to the same (non-zero) gold label.

    Uses the multicut graph edges as merge candidates: two labels are
    unioned when the segmentation-to-gold mapping assigns them the same
    non-zero gold label. Writes the merged segmentation to a temporary H5
    file and runs an external comparison tool against the gold standard.

    :param prefix: dataset prefix used to locate segmentation/gold files.
    :param model_prefix: prefix identifying the multicut graph file.
    """
    # read the segmentation data
    segmentation = dataIO.ReadSegmentationData(prefix)
    # get the multicut filename (with graph weights)
    multicut_filename = 'multicut/{}-{}.graph'.format(model_prefix, prefix)
    # read the gold data
    gold = dataIO.ReadGoldData(prefix)
    # read in the segmentation to gold mapping
    mapping = seg2gold.Mapping(segmentation, gold)
    # get the maximum segmentation value
    max_value = np.amax(segmentation) + 1
    # create union find data structure
    union_find = [UnionFind.UnionFindElement(iv) for iv in range(max_value)]
    # read in all of the labels
    with open(multicut_filename, 'rb') as fd:
        # read the number of vertices and edges (two unsigned 64-bit ints)
        nvertices, nedges, = struct.unpack('QQ', fd.read(16))
        # read in all of the edges
        for ie in range(nedges):
            # read in the two labels
            label_one, label_two, = struct.unpack('QQ', fd.read(16))
            # skip over the reduced labels and edge weight
            fd.read(24)
            # if the labels are the same and the mapping is non zero
            if mapping[label_one] == mapping[label_two] and mapping[label_one]:
                UnionFind.Union(union_find[label_one], union_find[label_two])
    # create a mapping from old labels to union-find representatives
    # (reuses the name `mapping`; the seg2gold mapping is no longer needed)
    mapping = np.zeros(max_value, dtype=np.int64)
    # update the segmentation
    for iv in range(max_value):
        label = UnionFind.Find(union_find[iv]).label
        mapping[iv] = label
    merged_segmentation = seg2seg.MapLabels(segmentation, mapping)
    gold_filename = 'gold/{}_gold.h5'.format(prefix)
    # TODO fix this code temporary filename
    truth_filename = 'multicut/{}-truth.h5'.format(prefix)
    # temporary write h5 file
    dataIO.WriteH5File(merged_segmentation, truth_filename, 'stack')
    import time
    start_time = time.time()
    # print() call form so the module parses under both Python 2 and 3
    print('Ground truth: ')
    # create the command line
    command = '~/software/PixelPred2Seg/comparestacks --stack1 {} --stackbase {} --dilate1 1 --dilatebase 1 --relabel1 --relabelbase --filtersize 100 --anisotropic'.format(truth_filename, gold_filename)
    # execute the command
    os.system(command)
    print(time.time() - start_time)
| 33.561151 | 210 | 0.676313 | 586 | 4,665 | 5.284983 | 0.220137 | 0.031966 | 0.012916 | 0.015499 | 0.740071 | 0.735551 | 0.735551 | 0.735551 | 0.735551 | 0.673555 | 0 | 0.014461 | 0.229153 | 4,665 | 138 | 211 | 33.804348 | 0.846774 | 0.240086 | 0 | 0.590164 | 0 | 0.032787 | 0.133409 | 0.029076 | 0 | 0 | 0 | 0.007246 | 0 | 0 | null | null | 0 | 0.131148 | null | null | 0.065574 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
ec58099db927f4e615178d7c8c63f98d21999aa1 | 2,960 | py | Python | src/pattern/sorting/sort_patterns.py | leodenault/led-ws2801 | 1003dd25824fbbb96564145ee3c7c46711c9cd1a | [
"MIT"
] | null | null | null | src/pattern/sorting/sort_patterns.py | leodenault/led-ws2801 | 1003dd25824fbbb96564145ee3c7c46711c9cd1a | [
"MIT"
] | null | null | null | src/pattern/sorting/sort_patterns.py | leodenault/led-ws2801 | 1003dd25824fbbb96564145ee3c7c46711c9cd1a | [
"MIT"
] | null | null | null | from colour import led_colour
from pattern.pattern_chain import PatternChain
from pattern.sorting.bubble_sort_pattern import BubbleSortPattern
from pattern.sorting.colour_distributor import ColourDistributor
from pattern.sorting.merge_sort_pattern import MergeSortPattern
from pattern.sorting.sort_celebration import SortCelebration
def compare(colour1, colour2, colours):
    """Return True when ``colour1`` precedes ``colour2`` in ``colours``."""
    rank = colours.index
    return rank(colour1) < rank(colour2)
def create_bubble_sort_pattern(
    num_leds,
    distribution_duration,
    sort_step_duration,
    num_celebration_flashes,
    celebration_flash_duration,
    colour_palette):
    """Builds a pattern chain that visualizes a bubble sort on the LED strip.

    The chain first scatters random palette colours across the strip, then
    animates a bubble sort of those colours, and finally flashes the LEDs in
    celebration.

    :param num_leds: the number of LEDs on the device.
    :param distribution_duration: the time it should take to distribute all
    colours across the LED strip.
    :param sort_step_duration: the amount of time a single sort step takes.
    :param num_celebration_flashes: the number of times the LEDs should flash
    in celebration.
    :param celebration_flash_duration: the amount of time, in seconds, a
    single flash should take.
    :param colour_palette: the palette of colours randomly distributed onto
    the strip.
    """
    strip_data = [led_colour.BLACK] * num_leds

    # Named zero-argument factories; PatternChain instantiates each stage
    # lazily when it is reached.
    def distribute_stage():
        return ColourDistributor(
            num_leds, distribution_duration, colour_palette, strip_data)

    def sort_stage():
        return BubbleSortPattern(colour_palette, strip_data, sort_step_duration)

    def celebrate_stage():
        return SortCelebration(
            num_celebration_flashes, celebration_flash_duration, strip_data)

    return PatternChain([distribute_stage, sort_stage, celebrate_stage])
def create_merge_sort_pattern(
    num_leds,
    distribution_duration,
    sort_step_duration,
    num_celebration_flashes,
    celebration_flash_duration,
    colour_palette):
    """Builds a pattern chain that visualizes a merge sort on the LED strip.

    The chain first scatters random palette colours across the strip, then
    animates a merge sort of those colours, and finally flashes the LEDs in
    celebration.

    :param num_leds: the number of LEDs on the device.
    :param distribution_duration: the time it should take to distribute all
    colours across the LED strip.
    :param sort_step_duration: the amount of time a single sort step takes.
    :param num_celebration_flashes: the number of times the LEDs should flash
    in celebration.
    :param celebration_flash_duration: the amount of time, in seconds, a
    single flash should take.
    :param colour_palette: the palette of colours randomly distributed onto
    the strip.
    """
    strip_data = [led_colour.BLACK] * num_leds

    # Named zero-argument factories; PatternChain instantiates each stage
    # lazily when it is reached.
    def distribute_stage():
        return ColourDistributor(
            num_leds, distribution_duration, colour_palette, strip_data)

    def sort_stage():
        return MergeSortPattern(
            num_leds, sort_step_duration, colour_palette, strip_data)

    def celebrate_stage():
        return SortCelebration(
            num_celebration_flashes, celebration_flash_duration, strip_data)

    return PatternChain([distribute_stage, sort_stage, celebrate_stage])
| 36.097561 | 83 | 0.749324 | 377 | 2,960 | 5.66313 | 0.193634 | 0.029508 | 0.044965 | 0.050585 | 0.769087 | 0.769087 | 0.755035 | 0.755035 | 0.755035 | 0.755035 | 0 | 0.001691 | 0.200676 | 2,960 | 81 | 84 | 36.54321 | 0.900676 | 0.420946 | 0 | 0.727273 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.068182 | false | 0 | 0.136364 | 0.022727 | 0.272727 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
6b9d3a76779d1715fda723b8fbcdc4ef91b87cd8 | 22,402 | py | Python | src/model/model.py | theLongLab/phx-nn | 81e0f87faa82d6995b37095815655224cb5bf438 | [
"MIT"
] | null | null | null | src/model/model.py | theLongLab/phx-nn | 81e0f87faa82d6995b37095815655224cb5bf438 | [
"MIT"
] | null | null | null | src/model/model.py | theLongLab/phx-nn | 81e0f87faa82d6995b37095815655224cb5bf438 | [
"MIT"
] | null | null | null | # src/model/model.py
from collections import OrderedDict
from pathlib import Path
import pickle
from typing import (
Any,
Dict,
List,
Mapping,
MutableMapping,
Optional,
NoReturn,
Sequence,
Tuple,
Union,
)
from adabound import AdaBound
from optuna import Trial
import pytorch_lightning as pl
from sklearn.base import BaseEstimator
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.optim.optimizer import Optimizer
from torch.optim.lr_scheduler import StepLR
from torch.utils.data import DataLoader
from xgboost import XGBRegressor
from src.base import BaseDataLoader, BaseModel
from src.data_loader import SumStatDataLoader
from src.model.loss import phx_param_map
# Number of CUDA devices visible to this process (0 on CPU-only hosts);
# used to size dataloader worker pools below.
GPU_COUNT: int = torch.cuda.device_count()
# Names of the PoolHapX (PHX) hyper-parameters the network predicts, one
# output head per entry. The tuple order defines the output order of
# forward() in the model classes below.
HEADS: Tuple[str, ...] = (
    "num_gap_window",
    "inpool_gap_supp_min",
    "allpool_gap_supp_min",
    "l1_region_size_min",
    "l1_region_size_max",
    "l2_region_size_min",
    "l2_region_size_max",
    "l34_region_mis_tol",
    "l56_region_mis_tol",
    "l78_region_mis_tol",
    "est_indv_perpool",
    "aem_max_l",
    "bfs_mis_tol",
    "aem_conv_cutoff",
    "aem_zero_cutoff",
    "aem_regional_crosspool_freq_cutoff",
    "aem_regional_hapsetsize_min",
    "aem_regional_hapsetsize_max",
    "regr_one_vec_weight",
    "regr_hap_vc_weight",
    "regr_hap_11_weight",
    "regr_regional_hapsetsize_max",
    "regr_gamma_min",
    "regr_n_gamma",
    "regr_mis_tol",
    "regr_coverage_weight",
    "regr_distance_max_weight",
    "regr_max_regions",
)
# Number of input summary statistics fed to the first linear layer.
INPUT_SIZE: int = 40
class HParamNet(BaseModel):
    """Feed-forward network whose architecture is sampled from an Optuna trial.

    The trunk is a stack of Linear + LeakyReLU (+ BatchNorm1d + Dropout on all
    but the last layer); one Linear head per PHX hyper-parameter in ``HEADS``
    maps the trunk output to a single value, which ``forward`` clamps/rounds
    into that parameter's valid range.
    """

    def __init__(self, trial: Trial, hparam_space: Mapping[str, Sequence]) -> None:
        """Sample layer sizes, dropout, batch size and learning rates from *trial*.

        :param trial: Optuna trial used to suggest architecture hyper-parameters.
        :param hparam_space: per-hyper-parameter search ranges/choices.
        """
        super().__init__()
        self.layers: List[nn.Module] = []
        self.batchnorms: List[nn.Module] = []
        self.dropouts: List[nn.Module] = []
        # Order of PHX params for inner model input.
        self.heads: Tuple[str, ...] = HEADS
        final_output_dim: int = self.__optimize_layers(trial=trial, hparam_space=hparam_space)
        self._batch_size: int = trial.suggest_categorical("batch_size", hparam_space["batch_size"])
        self._lr: float = trial.suggest_uniform("lr", hparam_space["lr"][0], hparam_space["lr"][1])
        self._final_lr: float = trial.suggest_uniform(
            "final_lr", hparam_space["final_lr"][0], hparam_space["final_lr"][1]
        )
        self.__build_model(final_output_dim)

    def __optimize_layers(self, trial: Trial, hparam_space: Mapping[str, Sequence]) -> int:
        """Suggest layer count/widths and dropout rates; return last layer width."""
        # Optimize the number of layers, hidden units in each layer and dropout rate.
        n_layers: int = trial.suggest_int(
            "n_layers", hparam_space["n_layers"][0], hparam_space["n_layers"][1]
        )
        input_dropout_rate: float = trial.suggest_loguniform(
            "input_dropout_rate", hparam_space["dropout_rate"][0], 0.2
        )
        dropout_rate: float = trial.suggest_loguniform(
            "dropout_rate", input_dropout_rate, hparam_space["dropout_rate"][1]
        )
        input_dim: int = INPUT_SIZE  # number of input summary statistics
        i: int
        for i in range(n_layers):
            output_dim: int = trial.suggest_int(
                "n_units_l{}".format(i), hparam_space["n_units_l"][0], hparam_space["n_units_l"][1],
            )
            self.layers.append(nn.Linear(input_dim, output_dim))
            # The final layer feeds the heads directly; no batchnorm/dropout on it.
            if i != n_layers - 1:
                self.batchnorms.append(nn.BatchNorm1d(num_features=output_dim))
                self.dropouts.append(nn.Dropout(dropout_rate))
                # Input layer needs a lower dropout rate.
                if i == 0:
                    self.dropouts[-1] = nn.Dropout(input_dropout_rate)
            input_dim = output_dim
        return output_dim

    def __build_model(self, final_output_dim: int) -> None:
        """Register sampled layers/heads as attributes and init their weights."""
        # Assign layers as class variables (PyTorch requirement).
        layer: nn.Module
        for i, layer in enumerate(self.layers):
            layer_name: str = "fc{}".format(i)
            setattr(self, layer_name, layer)
            nn.init.xavier_normal_(getattr(self, layer_name).weight)  # init weight
        # Assign batchnorm actions as class variables (PyTorch requirement).
        batchnorm: nn.Module
        for i, batchnorm in enumerate(self.batchnorms):
            setattr(self, "bn{}".format(i), batchnorm)
        # Assign dropout actions as class variables (PyTorch requirement).
        dropout: nn.Module
        for i, dropout in enumerate(self.dropouts):
            setattr(self, "dropout{}".format(i), dropout)
        # Assign heads as class variables (PyTorch requirement).
        head: str
        for i, head in enumerate(self.heads):
            setattr(self, head, nn.Linear(final_output_dim, 1))
            nn.init.xavier_normal_(getattr(self, head).weight)

    def forward(self, X):
        """Run the trunk then every head, clamping each PHX parameter into range.

        Returns a tuple of per-head tensors in ``HEADS`` order, each cast to float.
        """
        x: torch.Tensor = X
        for layer, batchnorm, dropout in zip(self.layers[:-1], self.batchnorms, self.dropouts):
            x = F.leaky_relu(layer(x))
            x = batchnorm(x)
            x = dropout(x)
        for layer in self.layers[-1:]:
            x = F.leaky_relu(layer(x))
        # Clamp values of PHX parameters at each head.
        num_gap_window: torch.Tensor = torch.clamp(torch.round(self.num_gap_window(x)).int(), 1, 5)
        inpool_gap_supp_min: torch.Tensor = torch.sigmoid(self.inpool_gap_supp_min(x))
        allpool_gap_supp_min: torch.Tensor = torch.sigmoid(self.allpool_gap_supp_min(x))
        l1_region_size_min: torch.Tensor = torch.clamp(
            torch.round(self.l1_region_size_min(x)).int(), 3, 13
        )
        # Lower bound of the max must sit above the largest min in the batch.
        l1_region_size_max: torch.Tensor = torch.clamp(
            torch.round(self.l1_region_size_max(x)).int(), max(l1_region_size_min).item() + 1, 16
        )
        l2_region_size_min: torch.Tensor = l1_region_size_min
        l2_region_size_max: torch.Tensor = l1_region_size_max
        l34_region_mis_tol: torch.Tensor = torch.clamp(
            torch.round(self.l34_region_mis_tol(x)).int(), 0, 5
        )
        l56_region_mis_tol: torch.Tensor = torch.clamp(
            torch.round(self.l56_region_mis_tol(x)).int(), 1, 6
        )
        l78_region_mis_tol: torch.Tensor = torch.clamp(
            torch.round(self.l78_region_mis_tol(x)).int(), 2, 7
        )
        est_indv_perpool: torch.Tensor = torch.clamp(
            torch.round(self.est_indv_perpool(x)).int(), 1000, 1000000
        )
        aem_max_l: torch.Tensor = torch.clamp(torch.round(self.aem_max_l(x)).int(), 0, 6)
        # Force aem_max_l odd: += on a 0-d int view mutates aem_max_l in place.
        tens: torch.Tensor
        for tens in aem_max_l:
            tens += 1 if tens.item() % 2 == 0 else 0
        bfs_mis_tol: torch.Tensor = torch.clamp(torch.round(self.bfs_mis_tol(x)).int(), 0, 10)
        aem_conv_cutoff: torch.Tensor = torch.clamp(self.aem_conv_cutoff(x), 0, 1e-4)
        aem_zero_cutoff: torch.Tensor = torch.clamp(self.aem_zero_cutoff(x), 0, 1e-6)
        aem_regional_crosspool_freq_cutoff: torch.Tensor = torch.clamp(
            self.aem_regional_crosspool_freq_cutoff(x), 0, 0.05
        )
        aem_regional_hapsetsize_min: torch.Tensor = torch.clamp(
            torch.round(self.aem_regional_hapsetsize_min(x)).int(), 1, 10
        )
        aem_regional_hapsetsize_max: torch.Tensor = torch.clamp(
            torch.round(self.aem_regional_hapsetsize_max(x)).int(),
            max(11, max(aem_regional_hapsetsize_min).item() + 4),
            100,
        )
        regr_one_vec_weight: torch.Tensor = torch.clamp(self.regr_one_vec_weight(x), 1, 10)
        regr_hap_vc_weight: torch.Tensor = torch.clamp(self.regr_hap_vc_weight(x), 1, 10)
        regr_hap_11_weight: torch.Tensor = torch.clamp(self.regr_hap_11_weight(x), 1, 10)
        regr_regional_hapsetsize_max: torch.Tensor = torch.clamp(
            torch.round(self.regr_regional_hapsetsize_max(x)).int(), 11, 100
        )
        regr_gamma_min: torch.Tensor = torch.sigmoid(self.regr_gamma_min(x)) / 4
        regr_n_gamma: torch.Tensor = torch.clamp(torch.round(self.regr_n_gamma(x)).int(), 2, 10)
        regr_mis_tol: torch.Tensor = torch.clamp(torch.round(self.regr_mis_tol(x)).int(), 8, 20)
        regr_coverage_weight: torch.Tensor = torch.clamp(self.regr_coverage_weight(x), 0.5, 2.5)
        regr_distance_max_weight: torch.Tensor = torch.clamp(self.regr_distance_max_weight(x), 1, 5)
        regr_max_regions: torch.Tensor = torch.clamp(
            torch.round(self.regr_max_regions(x)).int(), 2, 3
        )
        # Explicit collection replaces the original eval(head) lookup, which
        # was fragile (depends on local names) and an eval anti-pattern.
        # The order below must stay in lockstep with HEADS.
        head_values: Tuple[torch.Tensor, ...] = (
            num_gap_window,
            inpool_gap_supp_min,
            allpool_gap_supp_min,
            l1_region_size_min,
            l1_region_size_max,
            l2_region_size_min,
            l2_region_size_max,
            l34_region_mis_tol,
            l56_region_mis_tol,
            l78_region_mis_tol,
            est_indv_perpool,
            aem_max_l,
            bfs_mis_tol,
            aem_conv_cutoff,
            aem_zero_cutoff,
            aem_regional_crosspool_freq_cutoff,
            aem_regional_hapsetsize_min,
            aem_regional_hapsetsize_max,
            regr_one_vec_weight,
            regr_hap_vc_weight,
            regr_hap_11_weight,
            regr_regional_hapsetsize_max,
            regr_gamma_min,
            regr_n_gamma,
            regr_mis_tol,
            regr_coverage_weight,
            regr_distance_max_weight,
            regr_max_regions,
        )
        return tuple(value.float() for value in head_values)
class LightningHParamNet(pl.LightningModule):
    """Lightning wrapper that trains a sampled ``HParamNet`` on one CV fold.

    Delegates all computation to the wrapped ``est`` module and scores each
    batch with ``phx_param_map`` against a fitted gradient-boosted tree.
    NOTE(review): ``val_dataloader`` reads ``self._train_dataloader``, which
    is only created inside ``train_dataloader`` -- this relies on Lightning
    calling ``train_dataloader`` first; confirm against the pl version used.
    """
    def __init__(
        self,
        est: nn.Module,
        cvtrain_data: torch.Tensor,
        cvval_data: torch.Tensor,
        loss_fn: BaseEstimator,
        shuffle: bool = False,
        validation_split: Union[float, int] = 0.0,
        num_workers: int = 0,
    ) -> None:
        """Store the inner model, loss estimator and dataloader settings.

        :param est: the network being tuned; supplies ``_batch_size``.
        :param cvtrain_data: training fold passed to SumStatDataLoader.
        :param cvval_data: held-out fold used by ``test_dataloader``.
        :param loss_fn: fitted estimator consumed by ``phx_param_map``.
        """
        super().__init__()
        # Avoid overriding `LightningModule` attributes (e.g. self.model)
        self._model: nn.Module = est
        self.loss_fn: XGBRegressor = loss_fn
        self._dataloader_args: Dict[str, Any] = {
            "data": cvtrain_data,
            "batch_size": self._model._batch_size,
            "shuffle": shuffle,
            "validation_split": validation_split,
            "num_workers": num_workers,
        }
        self._cvval_data: torch.Tensor = cvval_data
    def forward(self, X: torch.Tensor) -> Tuple[torch.Tensor, ...]:
        # Pure delegation to the wrapped network.
        return self._model(X)
    def training_step(self, batch: Tuple, batch_idx: int) -> Dict[str, torch.Tensor]:
        # Score the model's head outputs with the gradient-boosted tree loss.
        X: torch.Tensor
        y: torch.Tensor
        X, y = batch
        output: Tuple[torch.Tensor, ...] = self.forward(X)
        loss: torch.Tensor = phx_param_map(output=output, gbtree=self.loss_fn)
        return {"loss": loss}
    def validation_step(self, batch: Tuple, batch_idx: int) -> Dict[str, torch.Tensor]:
        # Same scoring as training_step, logged under "val_loss".
        X: torch.Tensor
        y: torch.Tensor
        X, y = batch
        output: Tuple[torch.Tensor, ...] = self.forward(X)
        val_loss: torch.Tensor = phx_param_map(output=output, gbtree=self.loss_fn)
        return {"val_loss": val_loss}
    def validation_end(self, outputs: Sequence[Mapping]) -> Dict[str, Dict[str, float]]:
        # Average per-batch validation losses for Optuna/logging.
        x: Dict[str, torch.Tensor]
        mean_val_loss: Union[Any, torch.Tensor] = sum(  # Union Any to make mypy happy
            x["val_loss"].clone().detach() for x in outputs
        ) / len(outputs)
        return {"log": {"mean_val_loss": mean_val_loss.item()}}
    def test_step(self, batch: Tuple, batch_idx: int) -> Dict[str, torch.Tensor]:
        # Same scoring as training_step, logged under "test_loss".
        X: torch.Tensor
        y: torch.Tensor
        X, y = batch
        output: Tuple[torch.Tensor, ...] = self.forward(X)
        test_loss: torch.Tensor = phx_param_map(output=output, gbtree=self.loss_fn)
        return {"test_loss": test_loss}
    def test_end(self, outputs: Sequence[Mapping]) -> Dict[str, Dict[str, float]]:
        # Average per-batch test losses.
        x: Dict[str, torch.Tensor]
        mean_test_loss: Union[Any, torch.Tensor] = sum(  # Union Any to make mypy happy
            x["test_loss"].clone().detach() for x in outputs
        ) / len(outputs)
        return {"log": {"mean_test_loss": mean_test_loss.item()}}
    def configure_optimizers(self) -> Tuple[List[Optimizer], List[StepLR]]:
        # AdaBound with the trial-sampled learning rates; LR decays every 50 steps.
        self.opt: Optimizer = AdaBound(
            self._model.parameters(), lr=self._model._lr, final_lr=self._model._final_lr
        )
        self.lr_sch: StepLR = StepLR(self.opt, 50)
        return [self.opt], [self.lr_sch]
    def on_epoch_end(self) -> None:
        # Step the LR schedule manually once per epoch.
        self.lr_sch.step()
    @pl.data_loader
    def train_dataloader(self) -> DataLoader:
        # Cached so val_dataloader can split validation data off the same loader.
        self._train_dataloader: BaseDataLoader = SumStatDataLoader(**self._dataloader_args)
        return self._train_dataloader
    @pl.data_loader
    def val_dataloader(self) -> Optional[DataLoader]:
        # Requires train_dataloader to have run first (sets _train_dataloader).
        return self._train_dataloader.split_validation()
    @pl.data_loader
    def test_dataloader(self) -> DataLoader:
        # Held-out CV fold; fixed batch size, workers scaled by GPU count.
        return SumStatDataLoader(
            data=self._cvval_data,
            batch_size=512,
            shuffle=False,
            validation_split=0.0,
            num_workers=4 * GPU_COUNT,
        )
class PoolHapXNet(pl.LightningModule):
    """Final PHX hyper-parameter network built from fixed hyper-parameters.

    Mirrors ``HParamNet`` but reads the architecture from ``model_hparams``
    (an already-optimized configuration) instead of sampling an Optuna trial,
    and carries its own Lightning training/validation/test plumbing.
    NOTE(review): ``val_dataloader`` reads ``self._train_dataloader``, which
    is only created inside ``train_dataloader`` -- this relies on Lightning
    calling ``train_dataloader`` first.
    """

    def __init__(
        self,
        model_hparams: Mapping[str, Any],
        train_data: torch.Tensor,
        val_data: torch.Tensor,
        loss_fn: BaseEstimator,
        shuffle: bool = False,
        validation_split: Union[float, int] = 0.0,
        num_workers: int = 0,
    ) -> None:
        """Build the network from *model_hparams* and store training settings.

        :param model_hparams: layer sizes, dropout rates, batch size, lrs.
        :param train_data: training tensor passed to SumStatDataLoader.
        :param val_data: held-out tensor used by ``test_dataloader``.
        :param loss_fn: fitted estimator consumed by ``phx_param_map``.
        """
        super().__init__()
        self.layers: List[nn.Module] = []
        self.batchnorms: List[nn.Module] = []
        self.dropouts: List[nn.Module] = []
        # One output head per PHX parameter; order fixed by HEADS.
        self.heads: Tuple[str, ...] = HEADS
        final_output_dim: int = self.__set_layers(model_hparams)
        self.__build_model(final_output_dim)
        self.loss_fn: XGBRegressor = loss_fn
        self._dataloader_args: Dict[str, Any] = {
            "data": train_data,
            "batch_size": model_hparams["batch_size"],
            "shuffle": shuffle,
            "validation_split": validation_split,
            "num_workers": num_workers,
            # "test_set": True,
        }
        self._lr: float = model_hparams["lr"]
        self._final_lr: float = model_hparams["final_lr"]
        self._val_data: torch.Tensor = val_data

    def __set_layers(self, model_hparams: Mapping[str, Any]) -> int:
        """Instantiate trunk layers from the hparam dict; return last width."""
        # Optimize the number of layers, hidden units in each layer and dropout rate.
        n_layers: int = model_hparams["n_layers"]
        input_dropout_rate: float = model_hparams["input_dropout_rate"]
        dropout_rate: float = model_hparams["dropout_rate"]
        input_dim: int = INPUT_SIZE  # number of input summary statistics
        i: int
        for i in range(n_layers):
            output_dim: int = model_hparams["n_units_l{}".format(i)]
            self.layers.append(nn.Linear(input_dim, output_dim))
            # The final layer feeds the heads directly; no batchnorm/dropout on it.
            if i != n_layers - 1:
                self.batchnorms.append(nn.BatchNorm1d(num_features=output_dim))
                self.dropouts.append(nn.Dropout(dropout_rate))
                # Input layer needs a lower dropout rate.
                if i == 0:
                    self.dropouts[-1] = nn.Dropout(input_dropout_rate)
            input_dim = output_dim
        return output_dim

    def __build_model(self, final_output_dim: int) -> None:
        """Register layers/heads as attributes and init their weights."""
        # Assign layers as class variables (PyTorch requirement).
        layer: nn.Module
        for i, layer in enumerate(self.layers):
            layer_name: str = "fc{}".format(i)
            setattr(self, layer_name, layer)
            nn.init.xavier_normal_(getattr(self, layer_name).weight)  # init weight
        # Assign batchnorm actions as class variables (PyTorch requirement).
        batchnorm: nn.Module
        for i, batchnorm in enumerate(self.batchnorms):
            setattr(self, "bn{}".format(i), batchnorm)
        # Assign dropout actions as class variables (PyTorch requirement).
        dropout: nn.Module
        for i, dropout in enumerate(self.dropouts):
            setattr(self, "dropout{}".format(i), dropout)
        # Assign heads as class variables (PyTorch requirement).
        head: str
        for i, head in enumerate(self.heads):
            setattr(self, head, nn.Linear(final_output_dim, 1))
            nn.init.xavier_normal_(getattr(self, head).weight)

    def forward(self, X):
        """Run the trunk then every head, clamping each PHX parameter into range.

        Returns a list of per-head tensors in ``HEADS`` order, each cast to float.
        """
        x: torch.Tensor = X
        for layer, batchnorm, dropout in zip(self.layers[:-1], self.batchnorms, self.dropouts):
            x = F.leaky_relu(layer(x))
            x = batchnorm(x)
            x = dropout(x)
        for layer in self.layers[-1:]:
            x = F.leaky_relu(layer(x))
        # Clamp values of PHX parameters at each head.
        num_gap_window: torch.Tensor = torch.clamp(torch.round(self.num_gap_window(x)).int(), 1, 5)
        inpool_gap_supp_min: torch.Tensor = torch.sigmoid(self.inpool_gap_supp_min(x))
        allpool_gap_supp_min: torch.Tensor = torch.sigmoid(self.allpool_gap_supp_min(x))
        l1_region_size_min: torch.Tensor = torch.clamp(
            torch.round(self.l1_region_size_min(x)).int(), 3, 13
        )
        # Lower bound of the max must sit above the largest min in the batch.
        l1_region_size_max: torch.Tensor = torch.clamp(
            torch.round(self.l1_region_size_max(x)).int(), max(l1_region_size_min).item() + 1, 16
        )
        l2_region_size_min: torch.Tensor = l1_region_size_min
        l2_region_size_max: torch.Tensor = l1_region_size_max
        l34_region_mis_tol: torch.Tensor = torch.clamp(
            torch.round(self.l34_region_mis_tol(x)).int(), 0, 5
        )
        l56_region_mis_tol: torch.Tensor = torch.clamp(
            torch.round(self.l56_region_mis_tol(x)).int(), 1, 6
        )
        l78_region_mis_tol: torch.Tensor = torch.clamp(
            torch.round(self.l78_region_mis_tol(x)).int(), 2, 7
        )
        est_indv_perpool: torch.Tensor = torch.clamp(
            torch.round(self.est_indv_perpool(x)).int(), 1000, 1000000
        )
        aem_max_l: torch.Tensor = torch.clamp(torch.round(self.aem_max_l(x)).int(), 0, 6)
        # Force aem_max_l odd: += on a 0-d int view mutates aem_max_l in place.
        tens: torch.Tensor
        for tens in aem_max_l:
            tens += 1 if tens.item() % 2 == 0 else 0
        bfs_mis_tol: torch.Tensor = torch.clamp(torch.round(self.bfs_mis_tol(x)).int(), 0, 10)
        aem_conv_cutoff: torch.Tensor = torch.clamp(self.aem_conv_cutoff(x), 0, 1e-4)
        aem_zero_cutoff: torch.Tensor = torch.clamp(self.aem_zero_cutoff(x), 0, 1e-6)
        aem_regional_crosspool_freq_cutoff: torch.Tensor = torch.clamp(
            self.aem_regional_crosspool_freq_cutoff(x), 0, 0.05
        )
        aem_regional_hapsetsize_min: torch.Tensor = torch.clamp(
            torch.round(self.aem_regional_hapsetsize_min(x)).int(), 1, 10
        )
        aem_regional_hapsetsize_max: torch.Tensor = torch.clamp(
            torch.round(self.aem_regional_hapsetsize_max(x)).int(),
            max(11, max(aem_regional_hapsetsize_min).item() + 4),
            100,
        )
        regr_one_vec_weight: torch.Tensor = torch.clamp(self.regr_one_vec_weight(x), 1, 10)
        regr_hap_vc_weight: torch.Tensor = torch.clamp(self.regr_hap_vc_weight(x), 1, 10)
        regr_hap_11_weight: torch.Tensor = torch.clamp(self.regr_hap_11_weight(x), 1, 10)
        regr_regional_hapsetsize_max: torch.Tensor = torch.clamp(
            torch.round(self.regr_regional_hapsetsize_max(x)).int(), 11, 100
        )
        regr_gamma_min: torch.Tensor = torch.sigmoid(self.regr_gamma_min(x)) / 4
        regr_n_gamma: torch.Tensor = torch.clamp(torch.round(self.regr_n_gamma(x)).int(), 2, 10)
        regr_mis_tol: torch.Tensor = torch.clamp(torch.round(self.regr_mis_tol(x)).int(), 8, 20)
        regr_coverage_weight: torch.Tensor = torch.clamp(self.regr_coverage_weight(x), 0.5, 2.5)
        regr_distance_max_weight: torch.Tensor = torch.clamp(self.regr_distance_max_weight(x), 1, 5)
        regr_max_regions: torch.Tensor = torch.clamp(
            torch.round(self.regr_max_regions(x)).int(), 2, 3
        )
        # Explicit collection replaces the original eval(head) lookup, which
        # was fragile (depends on local names) and an eval anti-pattern.
        # The order below must stay in lockstep with HEADS.
        head_values: Tuple[torch.Tensor, ...] = (
            num_gap_window,
            inpool_gap_supp_min,
            allpool_gap_supp_min,
            l1_region_size_min,
            l1_region_size_max,
            l2_region_size_min,
            l2_region_size_max,
            l34_region_mis_tol,
            l56_region_mis_tol,
            l78_region_mis_tol,
            est_indv_perpool,
            aem_max_l,
            bfs_mis_tol,
            aem_conv_cutoff,
            aem_zero_cutoff,
            aem_regional_crosspool_freq_cutoff,
            aem_regional_hapsetsize_min,
            aem_regional_hapsetsize_max,
            regr_one_vec_weight,
            regr_hap_vc_weight,
            regr_hap_11_weight,
            regr_regional_hapsetsize_max,
            regr_gamma_min,
            regr_n_gamma,
            regr_mis_tol,
            regr_coverage_weight,
            regr_distance_max_weight,
            regr_max_regions,
        )
        return [value.float() for value in head_values]

    def training_step(
        self, batch: Tuple, batch_idx: int
    ) -> Dict[str, Union[Dict[str, torch.Tensor], torch.Tensor]]:
        """Score one batch and return loss plus progress-bar/log dicts."""
        X: torch.Tensor
        y: torch.Tensor
        X, y = batch
        # forward() returns a list here (annotation fixed from Tuple).
        output: List[torch.Tensor] = self.forward(X)
        loss: torch.Tensor = phx_param_map(output=output, gbtree=self.loss_fn)
        tqdm_dict: Dict[str, torch.Tensor] = {"train_loss": loss}
        log_output: OrderedDict = OrderedDict(
            {"loss": loss, "progress_bar": tqdm_dict, "log": tqdm_dict}
        )
        return log_output

    def validation_step(self, batch: Tuple, batch_idx: int) -> Dict[str, torch.Tensor]:
        """Score one validation batch under the "val_loss" key."""
        X: torch.Tensor
        y: torch.Tensor
        X, y = batch
        output: List[torch.Tensor] = self.forward(X)
        val_loss: torch.Tensor = phx_param_map(output=output, gbtree=self.loss_fn)
        return {"val_loss": val_loss}

    def validation_end(self, outputs: Sequence[Mapping]) -> Dict[str, Dict[str, float]]:
        """Average per-batch validation losses for the progress bar and logs."""
        x: Dict[str, torch.Tensor]
        mean_val_loss: Union[Any, torch.Tensor] = sum(  # Union Any to make mypy happy
            x["val_loss"].clone().detach() for x in outputs
        ) / len(outputs)
        tqdm_dict: Dict[str, float] = {"mean_val_loss": mean_val_loss.item()}
        return {"progress_bar": tqdm_dict, "log": {"mean_val_loss": mean_val_loss.item()}}

    def test_step(self, batch: Tuple, batch_idx: int) -> Dict[str, torch.Tensor]:
        """Score one test batch under the "test_loss" key."""
        X: torch.Tensor
        y: torch.Tensor
        X, y = batch
        output: List[torch.Tensor] = self.forward(X)
        test_loss: torch.Tensor = phx_param_map(output=output, gbtree=self.loss_fn)
        return {"test_loss": test_loss}

    def test_end(self, outputs: Sequence[Mapping]) -> Dict[str, Dict[str, float]]:
        """Average per-batch test losses and print the result."""
        x: Dict[str, torch.Tensor]
        mean_test_loss: Union[Any, torch.Tensor] = sum(  # Union Any to make mypy happy
            x["test_loss"].clone().detach() for x in outputs
        ) / len(outputs)
        tqdm_dict: Dict[str, float] = {"mean_test_loss": mean_test_loss.item()}
        print("\n\n==\nMean Test Loss: {}\n==\n\n".format(mean_test_loss.item()))
        return {"progress_bar": tqdm_dict, "log": {"mean_test_loss": mean_test_loss.item()}}

    def configure_optimizers(self) -> Tuple[List[Optimizer], List[StepLR]]:
        """AdaBound over all parameters; LR decays every 50 scheduler steps."""
        self.opt: Optimizer = AdaBound(self.parameters(), lr=self._lr, final_lr=self._final_lr)
        self.lr_sch: StepLR = StepLR(self.opt, 50)
        return [self.opt], [self.lr_sch]

    def on_epoch_end(self) -> None:
        # Step the LR schedule manually once per epoch.
        self.lr_sch.step()

    @pl.data_loader
    def train_dataloader(self) -> DataLoader:
        # Cached so val_dataloader can split validation data off the same loader.
        self._train_dataloader: BaseDataLoader = SumStatDataLoader(**self._dataloader_args)
        return self._train_dataloader

    @pl.data_loader
    def val_dataloader(self) -> Optional[DataLoader]:
        # Requires train_dataloader to have run first (sets _train_dataloader).
        return self._train_dataloader.split_validation()

    @pl.data_loader
    def test_dataloader(self) -> DataLoader:
        # Held-out data; fixed batch size, workers scaled by GPU count.
        return SumStatDataLoader(
            data=self._val_data,
            batch_size=512,
            shuffle=False,
            validation_split=0.0,
            num_workers=4 * GPU_COUNT,
            # test_set=True,
        )
| 39.649558 | 100 | 0.634051 | 3,022 | 22,402 | 4.439444 | 0.086036 | 0.090191 | 0.063208 | 0.072004 | 0.846154 | 0.81701 | 0.802847 | 0.791145 | 0.784735 | 0.779815 | 0 | 0.016344 | 0.246183 | 22,402 | 564 | 101 | 39.719858 | 0.778113 | 0.052406 | 0 | 0.652747 | 0 | 0 | 0.052925 | 0.006604 | 0 | 0 | 0 | 0 | 0 | 1 | 0.065934 | false | 0 | 0.03956 | 0.010989 | 0.162637 | 0.002198 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.