hexsha: string
size: int64
ext: string
lang: string
max_stars_repo_path: string
max_stars_repo_name: string
max_stars_repo_head_hexsha: string
max_stars_repo_licenses: list
max_stars_count: int64
max_stars_repo_stars_event_min_datetime: string
max_stars_repo_stars_event_max_datetime: string
max_issues_repo_path: string
max_issues_repo_name: string
max_issues_repo_head_hexsha: string
max_issues_repo_licenses: list
max_issues_count: int64
max_issues_repo_issues_event_min_datetime: string
max_issues_repo_issues_event_max_datetime: string
max_forks_repo_path: string
max_forks_repo_name: string
max_forks_repo_head_hexsha: string
max_forks_repo_licenses: list
max_forks_count: int64
max_forks_repo_forks_event_min_datetime: string
max_forks_repo_forks_event_max_datetime: string
content: string
avg_line_length: float64
max_line_length: int64
alphanum_fraction: float64
qsc_code_num_words_quality_signal: int64
qsc_code_num_chars_quality_signal: float64
qsc_code_mean_word_length_quality_signal: float64
qsc_code_frac_words_unique_quality_signal: float64
qsc_code_frac_chars_top_2grams_quality_signal: float64
qsc_code_frac_chars_top_3grams_quality_signal: float64
qsc_code_frac_chars_top_4grams_quality_signal: float64
qsc_code_frac_chars_dupe_5grams_quality_signal: float64
qsc_code_frac_chars_dupe_6grams_quality_signal: float64
qsc_code_frac_chars_dupe_7grams_quality_signal: float64
qsc_code_frac_chars_dupe_8grams_quality_signal: float64
qsc_code_frac_chars_dupe_9grams_quality_signal: float64
qsc_code_frac_chars_dupe_10grams_quality_signal: float64
qsc_code_frac_chars_replacement_symbols_quality_signal: float64
qsc_code_frac_chars_digital_quality_signal: float64
qsc_code_frac_chars_whitespace_quality_signal: float64
qsc_code_size_file_byte_quality_signal: float64
qsc_code_num_lines_quality_signal: float64
qsc_code_num_chars_line_max_quality_signal: float64
qsc_code_num_chars_line_mean_quality_signal: float64
qsc_code_frac_chars_alphabet_quality_signal: float64
qsc_code_frac_chars_comments_quality_signal: float64
qsc_code_cate_xml_start_quality_signal: float64
qsc_code_frac_lines_dupe_lines_quality_signal: float64
qsc_code_cate_autogen_quality_signal: float64
qsc_code_frac_lines_long_string_quality_signal: float64
qsc_code_frac_chars_string_length_quality_signal: float64
qsc_code_frac_chars_long_word_length_quality_signal: float64
qsc_code_frac_lines_string_concat_quality_signal: float64
qsc_code_cate_encoded_data_quality_signal: float64
qsc_code_frac_chars_hex_words_quality_signal: float64
qsc_code_frac_lines_prompt_comments_quality_signal: float64
qsc_code_frac_lines_assert_quality_signal: float64
qsc_codepython_cate_ast_quality_signal: float64
qsc_codepython_frac_lines_func_ratio_quality_signal: float64
qsc_codepython_cate_var_zero_quality_signal: bool
qsc_codepython_frac_lines_pass_quality_signal: float64
qsc_codepython_frac_lines_import_quality_signal: float64
qsc_codepython_frac_lines_simplefunc_quality_signal: float64
qsc_codepython_score_lines_no_logic_quality_signal: float64
qsc_codepython_frac_lines_print_quality_signal: float64
qsc_code_num_words: int64
qsc_code_num_chars: int64
qsc_code_mean_word_length: int64
qsc_code_frac_words_unique: null
qsc_code_frac_chars_top_2grams: int64
qsc_code_frac_chars_top_3grams: int64
qsc_code_frac_chars_top_4grams: int64
qsc_code_frac_chars_dupe_5grams: int64
qsc_code_frac_chars_dupe_6grams: int64
qsc_code_frac_chars_dupe_7grams: int64
qsc_code_frac_chars_dupe_8grams: int64
qsc_code_frac_chars_dupe_9grams: int64
qsc_code_frac_chars_dupe_10grams: int64
qsc_code_frac_chars_replacement_symbols: int64
qsc_code_frac_chars_digital: int64
qsc_code_frac_chars_whitespace: int64
qsc_code_size_file_byte: int64
qsc_code_num_lines: int64
qsc_code_num_chars_line_max: int64
qsc_code_num_chars_line_mean: int64
qsc_code_frac_chars_alphabet: int64
qsc_code_frac_chars_comments: int64
qsc_code_cate_xml_start: int64
qsc_code_frac_lines_dupe_lines: int64
qsc_code_cate_autogen: int64
qsc_code_frac_lines_long_string: int64
qsc_code_frac_chars_string_length: int64
qsc_code_frac_chars_long_word_length: int64
qsc_code_frac_lines_string_concat: null
qsc_code_cate_encoded_data: int64
qsc_code_frac_chars_hex_words: int64
qsc_code_frac_lines_prompt_comments: int64
qsc_code_frac_lines_assert: int64
qsc_codepython_cate_ast: int64
qsc_codepython_frac_lines_func_ratio: int64
qsc_codepython_cate_var_zero: int64
qsc_codepython_frac_lines_pass: int64
qsc_codepython_frac_lines_import: int64
qsc_codepython_frac_lines_simplefunc: int64
qsc_codepython_score_lines_no_logic: int64
qsc_codepython_frac_lines_print: int64
effective: string
hits: int64
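The name/dtype pairs above are the flattened column schema for the records that follow: repository metadata in three blocks (max_stars_*, max_issues_*, max_forks_*), the raw file content plus line-level statistics, 41 float-valued *_quality_signal scores, 41 unsuffixed qsc_* integer flag columns in the same order, and the trailing effective and hits fields. As a quick way to recover this listing programmatically, here is a minimal sketch using pandas; the file name records.jsonl is hypothetical and assumes the rows below were exported as JSON Lines, one object per record.

```python
import pandas as pd

# Hypothetical export: one JSON object per record, with the fields listed above.
df = pd.read_json("records.jsonl", lines=True)

# Reproduce the "name: dtype" schema listing.
for name, dtype in df.dtypes.items():
    print(f"{name}: {dtype}")
```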
203fe4cfb3578293c8dd17e02591ea8d9aa60e33
20
py
Python
students/K33402/Kondrashov_Egor/LR4/app/core/__init__.py
emina13/ITMO_ICT_WebDevelopment_2021-2022
498a6138e352e7e0ca40d1eb301bc29416158f51
[ "MIT" ]
7
2021-09-02T08:20:58.000Z
2022-01-12T11:48:07.000Z
back/app/core/__init__.py
e-kondr01/bookings-web-app
8a3ffba778fb70ad17cdec1f5f0d4b2861cfe0c8
[ "0BSD" ]
76
2021-09-17T23:01:50.000Z
2022-03-18T16:42:03.000Z
back/app/core/__init__.py
e-kondr01/bookings-web-app
8a3ffba778fb70ad17cdec1f5f0d4b2861cfe0c8
[ "0BSD" ]
60
2021-09-04T16:47:39.000Z
2022-03-21T04:41:27.000Z
from . import admin
10
19
0.75
3
20
5
1
0
0
0
0
0
0
0
0
0
0
0
0.2
20
1
20
20
0.9375
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
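The record above (content "from . import admin", a one-statement __init__.py) makes the surface statistics easy to verify by hand: max_line_length 19 is the length of the single code line, and size 20 includes the trailing newline. Below is a minimal sketch of the alphanum_fraction computation, assuming (an assumption, not a documented definition) that the fraction is taken over all characters including the newline; it reproduces the stored 0.75.

```python
# Content of the record above; the trailing newline brings the size to 20 bytes,
# matching the record's size field.
text = "from . import admin\n"

# Assumed definition: alphanumeric characters / total characters.
alphanum_fraction = sum(c.isalnum() for c in text) / len(text)
print(alphanum_fraction)  # 0.75, matching the record's alphanum_fraction field
```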
647de12b495300600b5769bfe710d70a7a73d563
1,273
py
Python
ascii_art.py
jamieabw/Hangman
f5535cb6077074423b005eb8ecbdffe36c76f46f
[ "MIT" ]
null
null
null
ascii_art.py
jamieabw/Hangman
f5535cb6077074423b005eb8ecbdffe36c76f46f
[ "MIT" ]
1
2021-12-30T04:39:18.000Z
2021-12-30T10:22:59.000Z
ascii_art.py
jamieabw/Hangman
f5535cb6077074423b005eb8ecbdffe36c76f46f
[ "MIT" ]
null
null
null
# Art belongs to trinket.io. # Everything else belongs to me art = ( """ ------ | | | | | | | | ---------- """, """ ------ | | | 0 | | | | | ---------- """, """ ------ | | | 0 | + | | | | ---------- """, """ ------ | | | 0 | -+ | | | | ---------- """, """ ------ | | | 0 | -+- | | | | ---------- """, """ ------ | | | 0 | /-+- | | | | ---------- """, """ ------ | | | 0 | /-+-/ | | | | ---------- """, """ ------ | | | 0 | /-+-/ | | | | | ---------- """, """ ------ | | | 0 | /-+-/ | | | | | | ---------- """, """ ------ | | | 0 | /-+-/ | | | | | | | | ---------- """, """ ------ | | | 0 | /-+-/ | | | | | | | | | | ---------- """ )
10.184
31
0.046347
21
1,273
2.809524
0.428571
0.305085
0.40678
0.474576
0.169492
0.169492
0.169492
0.169492
0.169492
0
0
0.022026
0.643362
1,273
125
32
10.184
0.10793
0.043991
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
1
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
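In every complete record in this dump, hits is 6 and equals the number of unsuffixed qsc_* flag columns set to 1 (six in each of the two records above), which suggests the flags mark individual quality checks that fired and hits is their sum. That reading is an assumption inferred from the data; the sketch below merely restates it, with record standing in for one parsed row.

```python
def count_hits(record: dict) -> int:
    """Assumed relationship: hits == count of unsuffixed qsc_* flags equal to 1."""
    return sum(
        1
        for name, value in record.items()
        if name.startswith("qsc_")
        and not name.endswith("_quality_signal")
        and value == 1
    )
```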
b38cb0e6cd837eeaf0b7e6b9fcae2f34a10c698a
25
py
Python
femda/__init__.py
Andrewwango/femda
c072a065687ab32805bdfa48d34c75e05ffd959e
[ "MIT" ]
null
null
null
femda/__init__.py
Andrewwango/femda
c072a065687ab32805bdfa48d34c75e05ffd959e
[ "MIT" ]
null
null
null
femda/__init__.py
Andrewwango/femda
c072a065687ab32805bdfa48d34c75e05ffd959e
[ "MIT" ]
null
null
null
from .femda_ import FEMDA
25
25
0.84
4
25
5
0.75
0
0
0
0
0
0
0
0
0
0
0
0.12
25
1
25
25
0.909091
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
b3b7dad34afa57b9dcbd94273a4d00174d43a1da
208
py
Python
pykitinfo/tests/test_nothing.py
microchip-pic-avr-tools/pykitinfo
3cc0d73dbdece229ea1456c19baf076985ec84a9
[ "MIT" ]
null
null
null
pykitinfo/tests/test_nothing.py
microchip-pic-avr-tools/pykitinfo
3cc0d73dbdece229ea1456c19baf076985ec84a9
[ "MIT" ]
null
null
null
pykitinfo/tests/test_nothing.py
microchip-pic-avr-tools/pykitinfo
3cc0d73dbdece229ea1456c19baf076985ec84a9
[ "MIT" ]
null
null
null
import unittest from mock import patch from mock import Mock class TestGetNothing(unittest.TestCase): """Tests for nothing""" def setUp(self): pass def test_nothing(self): pass
16
40
0.673077
26
208
5.346154
0.615385
0.115108
0.201439
0
0
0
0
0
0
0
0
0
0.25
208
12
41
17.333333
0.891026
0.081731
0
0.25
0
0
0
0
0
0
0
0
0
1
0.25
false
0.25
0.375
0
0.75
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
1
0
1
0
0
6
b3daecda5937a2dfa52d7ca718703f869eee95e9
25
py
Python
sfaira_extension/data/__init__.py
theislab/sfaira_extension
22910c7f20e48defbcb5b82c2137e97ee7ed428f
[ "BSD-3-Clause" ]
null
null
null
sfaira_extension/data/__init__.py
theislab/sfaira_extension
22910c7f20e48defbcb5b82c2137e97ee7ed428f
[ "BSD-3-Clause" ]
3
2020-11-03T17:37:37.000Z
2021-02-15T12:47:52.000Z
sfaira_extension/data/__init__.py
theislab/sfaira_extension
22910c7f20e48defbcb5b82c2137e97ee7ed428f
[ "BSD-3-Clause" ]
1
2022-03-03T15:11:14.000Z
2022-03-03T15:11:14.000Z
from . import dataloaders
25
25
0.84
3
25
7
1
0
0
0
0
0
0
0
0
0
0
0
0.12
25
1
25
25
0.954545
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
377ca24f5775cb0572b7e0b143c51bd3506911e5
19
py
Python
__init__.py
IngenuityEngine/cOS
c4b62e8b0809e889cf5733abc3dedddb0841a06d
[ "MIT" ]
null
null
null
__init__.py
IngenuityEngine/cOS
c4b62e8b0809e889cf5733abc3dedddb0841a06d
[ "MIT" ]
1
2018-02-19T17:54:31.000Z
2018-02-19T17:54:31.000Z
__init__.py
IngenuityEngine/cOS
c4b62e8b0809e889cf5733abc3dedddb0841a06d
[ "MIT" ]
null
null
null
from cOS import *
9.5
18
0.684211
3
19
4.333333
1
0
0
0
0
0
0
0
0
0
0
0
0.263158
19
1
19
19
0.928571
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
37b64b6a57cb3fe452101bb1dac2914aee9fa960
94
py
Python
airthings/devices/utils.py
kotlarz/airthings
0cae26911797473d6269ac72690e37512de62af6
[ "MIT" ]
2
2020-06-04T09:51:33.000Z
2021-02-17T09:32:29.000Z
airthings/devices/utils.py
kotlarz/airthings
0cae26911797473d6269ac72690e37512de62af6
[ "MIT" ]
6
2020-06-17T07:47:01.000Z
2020-06-27T10:06:20.000Z
airthings/devices/utils.py
kotlarz/airthings
0cae26911797473d6269ac72690e37512de62af6
[ "MIT" ]
1
2020-09-17T11:09:05.000Z
2020-09-17T11:09:05.000Z
def parse_radon_data(radon_data): return radon_data if 0 <= radon_data <= 16383 else None
31.333333
59
0.755319
16
94
4.125
0.625
0.545455
0
0
0
0
0
0
0
0
0
0.076923
0.170213
94
2
60
47
0.769231
0
0
0
0
0
0
0
0
0
0
0
0
1
0.5
false
0
0
0.5
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
6
80c80dc5dd040f0b262eecf40dfcb315a04f7054
179
py
Python
examples/simple/views.py
h3/django-dajaxice
1e31b967e6ea0d5acc84acd04cae4da004e8d861
[ "BSD-3-Clause" ]
60
2015-01-09T23:02:52.000Z
2021-03-27T13:46:55.000Z
examples/simple/views.py
h3/django-dajaxice
1e31b967e6ea0d5acc84acd04cae4da004e8d861
[ "BSD-3-Clause" ]
15
2015-02-19T15:06:15.000Z
2017-10-27T15:06:47.000Z
examples/simple/views.py
h3/django-dajaxice
1e31b967e6ea0d5acc84acd04cae4da004e8d861
[ "BSD-3-Clause" ]
55
2015-01-02T22:27:13.000Z
2021-04-27T19:34:15.000Z
# Create your views here. from django.shortcuts import render from dajaxice.core import dajaxice_functions def index(request): return render(request, 'simple/index.html')
17.9
47
0.776536
24
179
5.75
0.75
0
0
0
0
0
0
0
0
0
0
0
0.145251
179
9
48
19.888889
0.901961
0.128492
0
0
0
0
0.11039
0
0
0
0
0
0
1
0.25
false
0
0.5
0.25
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
1
1
1
0
0
6
ff06e4dc239c864ccc0eaeb658096e4903021604
1,565
py
Python
tests/transformersx/test_bert_tokenizer_ext.py
aicanhelp/ai-transformers
fa30031fa7360ee6d4fd3d016a3c81a23cfe8af1
[ "MIT" ]
1
2020-08-03T12:59:20.000Z
2020-08-03T12:59:20.000Z
tests/transformersx/test_bert_tokenizer_ext.py
aicanhelp/ai-transformers
fa30031fa7360ee6d4fd3d016a3c81a23cfe8af1
[ "MIT" ]
null
null
null
tests/transformersx/test_bert_tokenizer_ext.py
aicanhelp/ai-transformers
fa30031fa7360ee6d4fd3d016a3c81a23cfe8af1
[ "MIT" ]
null
null
null
from transformersx.model.bert.tokenization_bert_ext import ( _create_token_type_ids_from_sequences_for_multiple_sentences, _get_special_tokens_mask_for_multiple_sentences ) class Test_BertTokenizerExt(): def test_get_special_tokens_mask_for_multiple_sentences(self): token_ids_0 = [1, 0, 1, 0] token_ids_1 = [1, 0, 1, 0, 9, 99, 0] token_ids_2 = [1, 0, 1, 0, 9, 99, 0, 9, 99, 1] new_token_ids = _get_special_tokens_mask_for_multiple_sentences(token_ids_0, 99) assert new_token_ids == [1, 0, 0, 0, 0, 1] new_token_ids = _get_special_tokens_mask_for_multiple_sentences(token_ids_1, 99) assert new_token_ids == [1, 0, 0, 0, 0, 1, 1, 0, 1] new_token_ids = _get_special_tokens_mask_for_multiple_sentences(token_ids_2, 99) assert new_token_ids == [1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1] def test_create_token_type_ids_from_sequences_for_multiple_sentences(self): token_ids_0 = [1, 0, 1, 0] token_ids_1 = [1, 0, 1, 0, 9, 99, 0] token_ids_2 = [1, 0, 1, 0, 9, 99, 0, 9, 99, 1] new_token_ids = _create_token_type_ids_from_sequences_for_multiple_sentences(token_ids_0, 99) assert new_token_ids == [0, 0, 0, 0, 0, 0] new_token_ids = _create_token_type_ids_from_sequences_for_multiple_sentences(token_ids_1, 99) assert new_token_ids == [0, 0, 0, 0, 0, 0, 1, 1, 1] new_token_ids = _create_token_type_ids_from_sequences_for_multiple_sentences(token_ids_2, 99) assert new_token_ids == [0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0]
53.965517
101
0.686262
277
1,565
3.404332
0.111913
0.055143
0.060445
0.050901
0.901379
0.898197
0.898197
0.834571
0.834571
0.747614
0
0.101695
0.208307
1,565
28
102
55.892857
0.659403
0
0
0.24
0
0
0
0
0
0
0
0
0.24
1
0.08
false
0
0.04
0
0.16
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
ff202835eea0ea6f1271b2f4b697e3e5049af062
8,236
py
Python
tests/download/test_views.py
alphagov-mirror/document-download-api
fa865117606eb63f0fdde3f2b3a353c51f1b4bbd
[ "MIT" ]
null
null
null
tests/download/test_views.py
alphagov-mirror/document-download-api
fa865117606eb63f0fdde3f2b3a353c51f1b4bbd
[ "MIT" ]
null
null
null
tests/download/test_views.py
alphagov-mirror/document-download-api
fa865117606eb63f0fdde3f2b3a353c51f1b4bbd
[ "MIT" ]
null
null
null
import io from unittest import mock from uuid import UUID import pytest from flask import url_for from app.utils.store import DocumentStoreError @pytest.fixture def store(mocker): return mocker.patch('app.download.views.document_store') def test_document_download(client, store): store.get.return_value = { 'body': io.BytesIO(b'PDF document contents'), 'mimetype': 'application/pdf', 'size': 100 } response = client.get( url_for( 'download.download_document', service_id='00000000-0000-0000-0000-000000000000', document_id='ffffffff-ffff-ffff-ffff-ffffffffffff', key='AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', # 32 \x00 bytes ) ) assert response.status_code == 200 assert response.get_data() == b'PDF document contents' assert dict(response.headers) == { 'Cache-Control': mock.ANY, 'Expires': mock.ANY, 'Content-Length': '100', 'Content-Type': 'application/pdf', 'X-B3-SpanId': 'None', 'X-B3-TraceId': 'None', 'X-Robots-Tag': 'noindex, nofollow' } store.get.assert_called_once_with( UUID('00000000-0000-0000-0000-000000000000'), UUID('ffffffff-ffff-ffff-ffff-ffffffffffff'), bytes(32) ) @pytest.mark.parametrize("mimetype, expected_extension, expected_content_type_header", [ ('text/csv', 'csv', 'text/csv; charset=utf-8'), ('text/rtf', 'rtf', 'text/rtf; charset=utf-8'), ('application/rtf', 'rtf', 'application/rtf'), ]) def test_force_document_download( client, store, mimetype, expected_extension, expected_content_type_header ): """ Test that file responses have the expected Content-Type/Content-Disposition required for browsers to download files in a way that is useful for users. """ store.get.return_value = { 'body': io.BytesIO(b'a,b,c'), 'mimetype': mimetype, 'size': 100 } document_id = 'ffffffff-ffff-ffff-ffff-ffffffffffff' response = client.get( url_for( 'download.download_document', service_id='00000000-0000-0000-0000-000000000000', document_id=document_id, key='AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', # 32 \x00 bytes ) ) assert response.status_code == 200 assert response.get_data() == b'a,b,c' assert dict(response.headers) == { 'Cache-Control': mock.ANY, 'Expires': mock.ANY, 'Content-Length': '100', 'Content-Type': expected_content_type_header, 'Content-Disposition': f'attachment; filename={document_id}.{expected_extension}', 'X-B3-SpanId': 'None', 'X-B3-TraceId': 'None', 'X-Robots-Tag': 'noindex, nofollow' } store.get.assert_called_once_with( UUID('00000000-0000-0000-0000-000000000000'), UUID('ffffffff-ffff-ffff-ffff-ffffffffffff'), bytes(32) ) def test_document_download_with_extension(client, store): store.get.return_value = { 'body': io.BytesIO(b'a,b,c'), 'mimetype': 'application/pdf', 'size': 100 } response = client.get( url_for( 'download.download_document', service_id='00000000-0000-0000-0000-000000000000', document_id='ffffffff-ffff-ffff-ffff-ffffffffffff', key='AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', # 32 \x00 bytes extension='.pdf', ) ) assert response.status_code == 200 assert response.get_data() == b'a,b,c' assert dict(response.headers) == { 'Cache-Control': mock.ANY, 'Expires': mock.ANY, 'Content-Length': '100', 'Content-Type': 'application/pdf', 'X-B3-SpanId': 'None', 'X-B3-TraceId': 'None', 'X-Robots-Tag': 'noindex, nofollow' } def test_document_download_without_decryption_key(client, store): response = client.get( url_for( 'download.download_document', service_id='00000000-0000-0000-0000-000000000000', document_id='ffffffff-ffff-ffff-ffff-ffffffffffff', ) ) assert response.status_code == 400 assert response.json == {'error': 'Missing decryption key'} def test_document_download_with_invalid_decryption_key(client): response = client.get( url_for( 'download.download_document', service_id='00000000-0000-0000-0000-000000000000', document_id='ffffffff-ffff-ffff-ffff-ffffffffffff', key='🐦⁉🐦⁉🐦⁉🐦⁉🐦⁉🐦⁉🐦⁉🐦⁉🐦⁉🐦⁉🐦⁉🐦⁉?' ) ) assert response.status_code == 400 assert response.json == {'error': 'Invalid decryption key'} def test_document_download_document_store_error(client, store): store.get.side_effect = DocumentStoreError('something went wrong') response = client.get( url_for( 'download.download_document', service_id='00000000-0000-0000-0000-000000000000', document_id='ffffffff-ffff-ffff-ffff-ffffffffffff', key='AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA' ) ) assert response.status_code == 400 assert response.json == {'error': 'something went wrong'} def test_get_document_metadata_without_decryption_key(client, store): response = client.get( url_for( 'download.get_document_metadata', service_id='00000000-0000-0000-0000-000000000000', document_id='ffffffff-ffff-ffff-ffff-ffffffffffff', ) ) assert response.status_code == 400 assert response.json == {'error': 'Missing decryption key'} def test_get_document_metadata_with_invalid_decryption_key(client): response = client.get( url_for( 'download.get_document_metadata', service_id='00000000-0000-0000-0000-000000000000', document_id='ffffffff-ffff-ffff-ffff-ffffffffffff', key='🐦⁉🐦⁉🐦⁉🐦⁉🐦⁉🐦⁉🐦⁉🐦⁉🐦⁉🐦⁉🐦⁉🐦⁉?' ) ) assert response.status_code == 400 assert response.json == {'error': 'Invalid decryption key'} def test_get_document_metadata_document_store_error(client, store): store.get_document_metadata.side_effect = DocumentStoreError('something went wrong') response = client.get( url_for( 'download.get_document_metadata', service_id='00000000-0000-0000-0000-000000000000', document_id='ffffffff-ffff-ffff-ffff-ffffffffffff', key='AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA' ) ) assert response.status_code == 400 assert response.json == {'error': 'something went wrong'} def test_get_document_metadata_when_document_is_in_s3(client, store): store.get_document_metadata.return_value = {'mimetype': 'text/plain'} response = client.get( url_for( 'download.get_document_metadata', service_id='00000000-0000-0000-0000-000000000000', document_id='ffffffff-ffff-ffff-ffff-ffffffffffff', key='AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA' ) ) assert response.status_code == 200 assert response.headers['X-Robots-Tag'] == 'noindex, nofollow' assert response.json == { 'file_exists': 'True', 'document': { 'direct_file_url': ''.join([ 'http://document-download.test', '/services/00000000-0000-0000-0000-000000000000', '/documents/ffffffff-ffff-ffff-ffff-ffffffffffff.txt', '?key=AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA' ]) } } def test_get_document_metadata_when_document_is_not_in_s3(client, store): store.get_document_metadata.return_value = None response = client.get( url_for( 'download.get_document_metadata', service_id='00000000-0000-0000-0000-000000000000', document_id='ffffffff-ffff-ffff-ffff-ffffffffffff', key='AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA' ) ) assert response.status_code == 200 assert response.json == {'file_exists': 'False', 'document': None} assert response.headers['X-Robots-Tag'] == 'noindex, nofollow'
32.298039
90
0.63611
882
8,236
5.835601
0.151927
0.043521
0.043521
0.054401
0.835632
0.809792
0.800466
0.751117
0.725471
0.725471
0
0.083492
0.236523
8,236
254
91
32.425197
0.723601
0.023434
0
0.604762
0
0
0.357811
0.229647
0
0
0
0
0.138095
1
0.057143
false
0
0.028571
0.004762
0.090476
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
20705591f0c93c1702d2c26833be64e9087abf1c
70
py
Python
applications/kpax/controllers/home.py
arsfeld/fog-web2py
32263a03d4183dcaf7537c87edcb4e574d4bec6e
[ "BSD-3-Clause" ]
null
null
null
applications/kpax/controllers/home.py
arsfeld/fog-web2py
32263a03d4183dcaf7537c87edcb4e574d4bec6e
[ "BSD-3-Clause" ]
null
null
null
applications/kpax/controllers/home.py
arsfeld/fog-web2py
32263a03d4183dcaf7537c87edcb4e574d4bec6e
[ "BSD-3-Clause" ]
1
2019-03-13T08:20:25.000Z
2019-03-13T08:20:25.000Z
if not session.token: redirect(LOGIN) def index(): return dict()
14
37
0.685714
10
70
4.8
1
0
0
0
0
0
0
0
0
0
0
0
0.185714
70
4
38
17.5
0.842105
0
0
0
0
0
0
0
0
0
0
0
0
1
0.333333
true
0
0
0.333333
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
0
1
1
0
0
6
208f3bdb24d16341294647fa849932e87e275497
97
py
Python
sigcom/ingest/__init__.py
dcic/signature-commons-controller
b69c4063235d927da27891e8a30d2822c6768a66
[ "Apache-2.0" ]
null
null
null
sigcom/ingest/__init__.py
dcic/signature-commons-controller
b69c4063235d927da27891e8a30d2822c6768a66
[ "Apache-2.0" ]
2
2020-06-09T14:52:34.000Z
2020-11-06T18:02:49.000Z
sigcom/ingest/__init__.py
dcic/signature-commons-controller
b69c4063235d927da27891e8a30d2822c6768a66
[ "Apache-2.0" ]
null
null
null
from sigcom.util.importdir import importdir_deep importdir_deep(__file__, __package__, globals())
48.5
48
0.85567
12
97
6.083333
0.75
0.356164
0
0
0
0
0
0
0
0
0
0
0.061856
97
2
49
48.5
0.802198
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
20d8f1aed3be39447ba8899b5f580669c3afed1f
76
py
Python
Ygdra.Python/ygdra/__init__.py
bsherwin/ProjectY
1fdbc595030c006b252530e685a6d4fd313a13c2
[ "MIT" ]
null
null
null
Ygdra.Python/ygdra/__init__.py
bsherwin/ProjectY
1fdbc595030c006b252530e685a6d4fd313a13c2
[ "MIT" ]
null
null
null
Ygdra.Python/ygdra/__init__.py
bsherwin/ProjectY
1fdbc595030c006b252530e685a6d4fd313a13c2
[ "MIT" ]
null
null
null
from .ygdra import * from .dataprofile import * from .lambdaYgdra import *
15.2
26
0.75
9
76
6.333333
0.555556
0.350877
0
0
0
0
0
0
0
0
0
0
0.171053
76
4
27
19
0.904762
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
20e50428069b728cafb9a812fc46779cc1650ecb
4,102
py
Python
todo/Julian pruebas y partes de proyecto/Ensamblaje_finalp.py
JRobayo99/Proyecto-RestauranteEAN
3a7f71dbf5c09c1beafb4027ab3d7e9a4934ab30
[ "MIT" ]
null
null
null
todo/Julian pruebas y partes de proyecto/Ensamblaje_finalp.py
JRobayo99/Proyecto-RestauranteEAN
3a7f71dbf5c09c1beafb4027ab3d7e9a4934ab30
[ "MIT" ]
null
null
null
todo/Julian pruebas y partes de proyecto/Ensamblaje_finalp.py
JRobayo99/Proyecto-RestauranteEAN
3a7f71dbf5c09c1beafb4027ab3d7e9a4934ab30
[ "MIT" ]
null
null
null
import tkinter import tkinter as tk class Demo1: def __init__(self, master): self.master = master self.frame = tk.Frame(self.master) self.frame = tk.Frame(self.master) self.button1 = tk.Button(self.frame, text = 'Menu del día', width = 80, command = self.new_window) self.button1.place(x=300,y=300) self.frame.pack() def new_window(self): self.newWindow = tk.Toplevel(self.master) self.app = Demo2(self.newWindow) class Demo2: def __init__(self, master): self.master = master self.frame = tk.Frame(self.master) self.quitButton = tk.Button(self.frame, text = 'Volver a inicio', width = 80, command = self.close_windows) self.quitButton.pack() self.frame.pack() def close_windows(self): self.master.destroy() class Demo12: def __init__(self, master): self.master = master self.frame = tk.Frame(self.master) self.frame = tk.Frame(self.master) self.button1 = tk.Button(self.frame,text="Aparta tu mesa",width=80,command = self.new_window) self.button1.pack() self.frame.pack() def new_window(self): self.newWindow = tk.Toplevel(self.master) self.app = Demo2(self.newWindow) class Demo22: def __init__(self, master): self.master = master self.frame = tk.Frame(self.master) self.quitButton = tk.Button(self.frame, text = 'Volver a inicio', width = 80, command = self.close_windows) self.quitButton.pack() self.frame.pack() def close_windows(self): self.master.destroy() class Demo32: def __init__(self, master): self.master = master self.frame = tk.Frame(self.master) self.frame = tk.Frame(self.master) self.button1 = tk.Button(self.frame,text="Inventario",width=80,command = self.new_window) self.button1.pack() self.frame.pack() def new_window(self): self.newWindow = tk.Toplevel(self.master) self.app = Demo2(self.newWindow) class Demo23: def __init__(self, master): self.master = master self.frame = tk.Frame(self.master) self.quitButton = tk.Button(self.frame, text = 'Volver a incio', width = 80, command = self.close_windows) self.quitButton.pack() self.frame.pack() def close_windows(self): self.master.destroy() class Demo42: def __init__(self, master): self.master = master self.frame = tk.Frame(self.master) self.frame = tk.Frame(self.master) self.button1 = tk.Button(self.frame,text="Registro de empleados",command = self.new_window,width=80) self.button1.pack() self.frame.pack() def new_window(self): self.newWindow = tk.Toplevel(self.master) self.app = Demo2(self.newWindow) class Demo24: def __init__(self, master): self.master = master self.frame = tk.Frame(self.master) self.quitButton = tk.Button(self.frame, text = 'Volver a incio', width = 80, command = self.close_windows) self.quitButton.pack() self.frame.pack() def close_windows(self): self.master.destroy() def main(): root=tk.Tk() root.geometry=("3000x3000") app = Demo1(root) app= Demo12(root) app=Demo32(root) app=Demo42(root) root.mainloop() if __name__ == '__main__': main() """ from tkinter import * from tkinter.ttk import * class NewWindow(Toplevel): def __init__(self, master = None): super().__init__(master = master) self.title("New Window") self.geometry("200x200") label = Label(self, text ="This is a new Window") label.pack() master = Tk() master.geometry("200x200") label = Label(master, text ="This is the main window") label.pack(side = TOP, pady = 10) btn = Button(master,text ="Click to open a new window") btn.bind("<Button>", lambda e: NewWindow(master)) btn.pack(pady = 10) mainloop() """
27.165563
115
0.613847
521
4,102
4.710173
0.153551
0.150774
0.136919
0.08313
0.734311
0.734311
0.734311
0.734311
0.718826
0.718826
0
0.025987
0.258898
4,102
151
116
27.165563
0.78125
0
0
0.728261
0
0
0.038117
0
0
0
0
0
0
1
0.184783
false
0
0.021739
0
0.293478
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
456f96cea1f3698ea3b2d72ed8aa82b9e037dc5c
71
py
Python
data/reconstruction/scar_seg/__init__.py
mseitzer/csmri-refinement
2cc8a691c03602c2a7c78c6144469ee00a7d64d6
[ "Apache-2.0" ]
27
2018-12-04T03:03:17.000Z
2022-02-26T16:42:07.000Z
data/reconstruction/scar_seg/__init__.py
mseitzer/csmri-refinement
2cc8a691c03602c2a7c78c6144469ee00a7d64d6
[ "Apache-2.0" ]
1
2019-07-05T12:04:05.000Z
2019-08-14T13:39:30.000Z
data/reconstruction/scar_seg/__init__.py
mseitzer/csmri-refinement
2cc8a691c03602c2a7c78c6144469ee00a7d64d6
[ "Apache-2.0" ]
6
2018-08-26T12:16:27.000Z
2021-02-25T10:14:21.000Z
from .scar_segmentation import get_train_set, get_val_set, get_test_set
71
71
0.887324
13
71
4.307692
0.692308
0.214286
0
0
0
0
0
0
0
0
0
0
0.070423
71
1
71
71
0.848485
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
457213d6025d056114381208d3b77c5eb4c097ca
87
py
Python
backend/mosaico/admin.py
PythonicNinja/pymosaico
9a0b1a82aad23adb496944ef09609208585ac3ee
[ "MIT" ]
1
2016-12-15T06:10:45.000Z
2016-12-15T06:10:45.000Z
backend/mosaico/admin.py
PythonicNinja/pymosaico
9a0b1a82aad23adb496944ef09609208585ac3ee
[ "MIT" ]
null
null
null
backend/mosaico/admin.py
PythonicNinja/pymosaico
9a0b1a82aad23adb496944ef09609208585ac3ee
[ "MIT" ]
null
null
null
from django.contrib import admin from models import * admin.site.register([Mosaico])
14.5
32
0.781609
12
87
5.666667
0.75
0.323529
0
0
0
0
0
0
0
0
0
0
0.126437
87
5
33
17.4
0.894737
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.666667
0
0.666667
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
458b6c4a68cb20c7d7e5f708205af08c43cfb055
39
py
Python
python-analysers/src/test/resources/org/jetbrains/research/lupa/pythonAnalysis/imports/analysis/psi/fromImportStatementsData/in_16_absolute_from_import_with_alias.py
JetBrains-Research/Lupa
c105487621564c60cae17395bf32eb40868ceb89
[ "Apache-2.0" ]
16
2022-01-11T00:32:20.000Z
2022-03-25T21:40:52.000Z
python-analysers/src/test/resources/org/jetbrains/research/lupa/pythonAnalysis/imports/analysis/psi/fromImportStatementsData/in_16_absolute_from_import_with_alias.py
nbirillo/Kotlin-Analysis
73c3b8a59bf40ed932bb512f30b0ff31f251af40
[ "Apache-2.0" ]
12
2021-07-05T11:42:01.000Z
2021-12-23T07:57:54.000Z
python-analysers/src/test/resources/org/jetbrains/research/lupa/pythonAnalysis/imports/analysis/psi/fromImportStatementsData/in_16_absolute_from_import_with_alias.py
nbirillo/Kotlin-Analysis
73c3b8a59bf40ed932bb512f30b0ff31f251af40
[ "Apache-2.0" ]
3
2021-09-10T13:21:54.000Z
2021-11-23T11:37:55.000Z
from src.tasks.task1 import utils as u
19.5
38
0.794872
8
39
3.875
1
0
0
0
0
0
0
0
0
0
0
0.030303
0.153846
39
1
39
39
0.909091
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
4591222dd64ec0841a2c7d08cc8dca0aac7049b5
28
py
Python
weight_logger/__init__.py
KlaudijusM/wiifitboardbit
36b381ec51881ece112d5cb5264c064e9517afc4
[ "CC0-1.0" ]
null
null
null
weight_logger/__init__.py
KlaudijusM/wiifitboardbit
36b381ec51881ece112d5cb5264c064e9517afc4
[ "CC0-1.0" ]
null
null
null
weight_logger/__init__.py
KlaudijusM/wiifitboardbit
36b381ec51881ece112d5cb5264c064e9517afc4
[ "CC0-1.0" ]
null
null
null
from . import weight_logger
14
27
0.821429
4
28
5.5
1
0
0
0
0
0
0
0
0
0
0
0
0.142857
28
1
28
28
0.916667
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
4592adc53688fa5c73055256aa3046c6d58e4302
139
py
Python
syzscope/interface/vm/error.py
plummm/SyzScope
71cc3dd973e7cd7bc14c2436cacc46a1a62fb942
[ "MIT" ]
20
2021-10-02T10:51:43.000Z
2022-03-24T07:45:13.000Z
syzscope/interface/vm/error.py
seclab-ucr/SyzScope
b1a6e20783ba8c92dd33d508e469bc24eaacaab6
[ "MIT" ]
2
2022-02-20T05:07:32.000Z
2022-03-22T02:23:24.000Z
syzscope/interface/vm/error.py
seclab-ucr/SyzScope
b1a6e20783ba8c92dd33d508e469bc24eaacaab6
[ "MIT" ]
1
2022-02-21T14:12:56.000Z
2022-02-21T14:12:56.000Z
class QemuIsDead(Exception): pass class AngrRefuseToLoadKernel(Exception): pass class KasanReportEntryNotFound(Exception): pass
23.166667
42
0.791367
12
139
9.166667
0.5
0.354545
0.327273
0
0
0
0
0
0
0
0
0
0.143885
139
6
43
23.166667
0.92437
0
0
0.5
0
0
0
0
0
0
0
0
0
1
0
true
0.5
0
0
0.5
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
0
0
0
0
0
6
afd7098b7b0b63d6a0471262db35fec45810f3c6
57
py
Python
www/apps/profiles/middleware/__init__.py
un33k/outsourcefactor
c48dbd11b74ba5fb72b85f05c431a16287f62507
[ "MIT" ]
2
2018-12-23T04:14:32.000Z
2018-12-23T15:02:08.000Z
www/apps/profiles/middleware/__init__.py
un33k/outsourcefactor
c48dbd11b74ba5fb72b85f05c431a16287f62507
[ "MIT" ]
null
null
null
www/apps/profiles/middleware/__init__.py
un33k/outsourcefactor
c48dbd11b74ba5fb72b85f05c431a16287f62507
[ "MIT" ]
1
2019-11-17T19:53:07.000Z
2019-11-17T19:53:07.000Z
from ProfileTypeMiddleware import ProfileTypeMiddleware
19
55
0.912281
4
57
13
0.75
0
0
0
0
0
0
0
0
0
0
0
0.087719
57
2
56
28.5
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
b31490d4cdd2352bd6a6ef9f50d9d643a3fec8dd
238
py
Python
utils/__init__.py
sebemery/Lipschitz-constrained-neural-networks
79ac2c6f7e7cef692cacf35619baf91beaeba948
[ "MIT" ]
null
null
null
utils/__init__.py
sebemery/Lipschitz-constrained-neural-networks
79ac2c6f7e7cef692cacf35619baf91beaeba948
[ "MIT" ]
null
null
null
utils/__init__.py
sebemery/Lipschitz-constrained-neural-networks
79ac2c6f7e7cef692cacf35619baf91beaeba948
[ "MIT" ]
null
null
null
from .logger import Logger from .metrics import * from .htmlwriter import * from .Spectral_Normalize import * from .Spectral_Normalize_chen import * from .ComputeSV import SingularValues from .bn_sn_chen import * from .utilities import *
26.444444
38
0.806723
31
238
6.032258
0.419355
0.26738
0.192513
0.28877
0
0
0
0
0
0
0
0
0.134454
238
8
39
29.75
0.907767
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
6
b31e87b42152399d2c86bf8f2584fa6dbde06b82
120
py
Python
app/extras/__init__.py
originaltebas/chmembers
983578ec8cb6d1da76e98b1467d996d6fac752ee
[ "MIT" ]
null
null
null
app/extras/__init__.py
originaltebas/chmembers
983578ec8cb6d1da76e98b1467d996d6fac752ee
[ "MIT" ]
2
2021-09-08T01:19:10.000Z
2022-03-11T23:59:40.000Z
app/extras/__init__.py
originaltebas/chmembers
983578ec8cb6d1da76e98b1467d996d6fac752ee
[ "MIT" ]
1
2019-04-09T10:42:20.000Z
2019-04-09T10:42:20.000Z
# app/extras/__init__.py from flask import Blueprint extras = Blueprint('extras', __name__) from . import views
17.142857
39
0.725
15
120
5.266667
0.666667
0.379747
0
0
0
0
0
0
0
0
0
0
0.183333
120
7
40
17.142857
0.806122
0.183333
0
0
0
0
0.065934
0
0
0
0
0
0
1
0
false
0
0.666667
0
0.666667
0.666667
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
1
0
6
b32c196365ece289a7a298e5c91866872ef000d4
33
py
Python
__init__.py
InfernoPL/pyramid-text
d9e4b9444a2d1411caad740824d7ee062acdb254
[ "MIT" ]
null
null
null
__init__.py
InfernoPL/pyramid-text
d9e4b9444a2d1411caad740824d7ee062acdb254
[ "MIT" ]
null
null
null
__init__.py
InfernoPL/pyramid-text
d9e4b9444a2d1411caad740824d7ee062acdb254
[ "MIT" ]
null
null
null
from pyramidtext.pyramid import *
33
33
0.848485
4
33
7
1
0
0
0
0
0
0
0
0
0
0
0
0.090909
33
1
33
33
0.933333
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
b35f84678745c9c4de019b490ea9fdac38d66a0c
146
py
Python
src/my_module.py
qiuosier/python_test
1513611b882741da74a05dd62d465e9c21ee481b
[ "MIT" ]
null
null
null
src/my_module.py
qiuosier/python_test
1513611b882741da74a05dd62d465e9c21ee481b
[ "MIT" ]
null
null
null
src/my_module.py
qiuosier/python_test
1513611b882741da74a05dd62d465e9c21ee481b
[ "MIT" ]
null
null
null
import os import ads def my_function_in_module(): print("This is a function in a module.") print(ads.__version__) print(os.environ)
16.222222
44
0.705479
23
146
4.173913
0.608696
0.208333
0
0
0
0
0
0
0
0
0
0
0.19863
146
8
45
18.25
0.820513
0
0
0
0
0
0.212329
0
0
0
0
0
0
1
0.166667
true
0
0.333333
0
0.5
0.5
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
1
0
6
2fb061817a75fdf42148b874ff425c0e4f15fd65
115
py
Python
codewars/8kyu/dinamuh/YouCantCode/test.py
dinamuh/Training_one
d18e8fb12608ce1753162c20252ca928c4df97ab
[ "MIT" ]
null
null
null
codewars/8kyu/dinamuh/YouCantCode/test.py
dinamuh/Training_one
d18e8fb12608ce1753162c20252ca928c4df97ab
[ "MIT" ]
2
2019-01-22T10:53:42.000Z
2019-01-31T08:02:48.000Z
codewars/8kyu/dinamuh/YouCantCode/test.py
dinamuh/Training_one
d18e8fb12608ce1753162c20252ca928c4df97ab
[ "MIT" ]
13
2019-01-22T10:37:42.000Z
2019-01-25T13:30:43.000Z
from main import double_integer def test_double_integer(benchmark): assert benchmark(double_integer, 2) == 4
19.166667
44
0.782609
16
115
5.375
0.6875
0.453488
0
0
0
0
0
0
0
0
0
0.020408
0.147826
115
5
45
23
0.857143
0
0
0
0
0
0
0
0
0
0
0
0.333333
1
0.333333
false
0
0.333333
0
0.666667
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
1
0
1
0
0
6
2fe7b5d0377674e4a4ba2e3d0143e51729a289c6
1,227
py
Python
matrx/logger/log_tick.py
matrx-software/matrx
5b36ef1018e85172dc88cd7467e3087ef94c58ba
[ "MIT" ]
6
2020-03-02T10:42:34.000Z
2021-05-16T12:21:25.000Z
matrx/logger/log_tick.py
matrx-software/matrx
5b36ef1018e85172dc88cd7467e3087ef94c58ba
[ "MIT" ]
262
2020-02-27T13:37:40.000Z
2022-03-29T11:44:57.000Z
matrx/logger/log_tick.py
matrx-software/matrx
5b36ef1018e85172dc88cd7467e3087ef94c58ba
[ "MIT" ]
3
2020-02-27T12:59:22.000Z
2021-12-10T13:53:58.000Z
from matrx.logger.logger import GridWorldLogger, GridWorldLoggerV2 class LogDuration(GridWorldLogger): """ Log the number of ticks the Gridworld was running on completion """ def __init__(self, save_path="", file_name_prefix="", file_extension=".csv", delimeter=";"): super().__init__(save_path=save_path, file_name=file_name_prefix, file_extension=file_extension, delimiter=delimeter, log_strategy=self.LOG_ON_LAST_TICK) def log(self, grid_world, agent_data): log_statement = { "tick": grid_world.current_nr_ticks } return log_statement class LogDurationV2(GridWorldLoggerV2): """ Log the number of ticks the Gridworld was running on completion """ def __init__(self, save_path="", file_name_prefix="", file_extension=".csv", delimeter=";"): super().__init__(save_path=save_path, file_name=file_name_prefix, file_extension=file_extension, delimiter=delimeter, log_strategy=self.LOG_ON_LAST_TICK) def log(self, world_state, agent_data, grid_world): log_statement = { "tick": grid_world.current_nr_ticks } return log_statement
39.580645
105
0.673187
145
1,227
5.275862
0.303448
0.062745
0.062745
0.08366
0.776471
0.776471
0.776471
0.776471
0.776471
0.776471
0
0.003178
0.230644
1,227
30
106
40.9
0.807203
0.104319
0
0.631579
0
0
0.017062
0
0
0
0
0
0
1
0.210526
false
0
0.052632
0
0.473684
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
6
641bf78cf98b1fe2b8f548cb600831bcb7e706b1
6,735
py
Python
simple_tree_tests.py
pombredanne/farach-suffix-tree
a90ac6df57ed8b7512b43c781a04fc6e0d43654e
[ "MIT" ]
1
2021-03-01T13:10:00.000Z
2021-03-01T13:10:00.000Z
simple_tree_tests.py
pombredanne/farach-suffix-tree
a90ac6df57ed8b7512b43c781a04fc6e0d43654e
[ "MIT" ]
1
2021-02-27T08:52:58.000Z
2021-03-01T13:09:46.000Z
simple_tree_tests.py
pombredanne/farach-suffix-tree
a90ac6df57ed8b7512b43c781a04fc6e0d43654e
[ "MIT" ]
1
2021-02-27T08:50:08.000Z
2021-02-27T08:50:08.000Z
import farach from utils import Node def run_tests(): inputstr = farach.str2int('1') constructed_tree = farach.construct_suffix_tree(inputstr) expected_result = Node(aId='root') expected_result.add_child(Node(aId=1, aStrLength=2)) expected_result.add_child(Node(aId=2, aStrLength=1)) constructed_tree.update_leaf_list() expected_result.update_leaf_list() # print('inputstr: %s' % inputstr) # print('expected:') # print(expected_result.fancyprint()) # print('actual:') # print(constructed_tree.fancyprint()) assert constructed_tree.fancyprint(inputstr) == expected_result.fancyprint(inputstr) inputstr = farach.str2int('12') constructed_tree = farach.construct_suffix_tree(inputstr) expected_result = Node(aId='root') expected_result.add_child(Node(aId=1, aStrLength=3)) expected_result.add_child(Node(aId=2, aStrLength=2)) expected_result.add_child(Node(aId=3, aStrLength=1)) constructed_tree.update_leaf_list() expected_result.update_leaf_list() # print('inputstr: %s' % inputstr) # print('expected:') # print(expected_result.fancyprint(inputstr)) # print('actual:') # print(constructed_tree.fancyprint(inputstr)) assert constructed_tree.fancyprint(inputstr) == expected_result.fancyprint(inputstr) inputstr = farach.str2int('11') constructed_tree = farach.construct_suffix_tree(inputstr) expected_result = Node(aId='root') innernode = Node(aId='inner', aStrLength=1) expected_result.add_child(innernode) innernode.add_child(Node(aId=1, aStrLength=3)) innernode.add_child(Node(aId=2, aStrLength=2)) expected_result.add_child(Node(aId=3, aStrLength=1)) constructed_tree.update_leaf_list() expected_result.update_leaf_list() # print('inputstr: %s' % inputstr) # print('expected:') # print(expected_result.fancyprint(inputstr)) # print('actual:') # print(constructed_tree.fancyprint(inputstr)) assert constructed_tree.fancyprint(inputstr) == expected_result.fancyprint(inputstr) inputstr = farach.str2int('111') constructed_tree = farach.construct_suffix_tree(inputstr) expected_result = Node(aId='root') inner1 = Node(aId='inner', aStrLength=1) inner2 = Node(aId='inner', aStrLength=2) leaf1 = Node(aId=1, aStrLength=4) leaf2 = Node(aId=2, aStrLength=3) leaf3 = Node(aId=3, aStrLength=2) leaf4 = Node(aId=4, aStrLength=1) expected_result.add_child(inner1) expected_result.add_child(leaf4) inner1.add_child(inner2) inner1.add_child(leaf3) inner2.add_child(leaf1) inner2.add_child(leaf2) constructed_tree.update_leaf_list() expected_result.update_leaf_list() # print('inputstr: %s' % inputstr) # print('expected:') # print(expected_result.fancyprint(inputstr)) # print('actual:') # print(constructed_tree.fancyprint(inputstr)) assert constructed_tree.fancyprint(inputstr) == expected_result.fancyprint(inputstr) # inputstr = farach.str2int('122') # constructed_tree = farach.construct_suffix_tree(inputstr) # expected_result = Node(aId='root') # expected_result.add_child(Node(aId=1, aStrLength=[12])) # assert constructed_tree.fancyprint(inputstr) == expected_result.fancyprint(inputstr) inputstr = farach.str2int('1222') constructed_tree = farach.construct_suffix_tree(inputstr) expected_result = Node(aId='root') inner1 = Node(aId='inner', aStrLength=1) inner2 = Node(aId='inner', aStrLength=2) leaf1 = Node(aId=1, aStrLength=5) leaf2 = Node(aId=2, aStrLength=4) leaf3 = Node(aId=3, aStrLength=3) leaf4 = Node(aId=4, aStrLength=2) leaf5 = Node(aId=5, aStrLength=1) expected_result.add_child(leaf1) expected_result.add_child(inner1) expected_result.add_child(leaf5) inner1.add_child(inner2) inner1.add_child(leaf4) inner2.add_child(leaf2) inner2.add_child(leaf3) expected_result.update_leaf_list() # print('inputstr: %s' % inputstr) # print('expected:') # print(expected_result.fancyprint(inputstr)) # print('actual:') # print(constructed_tree.fancyprint(inputstr)) assert constructed_tree.fancyprint(inputstr) == expected_result.fancyprint(inputstr) # inputstr = farach.str2int('1221') # constructed_tree = farach.construct_suffix_tree(inputstr) # expected_result = Node(aId='root') # expected_result.add_child(Node(aId=1, aStrLength=[12])) # assert constructed_tree.fancyprint(inputstr) == expected_result.fancyprint(inputstr) # inputstr = farach.str2int('2221') # constructed_tree = farach.construct_suffix_tree(inputstr) # expected_result = Node(aId='root') # expected_result.add_child(Node(aId=1, aStrLength=[12])) # assert constructed_tree.fancyprint(inputstr) == expected_result.fancyprint(inputstr) banana_test() print('tests succeeded!') def current_test(): inputstr = farach.str2int('1222') constructed_tree = farach.construct_suffix_tree(inputstr) expected_result = Node(aId='root') inner1 = Node(aId='inner', aStrLength=[2]) inner2 = Node(aId='inner', aStrLength=[2]) leaf1 = Node(aId=1, aStrLength=[1, 2, 2, 2, 3]) leaf2 = Node(aId=2, aStrLength=[2, 3]) leaf3 = Node(aId=3, aStrLength=[3]) leaf4 = Node(aId=4, aStrLength=[3]) leaf5 = Node(aId=5, aStrLength=[3]) expected_result.add_child(leaf1) expected_result.add_child(inner1) expected_result.add_child(leaf5) inner1.add_child(inner2) inner1.add_child(leaf4) inner2.add_child(leaf2) inner2.add_child(leaf3) # print('-'*80) # print('inputstr: %s' % inputstr) # print('expected:') # print(expected_result.fancyprint(inputstr)) # print('actual:') # print(constructed_tree.fancyprint(inputstr)) assert constructed_tree.fancyprint(inputstr) == expected_result.fancyprint(inputstr) def banana_test(): # banana # 123232 inputstr = farach.str2int('123232') root = Node(aId="root") root.add_child(Node(7, 1)) inner = Node(1, "inner") root.add_child(inner) inner2 = Node(3, "inner") inner2.add_child(Node(6, 2)) inner2.add_child(Node(4, 4)) inner.add_child(inner2) inner.add_child(Node(2, 6)) inner = Node(2, "inner") inner.add_child(Node(5, 3)) inner.add_child(Node(3, 5)) root.add_child(inner) root.add_child(Node(1, 7)) constructed_tree = farach.construct_suffix_tree(inputstr) root.update_leaf_list() # print(constructed_tree.fancyprint(inputstr)) # print(root.fancyprint(inputstr)) assert constructed_tree.fancyprint(inputstr) == root.fancyprint(inputstr) def main(): run_tests() if __name__ == '__main__': main()
34.187817
90
0.703044
831
6,735
5.489771
0.073406
0.144235
0.086804
0.086804
0.871548
0.83253
0.803595
0.766988
0.757124
0.746164
0
0.033475
0.161693
6,735
196
91
34.362245
0.774531
0.2732
0
0.491071
0
0
0.025599
0
0
0
0
0
0.0625
1
0.035714
false
0
0.017857
0
0.053571
0.071429
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
642271708dc21d2fb932d72d393983ccada0e7bc
2,712
py
Python
minecraft_letters.py
blackswanburst/mikemccllstr-python-minecraft
25a5a91269b6932157ec054c98902d79ad236871
[ "MIT" ]
3
2019-03-26T15:55:01.000Z
2020-09-02T09:01:48.000Z
minecraft_letters.py
mikemccllstr/mikemccllstr-python-minecraft
b1765ad7bb39dfad00944a7d8fa914484c88f95a
[ "MIT" ]
null
null
null
minecraft_letters.py
mikemccllstr/mikemccllstr-python-minecraft
b1765ad7bb39dfad00944a7d8fa914484c88f95a
[ "MIT" ]
2
2018-07-27T14:10:29.000Z
2020-01-16T09:06:48.000Z
# Written by Jessica Zehavi for CoderDojo Twin Cities - www.coderdojotc.org def write ( minecraft, text, material ): letters = { 'A' : [[1,0,0,1],[1,0,0,1],[1,1,1,1],[1,0,0,1],[0,1,1,0]], 'B' : [[1,1,1,0],[1,0,0,1],[1,1,1,0],[1,0,0,1],[1,1,1,0]], 'C' : [[0,1,1,0],[1,0,0,1],[1,0,0,0],[1,0,0,1],[0,1,1,0]], 'D' : [[1,1,1,0],[1,0,0,1],[1,0,0,1],[1,0,0,1],[1,1,1,0]], 'E' : [[1,1,1,1],[1,0,0,0],[1,1,1,1],[1,0,0,0],[1,1,1,1]], 'F' : [[1,1,1,1],[1,0,0,0],[1,1,1,1],[1,0,0,0],[1,0,0,0]], 'G' : [[1,1,1,0],[1,0,0,1],[1,0,0,0],[1,0,1,1],[0,1,1,0]], 'H' : [[1,0,0,1],[1,0,0,1],[1,1,1,1],[1,0,0,1],[1,0,0,1]], 'I' : [[1,1,1],[0,1,0],[0,1,0],[0,1,0],[1,1,1]], 'J' : [[0,1,1,0],[1,0,0,1],[0,0,0,1],[0,0,0,1],[0,0,0,1]], 'K' : [[1,0,0,1],[1,0,1,0],[1,1,0,0],[1,0,1,0],[1,0,0,1]], 'L' : [[1,1,1,1],[1,0,0,0],[1,0,0,0],[1,0,0,0],[1,0,0,0]], 'M' : [[1,0,0,0,1],[1,0,0,0,1],[1,0,1,0,1],[1,1,0,1,1],[1,0,0,0,1]], 'N' : [[1,0,0,0,1],[1,0,0,1,1],[1,0,1,0,1],[1,1,0,0,1],[1,0,0,0,1]], 'O' : [[0,1,1,0],[1,0,0,1],[1,0,0,1],[1,0,0,1],[0,1,1,0]], 'P' : [[1,0,0,0],[1,0,0,0],[1,1,1,1],[1,0,0,1],[1,1,1,1]], 'Q' : [[0,0,1,1],[0,1,1,0],[1,0,0,1],[1,0,0,1],[0,1,1,0]], 'R' : [[1,0,0,1],[1,0,1,0],[1,1,1,1],[1,0,0,1],[1,1,1,1]], 'S' : [[1,1,1,0],[0,0,0,1],[0,1,1,0],[1,0,0,0],[0,1,1,1]], 'T' : [[0,1,0],[0,1,0],[0,1,0],[0,1,0],[1,1,1]], 'U' : [[0,1,1,0],[1,0,0,1],[1,0,0,1],[1,0,0,1],[1,0,0,1]], 'V' : [[0,0,1,0,0],[0,1,0,1,0],[0,1,0,1,0],[1,0,0,0,1],[1,0,0,0,1]], 'W' : [[0,1,0,1,0],[1,0,1,0,1],[1,0,1,0,1],[1,0,0,0,1],[1,0,0,0,1]], 'X' : [[1,0,0,0,1],[0,1,0,1,0],[0,0,1,0,0],[0,1,0,1,0],[1,0,0,0,1]], 'Y' : [[0,0,1,0,0],[0,0,1,0,0],[0,1,0,1,0],[1,0,0,0,1],[1,0,0,0,1]], 'Z' : [[1,1,1,1,1],[0,1,0,0,0],[0,0,1,0,0],[0,0,0,1,0],[1,1,1,1,1]], ' ' : [[0,0,0],[0,0,0],[0,0,0],[0,0,0],[0,0,0]]} # Get the player's current position pos = minecraft.player.getPos() row = 0 kearning = 0 for letter in text.upper(): print letter while row < len( letters[letter] ): col = 0 while col < len( letters[letter][0] ): # If a block should be printed in that row/column for a # given letter, print it if letters[letter][row][col]: minecraft.setBlock( pos.x - col - kearning, pos.y + row + 1, pos.z, material ) col = col + 1 row = row + 1 # Reset the row and col for each letter row = 0 # Adjust the spacing based on how big the letter is since this is # not a fixed width font kearning = kearning + len( letters[letter][0] ) + 1
45.2
83
0.411504
712
2,712
1.567416
0.122191
0.249104
0.209677
0.139785
0.501792
0.501792
0.498208
0.481183
0.458781
0.391577
0
0.263767
0.203171
2,712
59
84
45.966102
0.252661
0.114676
0
0.047619
0
0
0.011283
0
0
0
0
0
0
0
null
null
0
0
null
null
0.02381
0
0
1
null
1
1
0
0
0
0
0
0
0
0
1
0
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
6
643fa23086981775919ed3064787e365e8b68088
11,873
py
Python
examples/ner/__main__.py
nlpaueb/GreekBERT
2f0d84b65b77e8465bbbdbe77f9ec5a685b1ce15
[ "MIT" ]
117
2020-02-14T02:05:29.000Z
2022-03-24T23:03:37.000Z
examples/ner/__main__.py
nlpaueb/GreekBERT
2f0d84b65b77e8465bbbdbe77f9ec5a685b1ce15
[ "MIT" ]
4
2020-02-14T20:29:44.000Z
2022-02-28T07:44:22.000Z
examples/ner/__main__.py
nlpaueb/GreekBERT
2f0d84b65b77e8465bbbdbe77f9ec5a685b1ce15
[ "MIT" ]
10
2020-02-19T09:22:37.000Z
2021-12-05T14:29:45.000Z
import click import fasttext import numpy as np import pickle from ..utils.fasttext_downloader import download_model from ..utils.text import strip_accents_and_lowercase from .utils import parse_ner_dataset_file from .bert.system_wrapper import NERBERTSystemWrapper from .rnn.system_wrapper import NERRNNSystemWrapper @click.group() def ner(): pass @ner.group() def multi_bert(): pass @multi_bert.command() @click.argument('train_dataset_file', type=click.File('r'), default='data/ner/train.txt') @click.argument('val_dataset_file', type=click.File('r'), default='data/ner/dev.txt') @click.option('--multi-gpu', is_flag=True) def tune(train_dataset_file, val_dataset_file, multi_gpu): results = NERBERTSystemWrapper.tune( 'bert-base-multilingual-uncased', strip_accents_and_lowercase, True, train_dataset_file, val_dataset_file, multi_gpu ) print(max(results, key=lambda x: x[0])) @multi_bert.command() @click.argument('train_dataset_file', type=click.File('r'), default='data/ner/train.txt') @click.argument('dev_dataset_file', type=click.File('r'), default='data/ner/dev.txt') @click.argument('test_dataset_file', type=click.File('r'), default='data/ner/test.txt') @click.option('--batch-size', type=int, default=8) @click.option('--lr', type=float, default=3e-05) @click.option('--dp', type=float, default=0) @click.option('--grad-accumulation-steps', type=int, default=2) @click.option('--multi-gpu', is_flag=True) @click.option('--silent', is_flag=True) @click.option('--seed', type=int, default=0) def run(train_dataset_file, dev_dataset_file, test_dataset_file, batch_size, lr, dp, grad_accumulation_steps, multi_gpu, silent, seed): sw = NERBERTSystemWrapper( 'bert-base-multilingual-uncased', strip_accents_and_lowercase, True, {'dp': dp} ) sw.train(train_dataset_file, dev_dataset_file, lr, batch_size, grad_accumulation_steps, multi_gpu, not silent, seed) results = sw.evaluate(test_dataset_file, batch_size, multi_gpu, not silent) print(results) @ner.group() def greek_bert(): pass @greek_bert.command() @click.argument('train_dataset_file', type=click.File('r'), default='data/ner/train.txt') @click.argument('dev_dataset_file', type=click.File('r'), default='data/ner/dev.txt') @click.option('--multi-gpu', is_flag=True) def tune(train_dataset_file, dev_dataset_file, multi_gpu): results = NERBERTSystemWrapper.tune( 'nlpaueb/bert-base-greek-uncased-v1', strip_accents_and_lowercase, True, train_dataset_file, dev_dataset_file, multi_gpu ) print(max(results, key=lambda x: x[0])) @greek_bert.command() @click.argument('train_dataset_file', type=click.File('r'), default='data/ner/train.txt') @click.argument('dev_dataset_file', type=click.File('r'), default='data/ner/dev.txt') @click.argument('test_dataset_file', type=click.File('r'), default='data/ner/test.txt') @click.option('--model-weights-save-path', type=str, default=None) @click.option('--batch-size', type=int, default=8) @click.option('--lr', type=float, default=5e-05) @click.option('--dp', type=float, default=0.2) @click.option('--grad-accumulation-steps', type=int, default=2) @click.option('--multi-gpu', is_flag=True) @click.option('--silent', is_flag=True) @click.option('--seed', type=int, default=0) def run(train_dataset_file, dev_dataset_file, test_dataset_file, model_weights_save_path, batch_size, lr, dp, grad_accumulation_steps, multi_gpu, silent, seed): sw = NERBERTSystemWrapper( 'nlpaueb/bert-base-greek-uncased-v1', strip_accents_and_lowercase, True, {'dp': dp} ) sw.train(train_dataset_file, dev_dataset_file, lr, batch_size, grad_accumulation_steps, 
             multi_gpu, not silent, seed)
    results = sw.evaluate(test_dataset_file, batch_size, multi_gpu, not silent)
    print(results)
    if model_weights_save_path:
        sw.save_model_state(model_weights_save_path)


@ner.group()
def cased_multi_bert():
    pass


@cased_multi_bert.command()
@click.argument('train_dataset_file', type=click.File('r'), default='data/ner/train.txt')
@click.argument('dev_dataset_file', type=click.File('r'), default='data/ner/dev.txt')
@click.option('--multi-gpu', is_flag=True)
def tune(train_dataset_file, dev_dataset_file, multi_gpu):
    results = NERBERTSystemWrapper.tune(
        'bert-base-multilingual-cased', None, True, train_dataset_file, dev_dataset_file, multi_gpu
    )
    print(max(results, key=lambda x: x[0]))


@cased_multi_bert.command()
@click.argument('train_dataset_file', type=click.File('r'), default='data/ner/train.txt')
@click.argument('dev_dataset_file', type=click.File('r'), default='data/ner/dev.txt')
@click.argument('test_dataset_file', type=click.File('r'), default='data/ner/test.txt')
@click.option('--batch-size', type=int, default=4)
@click.option('--lr', type=float, default=2e-05)
@click.option('--dp', type=float, default=0)
@click.option('--grad-accumulation-steps', type=int, default=8)
@click.option('--multi-gpu', is_flag=True)
@click.option('--silent', is_flag=True)
@click.option('--seed', type=int, default=0)
def run(train_dataset_file, dev_dataset_file, test_dataset_file, batch_size, lr, dp,
        grad_accumulation_steps, multi_gpu, silent, seed):
    sw = NERBERTSystemWrapper(
        'bert-base-multilingual-cased', None, True, {'dp': dp}
    )
    sw.train(train_dataset_file, dev_dataset_file, lr, batch_size, grad_accumulation_steps,
             multi_gpu, not silent, seed)
    results = sw.evaluate(test_dataset_file, batch_size, multi_gpu, not silent)
    print(results)


@ner.group()
def xlm_r():
    pass


@xlm_r.command()
@click.argument('train_dataset_file', type=click.File('r'), default='data/ner/train.txt')
@click.argument('dev_dataset_file', type=click.File('r'), default='data/ner/dev.txt')
@click.option('--multi-gpu', is_flag=True)
def tune(train_dataset_file, dev_dataset_file, multi_gpu):
    results = NERBERTSystemWrapper.tune(
        'xlm-roberta-base', None, False, train_dataset_file, dev_dataset_file, multi_gpu
    )
    print(max(results, key=lambda x: x[0]))


@xlm_r.command()
@click.argument('train_dataset_file', type=click.File('r'), default='data/ner/train.txt')
@click.argument('dev_dataset_file', type=click.File('r'), default='data/ner/dev.txt')
@click.argument('test_dataset_file', type=click.File('r'), default='data/ner/test.txt')
@click.option('--model-weights-save-path', type=str, default=None)
@click.option('--batch-size', type=int, default=8)
@click.option('--lr', type=float, default=2e-05)
@click.option('--dp', type=float, default=0)
@click.option('--grad-accumulation-steps', type=int, default=2)
@click.option('--multi-gpu', is_flag=True)
@click.option('--silent', is_flag=True)
@click.option('--seed', type=int, default=0)
def run(train_dataset_file, dev_dataset_file, test_dataset_file, model_weights_save_path,
        batch_size, lr, dp, grad_accumulation_steps, multi_gpu, silent, seed):
    sw = NERBERTSystemWrapper(
        'xlm-roberta-base', None, False, {'dp': dp}
    )
    sw.train(train_dataset_file, dev_dataset_file, lr, batch_size, grad_accumulation_steps,
             multi_gpu, not silent, seed)
    results = sw.evaluate(test_dataset_file, batch_size, multi_gpu, not silent)
    print(results)
    if model_weights_save_path:
        sw.save_model_state(model_weights_save_path)


@ner.group()
def rnn():
    pass


@rnn.command()
@click.argument('tmp_download_path', type=str, default='data')
@click.argument('embeddings_save_path', type=str, default='data/ner/ner_ft.pkl')
@click.argument('dataset_file_paths', type=str, nargs=-1)
def download_embeddings(tmp_download_path, embeddings_save_path, dataset_file_paths):
    download_model('el', tmp_download_path, if_exists='ignore')
    ft = fasttext.load_model(f'{tmp_download_path}/cc.el.300.bin')
    if not dataset_file_paths:
        dataset_file_paths = [f'data/ner/{ds}.txt' for ds in ('silver_train', 'dev', 'test')]
    vocab = set()
    for p in dataset_file_paths:
        with open(p) as fr:
            for e in parse_ner_dataset_file(fr):
                for t in e:
                    vocab.add(t['text'].lower())
    word_vectors = []
    i2w = list(vocab)
    for word in i2w:
        word_vectors.append(ft.get_word_vector(word))
    word_vectors = [[0] * len(word_vectors[0])] + word_vectors
    i2w = ['<PAD>'] + i2w
    w2i = {w: i for i, w in enumerate(i2w)}
    with open(embeddings_save_path, 'wb') as fw:
        pickle.dump((np.array(word_vectors), w2i, i2w), fw)


@rnn.command()
@click.argument('train_dataset_file', type=click.File('r'), default='data/ner/silver_train.txt')
@click.argument('char_vocab_save_path', type=str, default='data/ner/char_voc.pkl')
def create_char_vocab(train_dataset_file, char_vocab_save_path):
    vocab = set()
    for e in parse_ner_dataset_file(train_dataset_file):
        for t in e:
            vocab.update(list(t['text']))
    c2i = {c: i + 4 for i, c in enumerate(vocab)}
    c2i['<PAD>'] = 0
    c2i['<UNK>'] = 1
    c2i['<SOW>'] = 2
    c2i['<EOW>'] = 3
    with open(char_vocab_save_path, 'wb') as fw:
        pickle.dump(c2i, fw)


@rnn.command()
@click.argument('train_dataset_file', type=click.File('r'), default='data/ner/train.txt')
@click.argument('dev_dataset_file', type=click.File('r'), default='data/ner/dev.txt')
@click.argument('embeddings_file', type=click.File('rb'), default='data/ner/ner_ft.pkl')
@click.argument('char_vocab_file', type=click.File('rb'), default='data/ner/char_voc.pkl')
@click.option('--multi-gpu', is_flag=True)
def tune(train_dataset_file, dev_dataset_file, embeddings_file, char_vocab_file, multi_gpu):
    embeddings, w2i, _ = pickle.load(embeddings_file)
    c2i = pickle.load(char_vocab_file)
    results = NERRNNSystemWrapper.tune(
        embeddings, w2i, c2i, train_dataset_file, dev_dataset_file, multi_gpu
    )
    print(max(results, key=lambda x: x[0]))


@rnn.command()
@click.argument('train_dataset_file', type=click.File('r'), default='data/ner/train.txt')
@click.argument('dev_dataset_file', type=click.File('r'), default='data/ner/dev.txt')
@click.argument('test_dataset_file', type=click.File('r'), default='data/ner/test.txt')
@click.argument('embeddings_file', type=click.File('rb'), default='data/ner/ner_ft.pkl')
@click.argument('char_vocab_file', type=click.File('rb'), default='data/ner/char_voc.pkl')
@click.option('--batch-size', type=int, default=16)
@click.option('--lr', type=float, default=1e-03)
@click.option('--dp', type=float, default=0.3)
@click.option('--rnn-hs', type=int, default=300)
@click.option('--char-emb-size', type=int, default=30)
@click.option('--grad-accumulation-steps', type=int, default=1)
@click.option('--multi-gpu', is_flag=True)
@click.option('--silent', is_flag=True)
@click.option('--seed', type=int, default=0)
def run(train_dataset_file, dev_dataset_file, test_dataset_file, embeddings_file, char_vocab_file,
        batch_size, lr, dp, rnn_hs, char_emb_size, grad_accumulation_steps, multi_gpu, silent, seed):
    embeddings, w2i, _ = pickle.load(embeddings_file)
    c2i = pickle.load(char_vocab_file)
    sw = NERRNNSystemWrapper(
        embeddings, w2i, c2i, {
            'rnn_dp': dp,
            'mlp_dp': dp,
            'rnn_hidden_size': rnn_hs,
            'char_embeddings_shape': (len(c2i), char_emb_size)
        }
    )
    sw.train(train_dataset_file, dev_dataset_file, lr, batch_size, grad_accumulation_steps,
             multi_gpu, not silent, seed)
    results = sw.evaluate(test_dataset_file, batch_size, multi_gpu, not silent)
    print(results)


if __name__ == '__main__':
    ner()
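Since this record's content is a click command tree, the quickest smoke test is click's bundled test runner. A minimal sketch, assuming the script above is importable as a module named ner_cli (a hypothetical name) and that the default data files exist:

from click.testing import CliRunner

from ner_cli import ner  # hypothetical module name for the script above

runner = CliRunner()
# Drive the `xlm-r run` subcommand exactly as the shell would:
result = runner.invoke(ner, ['xlm-r', 'run', '--batch-size', '8', '--silent'])
print(result.exit_code)
print(result.output)

CliRunner.invoke returns a Result whose exit_code and output expose argument-wiring mistakes without needing a GPU run.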
35.762048
120
0.695443
1,722
11,873
4.567944
0.094077
0.120264
0.067124
0.064836
0.82329
0.809942
0.80206
0.765446
0.748284
0.742944
0
0.008262
0.143687
11,873
331
121
35.870091
0.765418
0
0
0.680297
0
0
0.173334
0.042197
0
0
0
0
0
1
0.066915
false
0.022305
0.033457
0
0.100372
0.037175
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
ff6e2eb59c87f15bdc0489509746ed7638b79ffd
144
py
Python
src/echelon/api/__init__.py
takeshi-teshima/echelon-py
f95fd24f6023921fbe19f16ea7ab15cef5099e5c
[ "Apache-2.0" ]
null
null
null
src/echelon/api/__init__.py
takeshi-teshima/echelon-py
f95fd24f6023921fbe19f16ea7ab15cef5099e5c
[ "Apache-2.0" ]
3
2021-11-02T14:28:28.000Z
2022-01-28T03:51:07.000Z
src/echelon/api/__init__.py
takeshi-teshima/echelon-py
f95fd24f6023921fbe19f16ea7ab15cef5099e5c
[ "Apache-2.0" ]
null
null
null
from echelon.api.dataframe_api import DataFrameEchelonAnalysis
from echelon.api.ndarray_api import OneDimEchelonAnalysis, TwoDimEchelonAnalysis
48
80
0.902778
15
144
8.533333
0.6
0.171875
0.21875
0
0
0
0
0
0
0
0
0
0.0625
144
2
81
72
0.948148
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
ff7956e4c812ed56f7c672a4cfca3a22f6afab38
189
py
Python
src/onevision/file/handler/__init__.py
phlong3105/onevision
90552b64df7213e7fbe23c80ffd8a89583289433
[ "MIT" ]
2
2022-03-28T09:46:38.000Z
2022-03-28T14:12:32.000Z
src/onevision/file/handler/__init__.py
phlong3105/onevision
90552b64df7213e7fbe23c80ffd8a89583289433
[ "MIT" ]
null
null
null
src/onevision/file/handler/__init__.py
phlong3105/onevision
90552b64df7213e7fbe23c80ffd8a89583289433
[ "MIT" ]
null
null
null
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
"""

from .base import *
from .json_handler import *
from .pickle_handler import *
from .xml_handler import *
from .yaml_handler import *
15.75
29
0.677249
26
189
4.769231
0.576923
0.322581
0.41129
0
0
0
0
0
0
0
0
0.006329
0.164021
189
11
30
17.181818
0.778481
0.222222
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
ff93db564ec4e8a0ab37f7e02db65286ca351f31
3,499
py
Python
spring/timeseries/plot_overlap.py
RobertJaro/SpringProject
c1ca42650e5dfc6918b7e239fd52b02402ccb1c0
[ "Apache-2.0" ]
null
null
null
spring/timeseries/plot_overlap.py
RobertJaro/SpringProject
c1ca42650e5dfc6918b7e239fd52b02402ccb1c0
[ "Apache-2.0" ]
null
null
null
spring/timeseries/plot_overlap.py
RobertJaro/SpringProject
c1ca42650e5dfc6918b7e239fd52b02402ccb1c0
[ "Apache-2.0" ]
null
null
null
import datetime
import os

import pandas as pd
import pytz
from matplotlib import pyplot as plt, dates
from pytz import UTC

full_df = pd.read_csv("C:\\Users\\Robert\\Documents\\Uni\\SOLARNET\\HomogenizationCampaign\\data_set.csv", parse_dates=['date'])

df = full_df[full_df.type == "halpha"]
df = df[(df.date > pytz.utc.localize(datetime.datetime(2019, 7, 17))) & (df.date < pytz.utc.localize(datetime.datetime(2019, 7, 20)))]
df = df.groupby(df.date.dt.day)

plt.figure(figsize=(10, 5))
plt.suptitle('Overlap - H-alpha')
for i, (group, day) in enumerate(df):
    plt.subplot(len(df) + 1, 1, i + 2)
    plt.title(datetime.date(2019, 7, group))
    type_group = day.groupby(day.type)
    plt.vlines(day[day.observatory == "kso"].date, 0, 1, color="red", label="KSO")
    plt.vlines(day[day.observatory == "catania"].date, 1, 2, color="black", label="Catania")
    plt.vlines(day[day.observatory == "rob"].date, 2, 3, color="blue", label="ROB")
    plt.ylim((0, 3))
    myFmt = dates.DateFormatter('%H:%M')
    plt.gca().xaxis.set_major_formatter(myFmt)
    if i == 0:
        lgd = plt.legend(bbox_to_anchor=(1.15, 1.15), loc="upper right")
    plt.yticks([])
plt.tight_layout(pad=0.4, w_pad=0.8, h_pad=.8)
plt.savefig("C:\\Users\\Robert\\Documents\\Uni\\SOLARNET\\HomogenizationCampaign\\halpha_overlap.png", dpi=300, bbox_extra_artists=(lgd,), bbox_inches='tight')

df = full_df[full_df.type == "caIIk"]
df = df[(df.date > pytz.utc.localize(datetime.datetime(2019, 7, 17))) & (df.date < pytz.utc.localize(datetime.datetime(2019, 7, 20)))]
df = df.groupby(df.date.dt.day)

plt.figure(figsize=(10, 5))
plt.suptitle('Overlap - Ca-II-K')
for i, (group, day) in enumerate(df):
    plt.subplot(len(df) + 1, 1, i + 2)
    plt.title(datetime.date(2019, 7, group))
    type_group = day.groupby(day.type)
    plt.vlines(day[day.observatory == "kso"].date, 0, 1, color="red", label="KSO")
    plt.vlines(day[day.observatory == "rome"].date, 1, 2, color="green", label="Rome")
    plt.vlines(day[day.observatory == "rob"].date, 2, 3, color="blue", label="ROB")
    plt.ylim((0, 3))
    myFmt = dates.DateFormatter('%H:%M')
    plt.gca().xaxis.set_major_formatter(myFmt)
    if i == 0:
        lgd = plt.legend(bbox_to_anchor=(1.15, 1.15), loc="upper right")
    plt.yticks([])
plt.tight_layout(pad=0.4, w_pad=0.8, h_pad=.8)
plt.savefig("C:\\Users\\Robert\\Documents\\Uni\\SOLARNET\\HomogenizationCampaign\\ca_overlap.png", dpi=300, bbox_extra_artists=(lgd,), bbox_inches='tight')

df = full_df[full_df.type == "wl"]
df = df[(df.date > pytz.utc.localize(datetime.datetime(2019, 7, 17))) & (df.date < pytz.utc.localize(datetime.datetime(2019, 7, 20)))]
df = df.groupby(df.date.dt.day)

plt.figure(figsize=(10, 5))
plt.suptitle('Overlap - White Light')
for i, (group, day) in enumerate(df):
    plt.subplot(len(df) + 1, 1, i + 2)
    plt.title(datetime.date(2019, 7, group))
    type_group = day.groupby(day.type)
    plt.vlines(day[day.observatory == "kso"].date, 0, 1.5, color="red", label="KSO")
    plt.vlines(day[day.observatory == "rob"].date, 1.5, 3, color="blue", label="ROB")
    plt.ylim((0, 3))
    myFmt = dates.DateFormatter('%H:%M')
    plt.gca().xaxis.set_major_formatter(myFmt)
    if i == 0:
        lgd = plt.legend(bbox_to_anchor=(1.15, 1.15), loc="upper right")
    plt.yticks([])
plt.tight_layout(pad=0.4, w_pad=0.8, h_pad=.8)
plt.savefig("C:\\Users\\Robert\\Documents\\Uni\\SOLARNET\\HomogenizationCampaign\\wl_overlap.png", dpi=300, bbox_extra_artists=(lgd,), bbox_inches='tight')
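The script repeats one plotting block three times with only the passband, the observatories, and the output file changing. A hedged refactoring sketch (the helper name and signature are mine, not the author's):

def plot_overlap_figure(full_df, obs_type, observatories, title, out_path):
    # observatories: list of (name, color, label) triples, one horizontal band each.
    df = full_df[full_df.type == obs_type]
    df = df[(df.date > pytz.utc.localize(datetime.datetime(2019, 7, 17))) & (df.date < pytz.utc.localize(datetime.datetime(2019, 7, 20)))]
    df = df.groupby(df.date.dt.day)
    plt.figure(figsize=(10, 5))
    plt.suptitle(f'Overlap - {title}')
    lgd = None
    for i, (group, day) in enumerate(df):
        plt.subplot(len(df) + 1, 1, i + 2)
        plt.title(datetime.date(2019, 7, group))
        for j, (obs, color, label) in enumerate(observatories):
            plt.vlines(day[day.observatory == obs].date, j, j + 1, color=color, label=label)
        plt.ylim((0, len(observatories)))
        plt.gca().xaxis.set_major_formatter(dates.DateFormatter('%H:%M'))
        if i == 0:
            lgd = plt.legend(bbox_to_anchor=(1.15, 1.15), loc="upper right")
        plt.yticks([])
    plt.tight_layout(pad=0.4, w_pad=0.8, h_pad=.8)
    plt.savefig(out_path, dpi=300, bbox_extra_artists=(lgd,), bbox_inches='tight')

Note the white-light figure uses bands of height 1.5 rather than 1, so a faithful refactor would carry the band edges in each observatory tuple as well.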
42.156627
159
0.662761
572
3,499
3.973776
0.190559
0.015838
0.042235
0.052794
0.889573
0.889573
0.881654
0.854817
0.854817
0.83634
0
0.047322
0.130323
3,499
83
160
42.156627
0.699639
0
0
0.69697
0
0
0.159429
0.095429
0
0
0
0
0
1
0
false
0
0.090909
0
0.090909
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
ff9f6fff9402c50b3f696f615c768dad99b33130
92
py
Python
chile_rut/__init__.py
gmgarciag/chile-rut
c77e996258db9adb44d5f5da1641ce7fe40866da
[ "MIT" ]
2
2017-07-12T20:11:41.000Z
2019-05-31T18:22:44.000Z
chile_rut/__init__.py
gmgarciag/chile-rut
c77e996258db9adb44d5f5da1641ce7fe40866da
[ "MIT" ]
1
2018-05-29T21:44:45.000Z
2019-01-06T16:01:22.000Z
chile_rut/__init__.py
gmgarciag/chile-rut
c77e996258db9adb44d5f5da1641ce7fe40866da
[ "MIT" ]
1
2021-06-15T19:47:09.000Z
2021-06-15T19:47:09.000Z
from .chile_rut import validate_rut, random_rut, random_ruts, format_rut, verification_digit
92
92
0.869565
14
92
5.285714
0.714286
0.243243
0
0
0
0
0
0
0
0
0
0
0.076087
92
1
92
92
0.870588
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
ffad8a96c8e7e0a983922ef2a5fa40c13e43ce28
41
py
Python
tests/test_TMS/__init__.py
bigdata-ustc/EduSim
849eed229c24615e5f2c3045036311e83c22ea68
[ "MIT" ]
18
2019-11-11T03:45:35.000Z
2022-02-09T15:31:51.000Z
tests/test_TMS/__init__.py
ghzhao78506/EduSim
cb10e952eb212d8a9344143f889207b5cd48ba9d
[ "MIT" ]
3
2020-10-23T01:05:57.000Z
2021-03-16T12:12:24.000Z
tests/test_TMS/__init__.py
bigdata-ustc/EduSim
849eed229c24615e5f2c3045036311e83c22ea68
[ "MIT" ]
6
2020-06-09T21:32:00.000Z
2022-03-12T00:25:18.000Z
# coding: utf-8
# 2020/5/12 @ tongshiwei
13.666667
24
0.658537
7
41
3.857143
1
0
0
0
0
0
0
0
0
0
0
0.235294
0.170732
41
2
25
20.5
0.558824
0.878049
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
1
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
6
442dc12805e3e37aedc216f0d29a460502127244
2,923
py
Python
apps/gdpr/tests/test_email_service.py
pixelpassion/django-saas-boilerplate
8888d67181c760708edb18a4832d9002340878fa
[ "MIT" ]
37
2020-11-30T17:05:00.000Z
2022-03-25T11:03:23.000Z
apps/gdpr/tests/test_email_service.py
gd-js/django-saas-boilerplate
8888d67181c760708edb18a4832d9002340878fa
[ "MIT" ]
5
2021-04-08T21:58:32.000Z
2021-06-10T19:59:56.000Z
apps/gdpr/tests/test_email_service.py
gd-js/django-saas-boilerplate
8888d67181c760708edb18a4832d9002340878fa
[ "MIT" ]
7
2021-04-24T14:17:16.000Z
2022-02-08T13:38:12.000Z
from django.conf import settings as dj_settings
import pytest

from apps.gdpr.constants import (
    INACTIVE_ACCOUNT_DELETION_DONE_TEMPLATE,
    INACTIVE_ACCOUNT_DELETION_WARNING_TEMPLATE,
)
from apps.gdpr.email_service import GDPRSaasyEmailService

from .base_test_utils import mock_gdpr_email_service_function

pytestmark = pytest.mark.django_db

email_service = GDPRSaasyEmailService()


def test_send_inactive_account_was_deleted_email(user, mocker):
    mocked_email_func = mock_gdpr_email_service_function(mocker, "_send_message")
    bcc_email = dj_settings.INACTIVE_ACCOUNT_DELETION_BCC_EMAIL
    email_service.send_inactive_account_was_deleted_email(user)
    assert mocked_email_func.call_count == 2
    for index, sent_message in enumerate(mocked_email_func.call_args_list):
        call_data = sent_message[0]
        # Parenthesized so the conditional picks the expected recipient instead of
        # short-circuiting the whole assert when index is 0.
        assert call_data[0] == (user.email if index else bcc_email)
        assert call_data[1] == INACTIVE_ACCOUNT_DELETION_DONE_TEMPLATE
        assert len(call_data) == 2


def test_send_inactive_account_was_deleted_email_if_deletion_bcc_email_is_none(
    user, mocker, settings
):
    settings.INACTIVE_ACCOUNT_DELETION_BCC_EMAIL = None
    mocked_email_func = mock_gdpr_email_service_function(mocker, "_send_message")
    email_service.send_inactive_account_was_deleted_email(user)
    assert mocked_email_func.call_count == 1
    call_data = mocked_email_func.call_args_list[0][0]
    assert call_data[0] == user.email
    assert call_data[1] == INACTIVE_ACCOUNT_DELETION_DONE_TEMPLATE
    assert len(call_data) == 2


def test_send_warning_about_upcoming_account_deletion(user, mocker):
    mocked_email_func = mock_gdpr_email_service_function(mocker, "_send_message")
    weeks = 5
    bcc_email = dj_settings.INACTIVE_ACCOUNT_WARNING_BCC_EMAIL
    email_service.send_warning_about_upcoming_account_deletion(user, weeks)
    assert mocked_email_func.call_count == 2
    for index, sent_message in enumerate(mocked_email_func.call_args_list):
        call_data = sent_message[0]
        assert call_data[0] == (user.email if index else bcc_email)
        assert call_data[1] == INACTIVE_ACCOUNT_DELETION_WARNING_TEMPLATE
        assert call_data[2] == {
            "WEEKS_LEFT": weeks,
            "PUBLIC_URL": dj_settings.PUBLIC_URL,
        }


def test_send_warning_about_upcoming_account_deletion_if_warning_bcc_email_is_none(
    user, mocker, settings
):
    settings.INACTIVE_ACCOUNT_WARNING_BCC_EMAIL = None
    mocked_email_func = mock_gdpr_email_service_function(mocker, "_send_message")
    weeks = 5
    email_service.send_warning_about_upcoming_account_deletion(user, weeks)
    assert mocked_email_func.call_count == 1
    call_data = mocked_email_func.call_args_list[0][0]
    assert call_data[0] == user.email
    assert call_data[1] == INACTIVE_ACCOUNT_DELETION_WARNING_TEMPLATE
    assert call_data[2] == {"WEEKS_LEFT": weeks, "PUBLIC_URL": dj_settings.PUBLIC_URL}
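The tests import mock_gdpr_email_service_function from .base_test_utils, which is not part of this record; with pytest-mock, the helper would plausibly be a one-liner like this (an assumption about the project's code, not a copy of it):

from apps.gdpr.email_service import GDPRSaasyEmailService

def mock_gdpr_email_service_function(mocker, function_name):
    # Swap the named method on the service class for a MagicMock and hand it back,
    # so tests can inspect call_count and call_args_list.
    return mocker.patch.object(GDPRSaasyEmailService, function_name)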
36.08642
86
0.784126
410
2,923
5.092683
0.156098
0.061303
0.086207
0.072797
0.88841
0.840038
0.786398
0.786398
0.713602
0.713602
0
0.00965
0.149162
2,923
80
87
36.5375
0.829916
0
0
0.596491
0
0
0.031475
0
0
0
0
0
0.280702
1
0.070175
false
0
0.087719
0
0.157895
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
92273a71164c042d6d86cdad271336471c1151de
25
py
Python
env/Lib/site-packages/win32/pipe/__init__.py
Daniel-Key/HearStone-Python
981584d2b9502319393bd92b48f0ec8d906b4d44
[ "MIT" ]
null
null
null
env/Lib/site-packages/win32/pipe/__init__.py
Daniel-Key/HearStone-Python
981584d2b9502319393bd92b48f0ec8d906b4d44
[ "MIT" ]
1
2020-10-27T14:44:08.000Z
2020-10-27T14:44:08.000Z
env/Lib/site-packages/win32/pipe/__init__.py
Daniel-Key/HearStone-Python
981584d2b9502319393bd92b48f0ec8d906b4d44
[ "MIT" ]
null
null
null
from win32._pipe import *
25
25
0.8
4
25
4.75
1
0
0
0
0
0
0
0
0
0
0
0.090909
0.12
25
1
25
25
0.772727
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
928d6caab8acf6e01f414b4ca6d27f6cad4dabbd
27
py
Python
hermes/__init__.py
lineageos-infra/hermes
b6c76c8025ecd5aec20267bbfaa448b990f6db3a
[ "Apache-2.0" ]
null
null
null
hermes/__init__.py
lineageos-infra/hermes
b6c76c8025ecd5aec20267bbfaa448b990f6db3a
[ "Apache-2.0" ]
null
null
null
hermes/__init__.py
lineageos-infra/hermes
b6c76c8025ecd5aec20267bbfaa448b990f6db3a
[ "Apache-2.0" ]
1
2021-09-11T03:29:46.000Z
2021-09-11T03:29:46.000Z
from hermes.bot import Bot
13.5
26
0.814815
5
27
4.4
0.8
0
0
0
0
0
0
0
0
0
0
0
0.148148
27
1
27
27
0.956522
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
928dc1d43638a87ccdc51652d9e784455fd8fd7e
130
py
Python
utils/segmentation/__init__.py
wufanyou/Traffic4Cast-2020-TLab
5226bb1d2db40badb33c6b0ffe659fc6e9dca544
[ "Apache-2.0" ]
3
2020-11-03T16:04:22.000Z
2021-05-22T15:38:24.000Z
utils/segmentation/__init__.py
wufanyou/Traffic4Cast-2020-TLab
5226bb1d2db40badb33c6b0ffe659fc6e9dca544
[ "Apache-2.0" ]
null
null
null
utils/segmentation/__init__.py
wufanyou/Traffic4Cast-2020-TLab
5226bb1d2db40badb33c6b0ffe659fc6e9dca544
[ "Apache-2.0" ]
null
null
null
# from https://github.com/pytorch/vision
# modified by fw

from .segmentation import *
from .fcn import *
from .deeplabv3 import *
21.666667
40
0.746154
18
130
5.388889
0.722222
0.206186
0
0
0
0
0
0
0
0
0
0.009009
0.146154
130
5
41
26
0.864865
0.407692
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
2b918bb63933f33485ff7c591ee943488e0941bc
47
py
Python
scripts/portal/enter_citadel.py
G00dBye/YYMS
1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb
[ "MIT" ]
54
2019-04-16T23:24:48.000Z
2021-12-18T11:41:50.000Z
scripts/portal/enter_citadel.py
G00dBye/YYMS
1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb
[ "MIT" ]
3
2019-05-19T15:19:41.000Z
2020-04-27T16:29:16.000Z
scripts/portal/enter_citadel.py
G00dBye/YYMS
1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb
[ "MIT" ]
49
2020-11-25T23:29:16.000Z
2022-03-26T16:20:24.000Z
# 401050000
sm.warp(401050001, 0)
sm.dispose()
11.75
21
0.723404
7
47
4.857143
0.857143
0
0
0
0
0
0
0
0
0
0
0.452381
0.106383
47
3
22
15.666667
0.357143
0.191489
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0
0
0
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
6
2bae3f0cef249f4c70475fad7569d8039304ab96
3,970
py
Python
program/testy/test_GetValuesSingle.py
peter2141/IBT
8e6b1ac68680152ad744007aaf2b9e0a6d070d80
[ "Apache-2.0" ]
null
null
null
program/testy/test_GetValuesSingle.py
peter2141/IBT
8e6b1ac68680152ad744007aaf2b9e0a6d070d80
[ "Apache-2.0" ]
null
null
null
program/testy/test_GetValuesSingle.py
peter2141/IBT
8e6b1ac68680152ad744007aaf2b9e0a6d070d80
[ "Apache-2.0" ]
null
null
null
import unittest
import sys
sys.path.append('..')
import getvalues
import os
import xml.etree.cElementTree
import global_var

os.system("tshark -r xml/smtp.pcap -T pdml > tmp.pdml")


class TestGetValuesSingle(unittest.TestCase):

    def test_single_classic(self):
        global_var.xmlfields = []
        result = None
        global_var.fields = ['udp.port']
        values = [[] for _ in range(len(global_var.fields))]
        exp = ['udp.port == 25']
        for event, elem in xml.etree.cElementTree.iterparse('tmp.pdml', events=('start', 'end')):
            if event == 'start':
                if elem.tag == 'field':
                    if elem.get('name') is not None and elem.get('show') is not None:
                        global_var.xmlfields.append({elem.get('name'): elem.get('show')})
            if event == 'end':
                if elem.tag == 'packet':  # at the end of a packet, set the flag
                    result = getvalues.getValuesSingle(exp, values, False)
                    break
        self.assertEqual(result, True)
        self.assertEqual(values, [['56166', '53']])
        self.assertEqual(exp, ['{} == 25'])

    def test_single_function(self):
        global_var.xmlfields = []
        result = None
        global_var.fields = ['FUNCTION']
        global_var.functionvalues = [['10', '20']]
        values = [[] for _ in range(len(global_var.fields))]
        exp = ['FUNCTION == 25']
        for event, elem in xml.etree.cElementTree.iterparse('tmp.pdml', events=('start', 'end')):
            if event == 'start':
                if elem.tag == 'field':
                    if elem.get('name') is not None and elem.get('show') is not None:
                        global_var.xmlfields.append({elem.get('name'): elem.get('show')})
            if event == 'end':
                if elem.tag == 'packet':  # at the end of a packet, set the flag
                    result = getvalues.getValuesSingle(exp, values, False)
                    break
        self.assertEqual(result, True)
        self.assertEqual(values, [['10', '20']])
        self.assertEqual(exp, ['{} == 25'])

    def test_single_foreach(self):
        global_var.xmlfields = []
        result = None
        global_var.fields = ['FOREACH']
        global_var.foreachvalues = ['30', '40']
        values = [[] for _ in range(len(global_var.fields))]
        exp = ['FOREACH == 25']
        for event, elem in xml.etree.cElementTree.iterparse('tmp.pdml', events=('start', 'end')):
            if event == 'start':
                if elem.tag == 'field':
                    if elem.get('name') is not None and elem.get('show') is not None:
                        global_var.xmlfields.append({elem.get('name'): elem.get('show')})
            if event == 'end':
                if elem.tag == 'packet':  # at the end of a packet, set the flag
                    result = getvalues.getValuesSingle(exp, values, False)
                    break
        self.assertEqual(result, True)
        self.assertEqual(values, [['30', '40']])
        self.assertEqual(exp, ['{} == 25'])

    def test_single_false(self):
        global_var.xmlfields = []
        result = None
        global_var.fields = ['udp.port', 'testfield']
        values = [[] for _ in range(len(global_var.fields))]
        for event, elem in xml.etree.cElementTree.iterparse('tmp.pdml', events=('start', 'end')):
            if event == 'start':
                if elem.tag == 'field':
                    if elem.get('name') is not None and elem.get('show') is not None:
                        global_var.xmlfields.append({elem.get('name'): elem.get('show')})
            if event == 'end':
                if elem.tag == 'packet':  # at the end of a packet, set the flag
                    result = getvalues.getValuesSingle(['testfield == 42*udp.port'], values, False)
                    break
        self.assertEqual(result, False)


if __name__ == '__main__':
    unittest.main()
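All four tests repeat the same iterparse loop over tmp.pdml; a hedged sketch of a shared helper (my own factoring, mirroring the file's imports, not part of the project):

import xml.etree.cElementTree
import getvalues
import global_var

def run_single_on_first_packet(exp, values):
    # Stream tmp.pdml, collect name/show pairs into global_var.xmlfields,
    # and evaluate the expressions once the first packet ends.
    for event, elem in xml.etree.cElementTree.iterparse('tmp.pdml', events=('start', 'end')):
        if event == 'start' and elem.tag == 'field':
            if elem.get('name') is not None and elem.get('show') is not None:
                global_var.xmlfields.append({elem.get('name'): elem.get('show')})
        if event == 'end' and elem.tag == 'packet':
            return getvalues.getValuesSingle(exp, values, False)
    return None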
39.7
99
0.545088
451
3,970
4.711752
0.179601
0.080471
0.067765
0.041412
0.822588
0.822588
0.805176
0.758588
0.742588
0.646118
0
0.013489
0.309068
3,970
99
100
40.10101
0.76121
0.03602
0
0.646341
0
0
0.10675
0
0
0
0
0
0.121951
1
0.04878
false
0
0.073171
0
0.134146
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
2bb766cfe8ae2cb8f60d640b9747217405850a4f
6,622
py
Python
router/points_queue_test.py
awesome-archive/city_visit_planner
20befca3d70db61bc83356eedd490a298b27f96f
[ "MIT" ]
1
2019-11-14T22:08:59.000Z
2019-11-14T22:08:59.000Z
router/points_queue_test.py
sandoche/city_visit_planner
20befca3d70db61bc83356eedd490a298b27f96f
[ "MIT" ]
null
null
null
router/points_queue_test.py
sandoche/city_visit_planner
20befca3d70db61bc83356eedd490a298b27f96f
[ "MIT" ]
null
null
null
import datetime
import os
import unittest

from data import city_visit
from data import read_csv
from router import day_visit_cost_calculator_interface
from router import points_queue as points_queue_
from router import test_util


class MockDayVisitCostCalculator(day_visit_cost_calculator_interface.DayVisitCostCalculatorInterface):
    def __init__(self):
        pass


def GetDayVisitParameterss(first_day, last_day):
    def GetDayVisitParameters(day):
        return city_visit.DayVisitParameters(
            start_datetime=datetime.datetime(2015, 7, day, 10, 0, 0),
            end_datetime=datetime.datetime(2015, 7, day, 15, 0, 0),
            lunch_start_datetime=datetime.datetime(2015, 7, day, 14, 0, 0),
            lunch_hours=1.,
            start_coordinates=test_util.MockCoordinates('Hotel'),
            end_coordinates=test_util.MockCoordinates('Hotel'))
    return [GetDayVisitParameters(day) for day in range(first_day, last_day)]


class OneByOnePointsQueueTest(unittest.TestCase):

    def setUp(self):
        self.points = read_csv.ReadCSVToDict(os.path.join('data', 'test_sf_1.csv'))

    def testGeneral(self):
        points = [self.points['Golden Gate Bridge'],
                  self.points['Ferry Building'],
                  self.points['Pier 39'],
                  self.points['Union Square'],
                  self.points['Twin Peaks']]
        day_visit_parameterss = [MockDayVisitCostCalculator()]
        points_queue = points_queue_.OneByOnePointsQueueGenerator().Generate(points)

        self.assertTrue(points_queue.HasPoints())
        self.assertEqual(points, points_queue.GetPointsLeft())

        self.assertEqual([self.points['Golden Gate Bridge']],
                         points_queue.GetPushPoints(day_visit_parameterss))
        self.assertTrue(points_queue.HasPoints())
        self.assertEqual([self.points['Ferry Building'], self.points['Pier 39'],
                          self.points['Union Square'], self.points['Twin Peaks']],
                         points_queue.GetPointsLeft())

        self.assertEqual([self.points['Ferry Building']],
                         points_queue.GetPushPoints(day_visit_parameterss))
        self.assertTrue(points_queue.HasPoints())
        self.assertEqual([self.points['Pier 39'], self.points['Union Square'],
                          self.points['Twin Peaks']],
                         points_queue.GetPointsLeft())

        self.assertEqual([self.points['Pier 39']],
                         points_queue.GetPushPoints(day_visit_parameterss))
        self.assertTrue(points_queue.HasPoints())
        self.assertEqual([self.points['Union Square'], self.points['Twin Peaks']],
                         points_queue.GetPointsLeft())

        points_queue.AddBackToQueue([self.points['Ferry Building'], self.points['Pier 39']])
        self.assertTrue(points_queue.HasPoints())
        self.assertEqual([self.points['Ferry Building'], self.points['Pier 39'],
                          self.points['Union Square'], self.points['Twin Peaks']],
                         points_queue.GetPointsLeft())

        self.assertEqual([self.points['Ferry Building']],
                         points_queue.GetPushPoints(day_visit_parameterss))
        self.assertTrue(points_queue.HasPoints())
        self.assertEqual([self.points['Pier 39'], self.points['Union Square'],
                          self.points['Twin Peaks']],
                         points_queue.GetPointsLeft())

        points_queue.GetPushPoints(day_visit_parameterss)
        points_queue.GetPushPoints(day_visit_parameterss)
        self.assertEqual([self.points['Twin Peaks']],
                         points_queue.GetPushPoints(day_visit_parameterss))
        self.assertFalse(points_queue.HasPoints())
        self.assertEqual([], points_queue.GetPointsLeft())


class AllPointsQueueTest(unittest.TestCase):

    def setUp(self):
        self.points = read_csv.ReadCSVToDict(os.path.join('data', 'test_sf_1.csv'))

    def testGeneral(self):
        day_visit_parameterss = GetDayVisitParameterss(1, 3)
        points = [self.points['Golden Gate Bridge'],
                  self.points['Ferry Building'],
                  self.points['Pier 39'],
                  self.points['Union Square'],
                  self.points['Lombard Street'],
                  self.points['Coit Tower'],
                  self.points['Att Park'],
                  self.points['Alcatraz Island'],
                  self.points['Golden Gate Park'],
                  self.points['De Young Museum']]
        points_queue = points_queue_.AllPointsQueueGenerator(1.2).Generate(points)

        self.assertTrue(points_queue.HasPoints())
        self.assertEqual(points, points_queue.GetPointsLeft())

        self.assertEqual([self.points['Golden Gate Bridge'],
                          self.points['Ferry Building'],
                          self.points['Pier 39'],
                          self.points['Union Square'],
                          self.points['Lombard Street'],
                          self.points['Coit Tower'],
                          self.points['Att Park'],
                          self.points['Alcatraz Island']],
                         points_queue.GetPushPoints(day_visit_parameterss))
        self.assertTrue(points_queue.HasPoints())
        self.assertEqual([self.points['Golden Gate Park'], self.points['De Young Museum']],
                         points_queue.GetPointsLeft())

        self.assertEqual([self.points['Golden Gate Park'], self.points['De Young Museum']],
                         points_queue.GetPushPoints(day_visit_parameterss))
        self.assertFalse(points_queue.HasPoints())
        self.assertEqual([], points_queue.GetPointsLeft())

    def testLargeCutOffMultiplier(self):
        day_visit_parameterss = GetDayVisitParameterss(1, 3)
        points = [self.points['Golden Gate Bridge'],
                  self.points['Ferry Building'],
                  self.points['Pier 39'],
                  self.points['Union Square'],
                  self.points['Lombard Street'],
                  self.points['Coit Tower'],
                  self.points['Att Park'],
                  self.points['Alcatraz Island'],
                  self.points['Golden Gate Park'],
                  self.points['De Young Museum']]
        points_queue = points_queue_.AllPointsQueueGenerator(2.0).Generate(points)

        self.assertTrue(points_queue.HasPoints())
        self.assertEqual(points, points_queue.GetPointsLeft())

        self.assertEqual(points, points_queue.GetPushPoints(day_visit_parameterss))
        self.assertFalse(points_queue.HasPoints())
        self.assertEqual([], points_queue.GetPointsLeft())


if __name__ == '__main__':
    unittest.main()
40.876543
102
0.629719
673
6,622
6.023774
0.145617
0.155402
0.060927
0.080168
0.837445
0.801431
0.779477
0.74963
0.74963
0.736803
0
0.011731
0.253398
6,622
161
103
41.130435
0.808252
0
0
0.699248
0
0
0.11794
0
0
0
0
0
0.240602
1
0.06015
false
0.007519
0.06015
0.007519
0.157895
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
2bd4c3d46510b646c954be11be736358158828e7
155
py
Python
src/business_logic.py
NewMountain/test-example
199a26d6a9bf94692fadd1b81df791db91170fea
[ "MIT" ]
null
null
null
src/business_logic.py
NewMountain/test-example
199a26d6a9bf94692fadd1b81df791db91170fea
[ "MIT" ]
null
null
null
src/business_logic.py
NewMountain/test-example
199a26d6a9bf94692fadd1b81df791db91170fea
[ "MIT" ]
null
null
null
def add_two(user_input):
    if "number" in user_input and isinstance(user_input["number"], int):
        return user_input["number"] + 2
    return None
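A minimal pytest-style check of the contract the function implements (the import path assumes src/ is on sys.path; the test name is mine):

from business_logic import add_two

def test_add_two():
    assert add_two({"number": 3}) == 5        # int present -> value plus two
    assert add_two({"number": "3"}) is None   # wrong type -> None
    assert add_two({}) is None                # missing key -> None

One caveat: isinstance(True, int) is True in Python, so add_two({"number": True}) returns 3; a stricter version would exclude bool explicitly.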
25.833333
72
0.677419
23
155
4.347826
0.608696
0.36
0.3
0
0
0
0
0
0
0
0
0.00813
0.206452
155
5
73
31
0.804878
0
0
0
0
0
0.116129
0
0
0
0
0
0
1
0.25
false
0
0
0
0.75
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
6
a6a481a69db212fdeab1330fe5252e2205f009eb
21,755
py
Python
ai.py
lestatzzz/Gomoku-python
3871d38489fc297ca5d37de18d21d553b8f38897
[ "MIT" ]
null
null
null
ai.py
lestatzzz/Gomoku-python
3871d38489fc297ca5d37de18d21d553b8f38897
[ "MIT" ]
null
null
null
ai.py
lestatzzz/Gomoku-python
3871d38489fc297ca5d37de18d21d553b8f38897
[ "MIT" ]
null
null
null
import numpy as np
import copy
import time


class Node:
    """A node in the AI's search tree."""

    def __init__(self, game, ope, depth, alpha, beta, force_score, player_first):
        """
        Create a minimax node.
        :param game: the game state; an object of the Game class
        :param ope: the move made at this step
        :param depth: the depth of this node
        :param alpha: the initial alpha value of this node
        :param beta: the initial beta value of this node
        :param force_score: whether a concrete score must be computed
        :param player_first: whether the player moves first
        """
        self.game = game
        self.ope = ope
        self.depth = depth
        self.alpha = alpha
        self.beta = beta
        self.force_score = force_score
        self.player_first = player_first
        st = time.time()
        self.score = self.calc_score()
        ed = time.time()
        self.t = ed - st

    def calc_score(self):
        """Score this node: the better for the AI, the higher; the worse, the lower."""
        # 1. A five-in-a-row counts as 100 points
        res = self.game.game_result()
        if res == 2:
            return 100
        elif res == 1:
            return -100
        # 2. Count the player's and the computer's fours (only fours that are not already blocked)
        ai_4_num = 0
        player_4_num = 0
        for x in range(11):
            for y in range(15):
                player_cnt = sum([self.game.g_map[x][y] == 1, self.game.g_map[x + 1][y] == 1, self.game.g_map[x + 2][y] == 1, self.game.g_map[x + 3][y] == 1, self.game.g_map[x + 4][y] == 1])
                ai_cnt = sum([self.game.g_map[x][y] == 2, self.game.g_map[x + 1][y] == 2, self.game.g_map[x + 2][y] == 2, self.game.g_map[x + 3][y] == 2, self.game.g_map[x + 4][y] == 2])
                if player_cnt == 4 and ai_cnt == 0:
                    player_4_num += 1
                if ai_cnt == 4 and player_cnt == 0:
                    ai_4_num += 1
        for x in range(15):
            for y in range(11):
                player_cnt = sum([self.game.g_map[x][y] == 1, self.game.g_map[x][y + 1] == 1, self.game.g_map[x][y + 2] == 1, self.game.g_map[x][y + 3] == 1, self.game.g_map[x][y + 4] == 1])
                ai_cnt = sum([self.game.g_map[x][y] == 2, self.game.g_map[x][y + 1] == 2, self.game.g_map[x][y + 2] == 2, self.game.g_map[x][y + 3] == 2, self.game.g_map[x][y + 4] == 2])
                if player_cnt == 4 and ai_cnt == 0:
                    player_4_num += 1
                if ai_cnt == 4 and player_cnt == 0:
                    ai_4_num += 1
        for x in range(11):
            for y in range(11):
                player_cnt = sum([self.game.g_map[x][y] == 1, self.game.g_map[x + 1][y + 1] == 1, self.game.g_map[x + 2][y + 2] == 1, self.game.g_map[x + 3][y + 3] == 1, self.game.g_map[x + 4][y + 4] == 1])
                ai_cnt = sum([self.game.g_map[x][y] == 2, self.game.g_map[x + 1][y + 1] == 2, self.game.g_map[x + 2][y + 2] == 2, self.game.g_map[x + 3][y + 3] == 2, self.game.g_map[x + 4][y + 4] == 2])
                if player_cnt == 4 and ai_cnt == 0:
                    player_4_num += 1
                if ai_cnt == 4 and player_cnt == 0:
                    ai_4_num += 1
        for x in range(11):
            for y in range(11):
                player_cnt = sum([self.game.g_map[x + 4][y] == 1, self.game.g_map[x + 3][y + 1] == 1, self.game.g_map[x + 2][y + 2] == 1, self.game.g_map[x + 1][y + 3] == 1, self.game.g_map[x][y + 4] == 1])
                ai_cnt = sum([self.game.g_map[x + 4][y] == 2, self.game.g_map[x + 3][y + 1] == 2, self.game.g_map[x + 2][y + 2] == 2, self.game.g_map[x + 1][y + 3] == 2, self.game.g_map[x][y + 4] == 2])
                if player_cnt == 4 and ai_cnt == 0:
                    player_4_num += 1
                if ai_cnt == 4 and player_cnt == 0:
                    ai_4_num += 1
        # 3. An open four or a double four counts as 90 points
        if self.player_first:
            if self.depth % 2 == 0:  # it is the player's turn
                if player_4_num >= 2:
                    return -90
                elif ai_4_num >= 2 and player_4_num == 0:
                    return 90
            else:  # it is the computer's turn
                if ai_4_num >= 2:
                    return 90
                elif player_4_num >= 2 and ai_4_num == 0:
                    return -90
        else:
            if self.depth % 2 == 0:  # it is the computer's turn
                if ai_4_num >= 2:
                    return 90
                elif player_4_num >= 2 and ai_4_num == 0:
                    return -90
            else:  # it is the player's turn
                if player_4_num >= 2:
                    return -90
                elif ai_4_num >= 2 and player_4_num == 0:
                    return 90
        # 4. From this point on, when force_score is False the score is recorded as +/-inf
        if self.force_score is False:
            if self.player_first:
                if self.depth % 2 == 0:  # it is the player's turn
                    return np.inf
                else:  # it is the computer's turn
                    return -np.inf
            else:
                if self.depth % 2 == 0:  # it is the player's turn
                    return -np.inf
                else:  # it is the computer's turn
                    return np.inf
        # 4. Count the player's and the computer's open threes
        player_3d_num = 0
        ai_3d_num = 0
        # 4.1. Patterns of the form xooox
        for x in range(11):
            for y in range(15):
                if self.game.g_map[x][y] == 0 and self.game.g_map[x + 1][y] == 1 and self.game.g_map[x + 2][y] == 1 and self.game.g_map[x + 3][y] == 1 and self.game.g_map[x + 4][y] == 0:
                    player_3d_num += 1
                if self.game.g_map[x][y] == 0 and self.game.g_map[x + 1][y] == 2 and self.game.g_map[x + 2][y] == 2 and self.game.g_map[x + 3][y] == 2 and self.game.g_map[x + 4][y] == 0:
                    ai_3d_num += 1
        for x in range(15):
            for y in range(11):
                if self.game.g_map[x][y] == 0 and self.game.g_map[x][y + 1] == 1 and self.game.g_map[x][y + 2] == 1 and self.game.g_map[x][y + 3] == 1 and self.game.g_map[x][y + 4] == 0:
                    player_3d_num += 1
                if self.game.g_map[x][y] == 0 and self.game.g_map[x][y + 1] == 2 and self.game.g_map[x][y + 2] == 2 and self.game.g_map[x][y + 3] == 2 and self.game.g_map[x][y + 4] == 0:
                    ai_3d_num += 1
        for x in range(11):
            for y in range(11):
                if self.game.g_map[x][y] == 0 and self.game.g_map[x + 1][y + 1] == 1 and self.game.g_map[x + 2][y + 2] == 1 and self.game.g_map[x + 3][y + 3] == 1 and self.game.g_map[x + 4][y + 4] == 0:
                    player_3d_num += 1
                if self.game.g_map[x][y] == 0 and self.game.g_map[x + 1][y + 1] == 2 and self.game.g_map[x + 2][y + 2] == 2 and self.game.g_map[x + 3][y + 3] == 2 and self.game.g_map[x + 4][y + 4] == 0:
                    ai_3d_num += 1
        for x in range(11):
            for y in range(11):
                if self.game.g_map[x + 4][y] == 0 and self.game.g_map[x + 3][y + 1] == 1 and self.game.g_map[x + 2][y + 2] == 1 and self.game.g_map[x + 1][y + 3] == 1 and self.game.g_map[x][y + 4] == 0:
                    player_3d_num += 1
                if self.game.g_map[x + 4][y] == 0 and self.game.g_map[x + 3][y + 1] == 2 and self.game.g_map[x + 2][y + 2] == 2 and self.game.g_map[x + 1][y + 3] == 2 and self.game.g_map[x][y + 4] == 0:
                    ai_3d_num += 1
        # 4.2. Patterns of the form xoxoox or xooxox
        for x in range(10):
            for y in range(15):
                if self.game.g_map[x][y] == 0 and self.game.g_map[x + 1][y] == 1 and ((self.game.g_map[x + 2][y] == 1) ^ (self.game.g_map[x + 3][y] == 1)) and self.game.g_map[x + 4][y] == 1 and self.game.g_map[x + 5][y] == 0:
                    player_3d_num += 1
                if self.game.g_map[x][y] == 0 and self.game.g_map[x + 1][y] == 2 and ((self.game.g_map[x + 2][y] == 2) ^ (self.game.g_map[x + 3][y] == 2)) and self.game.g_map[x + 4][y] == 2 and self.game.g_map[x + 5][y] == 0:
                    ai_3d_num += 1
        for x in range(15):
            for y in range(10):
                if self.game.g_map[x][y] == 0 and self.game.g_map[x][y + 1] == 1 and ((self.game.g_map[x][y + 2] == 1) ^ (self.game.g_map[x][y + 3] == 1)) and self.game.g_map[x][y + 4] == 1 and self.game.g_map[x][y + 5] == 0:
                    player_3d_num += 1
                if self.game.g_map[x][y] == 0 and self.game.g_map[x][y + 1] == 2 and ((self.game.g_map[x][y + 2] == 2) ^ (self.game.g_map[x][y + 3] == 2)) and self.game.g_map[x][y + 4] == 2 and self.game.g_map[x][y + 5] == 0:
                    ai_3d_num += 1
        for x in range(10):
            for y in range(10):
                if self.game.g_map[x][y] == 0 and self.game.g_map[x + 1][y + 1] == 1 and ((self.game.g_map[x + 2][y + 2] == 1) ^ (self.game.g_map[x + 3][y + 3] == 1)) and self.game.g_map[x + 4][y + 4] == 1 and self.game.g_map[x + 5][y + 5] == 0:
                    player_3d_num += 1
                if self.game.g_map[x][y] == 0 and self.game.g_map[x + 1][y + 1] == 2 and ((self.game.g_map[x + 2][y + 2] == 2) ^ (self.game.g_map[x + 3][y + 3] == 2)) and self.game.g_map[x + 4][y + 4] == 2 and self.game.g_map[x + 5][y + 5] == 0:
                    ai_3d_num += 1
        for x in range(10):
            for y in range(10):
                if self.game.g_map[x + 5][y] == 0 and self.game.g_map[x + 4][y + 1] == 1 and ((self.game.g_map[x + 3][y + 2] == 1) ^ (self.game.g_map[x + 2][y + 3] == 1)) and self.game.g_map[x + 1][y + 4] == 1 and self.game.g_map[x][y + 5] == 0:
                    player_3d_num += 1
                if self.game.g_map[x + 5][y] == 0 and self.game.g_map[x + 4][y + 1] == 2 and ((self.game.g_map[x + 3][y + 2] == 2) ^ (self.game.g_map[x + 2][y + 3] == 2)) and self.game.g_map[x + 1][y + 4] == 2 and self.game.g_map[x][y + 5] == 0:
                    ai_3d_num += 1
        # 6. A four-three counts as 80 points
        if self.player_first:
            if self.depth % 2 == 0:  # it is the player's turn
                if player_4_num and player_3d_num >= 1:
                    return -80
                elif ai_4_num and ai_3d_num >= 1:
                    return 80
            else:  # it is the computer's turn
                if ai_4_num and ai_3d_num >= 1:
                    return 80
                elif player_4_num and player_3d_num >= 1:
                    return -80
        else:
            if self.depth % 2 == 0:  # it is the computer's turn
                if ai_4_num and ai_3d_num >= 1:
                    return 80
                elif player_4_num and player_3d_num >= 1:
                    return -80
            else:  # it is the player's turn
                if player_4_num and player_3d_num >= 1:
                    return -80
                elif ai_4_num and ai_3d_num >= 1:
                    return 80
        # 7. A four counts as 70 points
        if self.player_first:
            if self.depth % 2 == 0:  # it is the player's turn
                if player_4_num:
                    return -70
                elif ai_4_num:
                    return 70
            else:  # it is the computer's turn
                if ai_4_num:
                    return 70
                elif player_4_num:
                    return -70
        else:
            if self.depth % 2 == 0:  # it is the computer's turn
                if ai_4_num:
                    return 70
                elif player_4_num:
                    return -70
            else:  # it is the player's turn
                if player_4_num:
                    return -70
                elif ai_4_num:
                    return 70
        # 8. A double three counts as 60 points
        if self.player_first:
            if self.depth % 2 == 0:  # it is the player's turn
                if player_3d_num >= 2:
                    return -60
                elif ai_3d_num >= 2:
                    return 60
            else:  # it is the computer's turn
                if ai_3d_num >= 2:
                    return 60
                elif player_3d_num >= 2:
                    return -60
        else:
            if self.depth % 2 == 0:  # it is the computer's turn
                if ai_3d_num >= 2:
                    return 60
                elif player_3d_num >= 2:
                    return -60
            else:  # it is the player's turn
                if player_3d_num >= 2:
                    return -60
                elif ai_3d_num >= 2:
                    return 60
        # 9. A single open three counts as 50 points
        if self.player_first:
            if self.depth % 2 == 0:  # it is the player's turn
                if player_3d_num:
                    return -50
                elif ai_3d_num:
                    return 50
            else:  # it is the computer's turn
                if ai_3d_num:
                    return 50
                elif player_3d_num:
                    return -50
        else:
            if self.depth % 2 == 0:  # it is the computer's turn
                if ai_3d_num:
                    return 50
                elif player_3d_num:
                    return -50
            else:  # it is the player's turn
                if player_3d_num:
                    return -50
                elif ai_3d_num:
                    return 50
        # 10. Otherwise score by stone distribution (by each stone's distance from the board
        # center and the number of stones in the 8 cells around it)
        score_by_num_around = [0, 1, 20, 30, 26, 24, 22, 20, 18, 16, 15]
        player_score_num = 0
        ai_score_num = 0
        player_cnt = 0
        ai_cnt = 0
        for x in range(15):
            for y in range(15):
                if self.game.g_map[x][y] == 1:
                    around_cnt = 0
                    for x0 in range(x - 1, x + 2):
                        for y0 in range(y - 1, y + 2):
                            if 0 <= x0 <= 14 and 0 <= y0 <= 14 and self.game.g_map[x0][y0] != 0:
                                around_cnt += 1
                    player_score_num += score_by_num_around[around_cnt] - abs(x - 7) - abs(y - 7)
                    player_cnt += 1
                if self.game.g_map[x][y] == 2:
                    around_cnt = 0
                    for x0 in range(x - 1, x + 2):
                        for y0 in range(y - 1, y + 2):
                            if 0 <= x0 <= 14 and 0 <= y0 <= 14 and self.game.g_map[x0][y0] != 0:
                                around_cnt += 1
                    ai_score_num += score_by_num_around[around_cnt] - abs(x - 7) - abs(y - 7)
                    ai_cnt += 1
        if ai_cnt == 0 or player_cnt == 0:
            return 0
        score = ai_score_num / ai_cnt - player_score_num / player_cnt
        return score


class AI1Step:
    max_node_num = 100000  # maximum number of nodes allowed (to avoid using too much memory)

    def __init__(self, init_game, init_depth, player_first):
        """
        Decide where the AI moves this turn.
        :param init_game: the initial game map
        :param init_depth: the initial depth
        :param player_first: whether the player moves first
        """
        node_init = Node(copy.deepcopy(init_game), None, init_depth, -np.inf, np.inf, False, player_first)  # root node
        node_init.score = -np.inf
        self.player_first = player_first
        self.method_tree = [node_init]  # strategy tree
        self.next_node_dx_list = [-1]  # each node's next-node index; -1 marks a terminal node
        self.child_node_dx_list = [[]]  # each node's list of child nodes
        self.ope_hist_list = []  # record of previously traversed operations
        self.t = 0

    def search(self, cur_node_dx, ope_hist, max_depth):
        """
        Search for the best outcome under one root node using minimax with alpha-beta pruning.
        :param cur_node_dx: index of the current node
        :param ope_hist: list of hypothesized historical states
        :param max_depth: maximum allowed depth
        """
        # 1. First determine where a stone can be placed. A cell is playable if it is empty
        # and at least one stone lies within its surrounding 8 cells.
        ope_list = set()
        for x in range(15):
            for y in range(15):
                if self.method_tree[cur_node_dx].game.g_map[x][y] != 0:
                    for x0 in range(x - 1, x + 2):
                        for y0 in range(y - 1, y + 2):
                            if 0 <= x0 <= 14 and 0 <= y0 <= 14 and (x0, y0) not in ope_list:
                                if self.method_tree[cur_node_dx].game.g_map[x0][y0] == 0:
                                    ope_list.add((x0, y0))
        # 2. Then search every playable cell
        for cell in ope_list:
            # 2.1 Create a child node and compute its score
            i_game = copy.deepcopy(self.method_tree[cur_node_dx].game)
            if self.player_first:
                if self.method_tree[cur_node_dx].depth % 2 == 0:  # the player moves
                    i_game.g_map[cell[0]][cell[1]] = 1
                else:  # the computer moves
                    i_game.g_map[cell[0]][cell[1]] = 2
            else:
                if self.method_tree[cur_node_dx].depth % 2 == 0:  # the computer moves
                    i_game.g_map[cell[0]][cell[1]] = 1
                else:  # the player moves
                    i_game.g_map[cell[0]][cell[1]] = 2
            if max_depth >= 2 and len(ope_list) >= 2:  # for nodes not on the final layer, do not rush to compute a score
                node_new = Node(i_game, cell, self.method_tree[cur_node_dx].depth + 1, self.method_tree[cur_node_dx].alpha, self.method_tree[cur_node_dx].beta, False, self.player_first)
            else:
                node_new = Node(i_game, cell, self.method_tree[cur_node_dx].depth + 1, self.method_tree[cur_node_dx].alpha, self.method_tree[cur_node_dx].beta, True, self.player_first)
            self.t += node_new.t
            self.method_tree.append(node_new)  # insert this node into the search tree
            node_new_dx = len(self.method_tree) - 1
            self.child_node_dx_list.append([])
            self.child_node_dx_list[cur_node_dx].append(node_new_dx)  # record the new node as a child of the current node
            self.next_node_dx_list.append(-1)  # record each node's next action
            # ope_hist_new = copy.deepcopy(ope_hist)  # record the hypothesized history list
            # ope_hist_new[0].add(cell[0])
            # ope_hist_new[1].add(cell[1])
            # self.ope_hist_list.append(ope_hist_new)
            if len(self.method_tree) >= self.max_node_num:  # to protect memory, the search tree may not grow too large
                raise ValueError('The method tree is too large')
            # 2.2. Act on the parent node according to the child's result
            if -np.inf < self.method_tree[node_new_dx].score < np.inf:  # if the child already has a concrete score, no deeper iteration is needed
                if self.player_first:
                    if self.method_tree[cur_node_dx].depth % 2 == 0:  # this hypothetical move is the player's, so minimize the score and update beta
                        if self.method_tree[node_new_dx].score < self.method_tree[cur_node_dx].score:
                            self.method_tree[cur_node_dx].score = self.method_tree[node_new_dx].score
                            self.method_tree[cur_node_dx].beta = self.method_tree[node_new_dx].score
                            self.next_node_dx_list[cur_node_dx] = node_new_dx
                    else:  # this hypothetical move is the computer's, so maximize the score and update alpha
                        if self.method_tree[node_new_dx].score > self.method_tree[cur_node_dx].score:
                            self.method_tree[cur_node_dx].score = self.method_tree[node_new_dx].score
                            self.method_tree[cur_node_dx].alpha = self.method_tree[node_new_dx].score
                            self.next_node_dx_list[cur_node_dx] = node_new_dx
                else:
                    if self.method_tree[cur_node_dx].depth % 2 == 0:  # computer's move: maximize the score, update alpha
                        if self.method_tree[node_new_dx].score > self.method_tree[cur_node_dx].score:
                            self.method_tree[cur_node_dx].score = self.method_tree[node_new_dx].score
                            self.method_tree[cur_node_dx].alpha = self.method_tree[node_new_dx].score
                            self.next_node_dx_list[cur_node_dx] = node_new_dx
                    else:  # player's move: minimize the score, update beta
                        if self.method_tree[node_new_dx].score < self.method_tree[cur_node_dx].score:
                            self.method_tree[cur_node_dx].score = self.method_tree[node_new_dx].score
                            self.method_tree[cur_node_dx].beta = self.method_tree[node_new_dx].score
                            self.next_node_dx_list[cur_node_dx] = node_new_dx
            else:  # if the child has no concrete score yet, recurse with it as the root of the next layer, then compute
                if max_depth >= 2:
                    self.search(node_new_dx, ope_hist, max_depth - 1)
                    # Compute this node's score from the result of the recursion
                    if self.player_first:
                        if self.method_tree[cur_node_dx].depth % 2 == 0:  # player's move: minimize the score, update beta
                            if self.method_tree[node_new_dx].score < self.method_tree[cur_node_dx].score:
                                self.method_tree[cur_node_dx].score = self.method_tree[node_new_dx].score
                                self.method_tree[cur_node_dx].beta = self.method_tree[node_new_dx].score
                                self.next_node_dx_list[cur_node_dx] = node_new_dx
                        else:  # computer's move: maximize the score, update alpha
                            if self.method_tree[node_new_dx].score > self.method_tree[cur_node_dx].score:
                                self.method_tree[cur_node_dx].score = self.method_tree[node_new_dx].score
                                self.method_tree[cur_node_dx].alpha = self.method_tree[node_new_dx].score
                                self.next_node_dx_list[cur_node_dx] = node_new_dx
                    else:
                        if self.method_tree[cur_node_dx].depth % 2 == 0:  # computer's move: maximize the score, update alpha
                            if self.method_tree[node_new_dx].score > self.method_tree[cur_node_dx].score:
                                self.method_tree[cur_node_dx].score = self.method_tree[node_new_dx].score
                                self.method_tree[cur_node_dx].alpha = self.method_tree[node_new_dx].score
                                self.next_node_dx_list[cur_node_dx] = node_new_dx
                        else:  # player's move: minimize the score, update beta
                            if self.method_tree[node_new_dx].score < self.method_tree[cur_node_dx].score:
                                self.method_tree[cur_node_dx].score = self.method_tree[node_new_dx].score
                                self.method_tree[cur_node_dx].beta = self.method_tree[node_new_dx].score
                                self.next_node_dx_list[cur_node_dx] = node_new_dx
            if self.method_tree[cur_node_dx].alpha > self.method_tree[cur_node_dx].beta:  # alpha-beta pruning
                return
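The search method above interleaves tree bookkeeping with the minimax logic; for reference, the bare algorithm its comments describe looks like this (a generic sketch where evaluate, moves, and apply_move are hypothetical callbacks, not the author's API):

import math

def alphabeta(state, depth, alpha, beta, maximizing, evaluate, moves, apply_move):
    # Depth-limited minimax with alpha-beta cutoffs.
    if depth == 0 or not moves(state):
        return evaluate(state)
    if maximizing:
        best = -math.inf
        for m in moves(state):
            best = max(best, alphabeta(apply_move(state, m), depth - 1, alpha, beta, False,
                                       evaluate, moves, apply_move))
            alpha = max(alpha, best)
            if alpha > beta:  # cut off, mirroring the `alpha > beta` check above
                break
        return best
    best = math.inf
    for m in moves(state):
        best = min(best, alphabeta(apply_move(state, m), depth - 1, alpha, beta, True,
                                   evaluate, moves, apply_move))
        beta = min(beta, best)
        if alpha > beta:
            break
    return best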
52.170264
245
0.493955
3,270
21,755
3.063609
0.060856
0.068876
0.110202
0.158115
0.782392
0.765322
0.760331
0.754442
0.746157
0.723098
0
0.057929
0.38028
21,755
416
246
52.295673
0.685062
0.082188
0
0.716332
0
0
0.00071
0
0
0
0
0
0
1
0.011461
false
0
0.008596
0
0.169054
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
a6af04572a65baf1af67598c5fe3ef71e6a22491
383
py
Python
exec.py
lbaiao/sys-simulator-2
94f00d43309fe7b56dac5099bd4024695ba317b6
[ "MIT" ]
1
2020-06-14T13:50:28.000Z
2020-06-14T13:50:28.000Z
exec.py
lbaiao/sys-simulator-2
94f00d43309fe7b56dac5099bd4024695ba317b6
[ "MIT" ]
null
null
null
exec.py
lbaiao/sys-simulator-2
94f00d43309fe7b56dac5099bd4024695ba317b6
[ "MIT" ]
null
null
null
# from scripts_dql.script47 import run
# from scripts_a2c.script15 import run
from scripts_ddpg.script1 import run
# from scripts_benchmarks.script2 import run
# from scripts_gym.script2 import run
# from scripts_gym.script4 import run
# from scripts_gym.script3 import run
# from scripts_gym.script6 import run
# from scripts_gym.script7 import run

# run training and tests
run()
27.357143
44
0.81201
59
383
5.118644
0.338983
0.327815
0.344371
0.529801
0.427152
0.198676
0
0
0
0
0
0.036254
0.13577
383
13
45
29.461538
0.876133
0.832898
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
6
a6ee8217396d5d11561437a3e2510290423abf0b
10,094
py
Python
test/test_client_visitor.py
bblommers/aws-analytics
be243bab6ded96a3d0563593ca57b4af536ea0a1
[ "MIT" ]
null
null
null
test/test_client_visitor.py
bblommers/aws-analytics
be243bab6ded96a3d0563593ca57b4af536ea0a1
[ "MIT" ]
null
null
null
test/test_client_visitor.py
bblommers/aws-analytics
be243bab6ded96a3d0563593ca57b4af536ea0a1
[ "MIT" ]
null
null
null
import pytest

from dynamo.data import DynamoClient
from ._location import location


class Test_addVisitor():
    @pytest.mark.usefixtures( 'dynamo_client', 'table_init' )
    def test_addVisitor( self, table_name, visitor ):
        client = DynamoClient( table_name )
        result = client.addVisitor( visitor )
        assert 'visitor' in result.keys()
        assert result['visitor'] == visitor

    @pytest.mark.usefixtures( 'dynamo_client', 'table_init' )
    def test_duplicate_addVisitor( self, table_name, visitor ):
        client = DynamoClient( table_name )
        result = client.addVisitor( visitor )
        result = client.addVisitor( visitor )
        assert 'error' in result.keys()
        assert result['error'] == f'Visitor already in table { visitor }'

    @pytest.mark.usefixtures( 'dynamo_client', 'table_init' )
    def test_parameter_addVisitor( self, table_name ):
        client = DynamoClient( table_name )
        with pytest.raises( ValueError ) as e:
            assert client.addVisitor( {} )
        assert str( e.value ) == 'Must pass a Visitor object'

    @pytest.mark.usefixtures( 'dynamo_client', 'table_init' )
    def test_table_addVisitor( self, visitor ):
        client = DynamoClient( 'no name' )
        result = client.addVisitor( visitor )
        assert 'error' in result.keys()
        assert result['error'] == 'Could not add new visitor to table'


class Test_updateVisitor():
    @pytest.mark.usefixtures( 'dynamo_client', 'table_init' )
    def test_updateVisitor( self, table_name, visitor ):
        client = DynamoClient( table_name )
        client.addVisitor( visitor )
        result = client.updateVisitor( visitor )
        assert 'visitor' in result.keys()
        assert result['visitor'] == visitor

    @pytest.mark.usefixtures( 'dynamo_client', 'table_init' )
    def test_parameter_updateVisitor( self, table_name ):
        client = DynamoClient( table_name )
        with pytest.raises( ValueError ) as e:
            assert client.updateVisitor( {} )
        assert str( e.value ) == 'Must pass a Visitor object'

    @pytest.mark.usefixtures( 'dynamo_client', 'table_init' )
    def test_none_updateVisitor( self, table_name, visitor ):
        client = DynamoClient( table_name )
        result = client.updateVisitor( visitor )
        assert 'error' in result.keys()
        assert result['error'] == f'Visitor not in table { visitor }'

    @pytest.mark.usefixtures( 'dynamo_client', 'table_init' )
    def test_table_updateVisitor( self, visitor ):
        client = DynamoClient( 'no name' )
        result = client.updateVisitor( visitor )
        assert 'error' in result.keys()
        assert result['error'] == 'Could not update visitor in table'


class Test_removeVisitor():
    @pytest.mark.usefixtures( 'dynamo_client', 'table_init' )
    def test_removeVisitor( self, table_name, visitor ):
        client = DynamoClient( table_name )
        client.addVisitor( visitor )
        result = client.removeVisitor( visitor )
        assert 'visitor' in result.keys()
        assert result['visitor'] == visitor

    @pytest.mark.usefixtures( 'dynamo_client', 'table_init' )
    def test_none_removeVisitor( self, table_name, visitor ):
        client = DynamoClient( table_name )
        result = client.removeVisitor( visitor )
        assert 'error' in result.keys()
        assert result['error'] == f'Visitor not in table { visitor }'

    @pytest.mark.usefixtures( 'dynamo_client', 'table_init' )
    def test_parameter_removeVisitor( self, table_name ):
        with pytest.raises( ValueError ) as e:
            assert DynamoClient( table_name ).removeVisitor( {} )
        assert str( e.value ) == 'Must pass a Visitor object'


class Test_incrementVisitorSessions():
    @pytest.mark.usefixtures( 'dynamo_client', 'table_init' )
    def test_incrementVisitorSessions( self, table_name, visitor ):
        client = DynamoClient( table_name )
        client.addVisitor( visitor )
        result = client.incrementVisitorSessions( visitor )
        visitor.numberSessions += 1
        assert 'visitor' in result.keys()
        assert result['visitor'] == visitor

    @pytest.mark.usefixtures( 'dynamo_client', 'table_init' )
    def test_none_incrementVisitorSessions( self, table_name, visitor ):
        result = DynamoClient( table_name ).incrementVisitorSessions( visitor )
        visitor.numberSessions += 1
        assert 'error' in result.keys()
        assert result['error'] == 'Visitor not in table'

    @pytest.mark.usefixtures( 'dynamo_client', 'table_init' )
    def test_parameter_incrementVisitorSessions( self, table_name ):
        with pytest.raises( ValueError ) as e:
            assert DynamoClient( table_name ).incrementVisitorSessions( {} )
        assert str( e.value ) == 'Must pass a Visitor object'


class Test_decrementVisitorSessions():
    @pytest.mark.usefixtures( 'dynamo_client', 'table_init' )
    def test_decrementVisitorSessions( self, table_name, visitor ):
        client = DynamoClient( table_name )
        client.addVisitor( visitor )
        result = client.decrementVisitorSessions( visitor )
        visitor.numberSessions -= 1
        assert 'visitor' in result.keys()
        assert result['visitor'] == visitor

    @pytest.mark.usefixtures( 'dynamo_client', 'table_init' )
    def test_none_decrementVisitorSessions( self, table_name, visitor ):
        result = DynamoClient( table_name ).decrementVisitorSessions( visitor )
        visitor.numberSessions += 1
        assert 'error' in result.keys()
        assert result['error'] == 'Visitor not in table'

    @pytest.mark.usefixtures( 'dynamo_client', 'table_init' )
    def test_parameter_decrementVisitorSessions( self, table_name ):
        with pytest.raises( ValueError ) as e:
            assert DynamoClient( table_name ).decrementVisitorSessions( {} )
        assert str( e.value ) == 'Must pass a Visitor object'


class Test_addNewVisitor():
    @pytest.mark.usefixtures( 'dynamo_client', 'table_init' )
    def test_addNewVisitor( self, table_name, visitor, browsers, visits, session ):
        client = DynamoClient( table_name )
        result = client.addNewVisitor( visitor, location(), browsers, visits )
        assert 'visitor' in result.keys()
        assert result['visitor'] == visitor
        assert 'browsers' in result.keys()
        assert result['browsers'] == browsers
        assert 'location' in result.keys()
        assert dict( result['location'] ) == dict( location() )
        assert 'visits' in result.keys()
        assert result['visits'] == visits
        assert 'session' in result.keys()
        assert dict( result['session'] ) == dict( session )

    @pytest.mark.usefixtures( 'dynamo_client', 'table_init' )
    def test_duplicate_visitor_addNewVisitor( self, table_name, visitor, browsers, visits ):
        client = DynamoClient( table_name )
        result = client.addVisitor( visitor )
        result = client.addNewVisitor( visitor, location(), browsers, visits )
        assert 'error' in result.keys()
        assert result['error'] == f'Visitor already in table { visitor }'

    @pytest.mark.usefixtures( 'dynamo_client', 'table_init' )
    def test_duplicate_location_addNewVisitor( self, table_name, visitor, browsers, visits ):
        client = DynamoClient( table_name )
        result = client.addLocation( location() )
        result = client.addNewVisitor( visitor, location(), browsers, visits )
        assert 'error' in result.keys()
        assert result['error'] == 'Visitor\'s location is already in table ' + \
            f'{ location() }'

    @pytest.mark.usefixtures( 'dynamo_client', 'table_init' )
    def test_duplicate_session_addNewVisitor( self, table_name, visitor, browsers, visits, session ):
        client = DynamoClient( table_name )
        result = client.addSession( session )
        result = client.addNewVisitor( visitor, location(), browsers, visits )
        assert 'error' in result.keys()
        assert result['error'] == 'Visitor\'s session is already in table ' + \
            f'{ session }'


class Test_getVisitorDetails():
    @pytest.mark.usefixtures( 'dynamo_client', 'table_init' )
    def test_getVisitorDetails( self, table_name, visitor, browsers, visits ):
        print( 'location', location() )
        client = DynamoClient( table_name )
        result = client.addNewVisitor( visitor, location(), browsers, visits )
        print( 'result', result )
        result = client.getVisitorDetails( visitor )
        print( 'result', result )
        assert 'visitor' in result.keys()
        assert dict( result['visitor'] ) == dict( visitor )
        assert 'browsers' in result.keys()
        assert all( [
            dict( result['browsers'][index] ) == dict( browsers[index] )
            for index in range( len( browsers ) )
        ] )
        assert 'location' in result.keys()
        assert dict( result['location'] ) == dict( location() )
        assert 'visits' in result.keys()
        assert all( [
            dict( result['visits'][index] ) == dict( visits[index] )
            for index in range( len( visits ) )
        ] )
        assert 'sessions' in result.keys()

    @pytest.mark.usefixtures( 'dynamo_client', 'table_init' )
    def test_parameter_getVisitorDetails( self, table_name ):
        with pytest.raises( ValueError ) as e:
            assert DynamoClient( table_name ).getVisitorDetails( {} )
        assert str( e.value ) == 'Must pass a Visitor object'

    @pytest.mark.usefixtures( 'dynamo_client', 'table_init' )
    def test_none_getVisitorDetails( self, table_name, visitor ):
        result = DynamoClient( table_name ).getVisitorDetails( visitor )
        assert 'error' in result.keys()
        assert result['error'] == 'Visitor not in table'

    @pytest.mark.usefixtures( 'dynamo_client' )
    def test_table_getVisitorDetails( self, table_name, visitor ):
        result = DynamoClient( table_name ).getVisitorDetails( visitor )
        assert 'error' in result.keys()
        assert result['error'] == 'Could not get visitor from table'


class Test_listVisitors():
    @pytest.mark.usefixtures( 'dynamo_client', 'table_init' )
    def test_listVisitors( self, table_name, visitor ):
        client = DynamoClient( table_name )
        client.addVisitor( visitor )
        result = client.listVisitors()
        assert isinstance( result, list )
        assert len( result ) == 1

    @pytest.mark.usefixtures( 'dynamo_client' )
    def test_table_listVisitors( self, table_name ):
        result = DynamoClient( table_name ).listVisitors()
        assert 'error' in result.keys()
        assert result['error'] == 'Could not get visitors from table'
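The dynamo_client and table_init fixtures live elsewhere in the repo; one plausible shape for them, assuming moto is used to fake DynamoDB (the moto decorator, fixture bodies, and key schema below are all guesses, not the project's code):

import boto3
import pytest
from moto import mock_aws  # moto >= 5; older releases expose mock_dynamodb instead

@pytest.fixture
def dynamo_client():
    # Run the whole test inside moto's fake AWS so no real calls are made.
    with mock_aws():
        yield boto3.client( 'dynamodb' )

@pytest.fixture
def table_init( dynamo_client, table_name ):
    # Create the single-table layout the tests expect (key names are a guess).
    dynamo_client.create_table(
        TableName=table_name,
        KeySchema=[ { 'AttributeName': 'PK', 'KeyType': 'HASH' },
                    { 'AttributeName': 'SK', 'KeyType': 'RANGE' } ],
        AttributeDefinitions=[ { 'AttributeName': 'PK', 'AttributeType': 'S' },
                               { 'AttributeName': 'SK', 'AttributeType': 'S' } ],
        BillingMode='PAY_PER_REQUEST',
    )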
39.897233
76
0.698237
1,135
10,094
6.068722
0.066079
0.065331
0.04878
0.105836
0.853078
0.834785
0.799216
0.785134
0.75363
0.695993
0
0.000607
0.18407
10,094
252
77
40.055556
0.835721
0
0
0.651786
0
0
0.148405
0
0
0
0
0
0.308036
1
0.120536
false
0.026786
0.013393
0
0.169643
0.013393
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
471247d25d9088f6cf92500cccf4f9edc1be50ea
108
py
Python
app/main/__init__.py
wasongapaul5/Pitch
1270022797122b9d2ede567783c1ce44da445015
[ "MIT" ]
null
null
null
app/main/__init__.py
wasongapaul5/Pitch
1270022797122b9d2ede567783c1ce44da445015
[ "MIT" ]
3
2021-06-08T23:02:05.000Z
2022-01-13T03:38:12.000Z
app/main/__init__.py
wasongapaul5/Pitch
1270022797122b9d2ede567783c1ce44da445015
[ "MIT" ]
null
null
null
from flask import Blueprint

main = Blueprint('main', __name__)

from .views import *
from . import views, forms
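A blueprint only takes effect once it is registered on the application; in the usual application-factory layout that happens roughly like this (a sketch, since the factory itself is not part of this record):

from flask import Flask

def create_app():
    app = Flask(__name__)
    # Import inside the factory to avoid a circular import with the blueprint module.
    from .main import main as main_blueprint
    app.register_blueprint(main_blueprint)
    return app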
21.6
33
0.777778
15
108
5.333333
0.533333
0.325
0
0
0
0
0
0
0
0
0
0
0.12963
108
5
34
21.6
0.851064
0
0
0
0
0
0.036697
0
0
0
0
0
0
1
0
false
0
0.75
0
0.75
0.5
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
1
0
6
5b4a5fa87b1b864d569407f7a58ad86575a24e1c
42
py
Python
broccoli/tool/editor/__init__.py
naritotakizawa/broccoli
7feddc9353313cc2ba0d39228a4109acfdd71d4f
[ "MIT" ]
5
2018-08-08T07:17:49.000Z
2018-10-09T02:42:29.000Z
broccoli/tool/editor/__init__.py
naritotakizawa/broccoli
7feddc9353313cc2ba0d39228a4109acfdd71d4f
[ "MIT" ]
68
2018-07-05T07:12:34.000Z
2020-12-28T04:51:32.000Z
broccoli/tool/editor/__init__.py
naritotakizawa/broccoli
7feddc9353313cc2ba0d39228a4109acfdd71d4f
[ "MIT" ]
null
null
null
from .canvas import *
from .list import *
14
21
0.714286
6
42
5
0.666667
0
0
0
0
0
0
0
0
0
0
0
0.190476
42
2
22
21
0.882353
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
5bac564bf55b71895f34f58e063f54ecc2ea809f
70
py
Python
tests/python/test_nyxus.py
sameeul/nyxus
46210ac218b456f822139e884dfed4bd2fdbbfce
[ "MIT" ]
null
null
null
tests/python/test_nyxus.py
sameeul/nyxus
46210ac218b456f822139e884dfed4bd2fdbbfce
[ "MIT" ]
6
2022-02-09T20:42:43.000Z
2022-03-24T20:14:47.000Z
tests/python/test_nyxus.py
sameeul/nyxus
46210ac218b456f822139e884dfed4bd2fdbbfce
[ "MIT" ]
4
2022-02-03T20:26:23.000Z
2022-02-17T02:59:27.000Z
import nyxus


def test_import():
    assert nyxus.__name__ == "nyxus"
17.5
37
0.7
9
70
4.888889
0.666667
0
0
0
0
0
0
0
0
0
0
0
0.185714
70
4
37
17.5
0.77193
0
0
0
0
0
0.070423
0
0
0
0
0
0.333333
1
0.333333
true
0
0.666667
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
1
0
1
0
0
6
5bb61b9131f7935780771fb8348a8b450cd9dbea
15,899
py
Python
main.py
gabrielacorona/sisOpsPYTHON
ef42cfb40d8c140798c7e69db9a539cfefabea10
[ "MIT" ]
1
2020-02-06T22:18:08.000Z
2020-02-06T22:18:08.000Z
main.py
gabrielacorona/sisOpsPYTHON
ef42cfb40d8c140798c7e69db9a539cfefabea10
[ "MIT" ]
null
null
null
main.py
gabrielacorona/sisOpsPYTHON
ef42cfb40d8c140798c7e69db9a539cfefabea10
[ "MIT" ]
null
null
null
# Operating systems project
# FIFO / LRU implementation
# Language version: Python 3
# Team members:
# Gabriela Corona Garza A01282529
# Marlon Omar Lopez A00139431
# Paulina Gonzalez Davalos A01194111
# Jorge Arturo Ramirez A01088601
# Victor Andres Villarreal Grimaldo A01039863
#
# *************** START ***************
# C = start a run of the program
#
# *************** LOAD A PROCESS ***************
# P n k
# n = number of bytes to load into memory
# k = arbitrary integer that identifies the process (its id)
# example:
# P 124 1 --> assign 124 bytes to process 1
#
# *************** ACCESS A VIRTUAL ADDRESS ***************
# A d p m
# d = virtual address
# p = process id
# m = 0 --> read
# m = 1 --> modify
# example:
# A 17 5 0 --> read access to virtual address 17 of process 5
#
# *************** FREE THE PAGES OF A PROCESS ***************
# L p --> free the pages of process p
# output --> the input command and the list of page frames that were freed
#
# *************** COMMENT ***************
# If an input line matches no command, just print it
#
# *************** END ***************
# F = end: prints a statistics report that includes:
# - turnaround time of each process considered --> difference of timestamps
# - average turnaround
# - number of page faults
# - number of page faults per process
# - total number of swap-out / swap-in operations
#
# *************** EXIT ***************
# E = terminate the program and print a goodbye message

from timeit import default_timer as timer


def FIFO(comandos):
    print(' -------------- FIFO --------------')
    print('')
    queue = []
    pageFaults = {}
    memoriaV = []
    memoriaActual = 2048
    memoriaVirtual = 4096
    """
    How the structure that simulates virtual memory is composed:
    a queue of pairs, where each pair is
    [list with the process information, timestamp in seconds]
    """
    for comand in comandos:
        if comand[0] == 'P':  # load a process
            if memoriaActual - comand[1] > 0:  # check whether it fits in memory
                start = timer()
                pair = [comand, start]  # the pair is the command plus its timestamp
                memoriaActual -= comand[1]  # subtract the memory this process occupies
                queue.append(pair)  # push the pair onto the queue
                # count the page faults generated by each process
                pageFaults[comand[2]] = pageFaults.get(comand[2], 0) + 1
            else:
                # If it does not fit, keep evicting the head of the queue (FIFO)
                # until the process fits or the queue is empty.
                while memoriaActual - comand[1] < 0 and queue:
                    victim = queue.pop(0)
                    temp = victim[0]
                    memoriaActual += temp[1]
                    memoriaV.append(victim)  # swapped-out processes go to virtual memory
                pageFaults[comand[2]] = pageFaults.get(comand[2], 0) + 1
        if comand[0] == 'L':  # free the process with the matching id from memory
            for pair in list(queue):
                temp = pair[0]
                if temp[2] == comand[1]:
                    # whenever a process leaves main memory, compute its turnaround
                    end = timer()
                    start = pair[1]
                    pair.pop()
                    pair.append(end - start)
                    memoriaV.append(pair)  # the freed process goes to virtual memory
                    memoriaVirtual -= temp[1]  # bytes now occupied in virtual memory
                    memoriaActual += temp[1]  # bytes returned to main memory
                    queue.remove(pair)  # take the element out of the queue
        if comand[0] == 'A':  # read or modify a process
            if comand[3] == 0:  # read; a page fault occurs if it is not in main memory
                for pair in queue:
                    temp = pair[0]
                    if temp[2] == comand[2]:
                        print('Reading process ' + str(temp))
                for pair in memoriaV:  # not in main memory: this is a page fault
                    temp = pair[0]
                    if temp[2] == comand[2]:
                        print('Process ' + str(temp) + ' is not in main memory')
                        pageFaults[comand[2]] = pageFaults.get(comand[2], 0) + 1
            if comand[3] == 1:  # modify
                for pair in queue:
                    temp = pair[0]
                    if temp[2] == comand[2]:
                        # swap the stored values for the new ones from the command
                        appnd = temp[2]
                        temp.remove(temp[1])
                        temp.remove(temp[1])
                        temp.append(comand[1])
                        temp.append(appnd)
                    else:
                        pageFaults[comand[2]] = pageFaults.get(comand[2], 0) + 1
        # *************** END ***************
        # prints the statistics report described in the header comment
        if comand[0] == 'F':
            totalPf = 0
            totalTurn = 0
            totalPro = 0
            for f in pageFaults:
                totalPf += pageFaults[f]
            print('Total page faults: ' + str(totalPf))
            print('')
            print('Page faults by process id: ')
            print('')
            for f in pageFaults:
                print(str(f) + ' = ' + str(pageFaults[f]))
            print('')
            print('Turnaround per process (FIFO)')
            for m in memoriaV:
                temp = m[0]
                totalTurn += m[1]
                totalPro += 1
                print(str(temp[2]) + ' = ' + str(m[1]))
            for m in queue:
                temp = m[0]
                totalTurn += m[1]
                totalPro += 1
                print(str(temp[2]) + ' = ' + str(m[1]))
            print('')
            if totalPro != 0:
                print('Average turnaround')
                print(totalTurn / totalPro)
                print('')
            # reset the variables that simulate memory before starting another run
            queue = []
            pageFaults = {}
            memoriaV = []
            memoriaActual = 2048
            memoriaVirtual = 4096


def LRU(comandos):
    print(' -------------- LRU --------------')
    print('')
    queue = []
    pageFaults = {}
    memoriaV = []
    memoriaActual = 2048
    memoriaVirtual = 4096
    """
    How the structure that simulates virtual memory is composed:
    a queue of trios, where each trio is
    [list with the process information, start timestamp, last-use timestamp]
    """
    for comand in comandos:
        if comand[0] == 'P':  # load a process
            if memoriaActual - comand[1] > 0:  # check whether it fits in memory
                start = timer()
                trio = [comand, start, start]  # command, start time, last-use time
                memoriaActual -= comand[1]  # subtract the memory this process occupies
                queue.append(trio)  # push the trio onto the queue
                pageFaults[comand[2]] = pageFaults.get(comand[2], 0) + 1
            else:
                # If it does not fit, keep evicting the head of the queue until
                # the process fits or the queue is empty.
                while memoriaActual - comand[1] < 0 and queue:
                    victim = queue.pop(0)
                    temp = victim[0]
                    memoriaActual += temp[1]
                    memoriaV.append(victim)  # swapped-out processes go to virtual memory
                pageFaults[comand[2]] = pageFaults.get(comand[2], 0) + 1
        if comand[0] == 'L':  # free the least recently used process from memory
            oldestPro = -1
            popId = -1
            proTemp = []
            for trio in queue:  # record in each trio how long the process has been idle
                end = timer()
                start = trio[1]
                trio.pop()
                trio.append(end - start)
            for trio in queue:  # find the id of the oldest (least recently used) process
                temp = trio[2]
                if temp > oldestPro:
                    oldestPro = temp
                    proTemp = trio[0]
                    popId = proTemp[2]
            for trio in list(queue):
                temp = trio[0]
                if temp[2] == popId:  # evict the oldest process and compute its turnaround
                    end = timer()
                    start = trio[1]
                    appnd = trio[2]
                    trio.pop()
                    trio.pop()
                    trio.append(end - start)  # swap the values to store the turnaround
                    trio.append(appnd)
                    memoriaV.append(trio)  # the freed process goes to virtual memory
                    memoriaVirtual -= temp[1]  # bytes now occupied in virtual memory
                    memoriaActual += temp[1]  # bytes returned to main memory
                    queue.remove(trio)  # take the element out of the queue
        if comand[0] == 'A':  # read or modify a process
            if comand[3] == 0:  # read; a page fault occurs if it is not in main memory
                for trio in queue:
                    temp = trio[0]
                    if temp[2] == comand[2]:
                        print('Reading process ' + str(temp))
                        start = timer()
                        trio.pop()
                        trio.append(start)  # refresh the last-use timestamp
                for trio in memoriaV:  # not in main memory: this is a page fault
                    temp = trio[0]
                    if temp[2] == comand[2]:
                        print('Process ' + str(temp) + ' is not in main memory')
                        pageFaults[comand[2]] = pageFaults.get(comand[2], 0) + 1
            if comand[3] == 1:  # modify
                for trio in queue:
                    temp = trio[0]
                    if temp[2] == comand[2]:
                        # swap the stored values for the new ones from the command
                        appnd = temp[2]
                        temp.remove(temp[1])
                        temp.remove(temp[1])
                        temp.append(comand[1])
                        temp.append(appnd)
                    else:
                        pageFaults[comand[2]] = pageFaults.get(comand[2], 0) + 1
        # *************** END ***************
        # prints the statistics report described in the header comment
        if comand[0] == 'F':
            totalPf = 0
            totalTurn = 0
            totalPro = 0
            for f in pageFaults:
                totalPf += pageFaults[f]
            print('Total page faults: ' + str(totalPf))
            print('')
            print('Page faults by process id: ')
            print('')
            for f in pageFaults:
                print(str(f) + ' = ' + str(pageFaults[f]))
            print('Turnaround per process (LRU)')
            print('')
            for m in memoriaV:
                temp = m[0]
                totalTurn += m[1]
                totalPro += 1
                print(str(temp[2]) + ' = ' + str(m[1]))
            for m in queue:
                temp = m[0]
                totalTurn += m[1]
                totalPro += 1
                print(str(temp[2]) + ' = ' + str(m[1]))
            print('')
            if totalPro != 0:
                print('Average turnaround')
                print(totalTurn / totalPro)
            # reset the variables that simulate memory before starting another run
            queue = []
            pageFaults = {}
            memoriaV = []
            memoriaActual = 2048
            memoriaVirtual = 4096


def main():
    f = open("txtFiles/ArchivoTrabajo.txt", "r")
    lines = f.read().splitlines()
    comandos = []
    for i, linea in enumerate(lines):
        words = ' '.join(linea.split())  # normalize whitespace
        # Argument types per command:
        # L: int
        # P: int int
        # A: int int int
        if not words:  # skip blank lines
            continue
        listaLineas = words.split()
        comando = []
        if words[0] == 'A':
            comando.append(listaLineas[0])
            comando.append(int(listaLineas[1]))
            comando.append(int(listaLineas[2]))
            comando.append(int(listaLineas[3]))
        if words[0] == 'P':
            comando.append(listaLineas[0])
            comando.append(int(listaLineas[1]))
            comando.append(int(listaLineas[2]))
        if words[0] == 'L':
            comando.append(listaLineas[0])
            comando.append(int(listaLineas[1]))
        if words[0] == 'C':
            comando.append(listaLineas[0])
        if words[0] == 'E':
            comando.append(listaLineas[0])
        if words[0] == 'F':
            comando.append(listaLineas[0])
        if comando:
            comandos.append(comando)
        else:
            print(linea)  # a line that matches no command is just printed
    # print(comandos)
    FIFO(comandos)
    LRU(comandos)


if __name__ == '__main__':
    main()
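The header comments document the command grammar, but the record carries no sample workload. A minimal sketch of one, written from the grammar above; the byte counts and process ids are hypothetical, not from the source:

# Hypothetical contents for txtFiles/ArchivoTrabajo.txt (values are illustrative).
sample = (
    "C\n"         # start a run
    "P 124 1\n"   # assign 124 bytes to process 1
    "P 512 2\n"   # assign 512 bytes to process 2
    "A 17 1 0\n"  # read virtual address 17 of process 1
    "L 1\n"       # free the pages of process 1
    "F\n"         # print the statistics report
    "E\n"         # terminate
)
with open("txtFiles/ArchivoTrabajo.txt", "w") as fh:
    fh.write(sample)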
34.563043
138
0.487641
1,713
15,899
4.520724
0.152948
0.027118
0.035124
0.03719
0.735537
0.730888
0.722366
0.702738
0.702738
0.694861
0
0.027322
0.412982
15,899
460
139
34.563043
0.802422
0.298761
0
0.764045
0
0
0.044539
0.002542
0
0
0
0
0
1
0.011236
false
0
0.003745
0
0.014981
0.123596
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
75205f2ea07245576c5b01f1a8e46a7b62b47566
228
py
Python
Spectrometers/__init__.py
UltrafastCornell/Devices
f51f99ff5c46c39d3c98bf630f0bbd792ee81719
[ "MIT" ]
1
2019-03-24T14:59:14.000Z
2019-03-24T14:59:14.000Z
Spectrometers/__init__.py
UltrafastCornell/Devices
f51f99ff5c46c39d3c98bf630f0bbd792ee81719
[ "MIT" ]
null
null
null
Spectrometers/__init__.py
UltrafastCornell/Devices
f51f99ff5c46c39d3c98bf630f0bbd792ee81719
[ "MIT" ]
null
null
null
# Import Spectrometer base class
from Devices.Spectrometers.Spectrometer import Spectrometer

# Load individual spectrometer classes
from Devices.Spectrometers.OceanOptics import OceanOptics
from Devices.Spectrometers.Ando import Ando
38
59
0.868421
26
228
7.615385
0.5
0.166667
0.363636
0
0
0
0
0
0
0
0
0
0.096491
228
6
60
38
0.961165
0.267544
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
f34189cd61e979124b40087df00af2d9d107b7f1
26
py
Python
samples/wizard/__init__.py
zoho/zohocrm-python-sdk-2.1
cde6fcd1c5c8f7a572154ebb2b947ec697c24209
[ "Apache-2.0" ]
null
null
null
samples/wizard/__init__.py
zoho/zohocrm-python-sdk-2.1
cde6fcd1c5c8f7a572154ebb2b947ec697c24209
[ "Apache-2.0" ]
null
null
null
samples/wizard/__init__.py
zoho/zohocrm-python-sdk-2.1
cde6fcd1c5c8f7a572154ebb2b947ec697c24209
[ "Apache-2.0" ]
null
null
null
from .wizard import Wizard
26
26
0.846154
4
26
5.5
0.75
0
0
0
0
0
0
0
0
0
0
0
0.115385
26
1
26
26
0.956522
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
f35fcdc758444ce766bd6c8e9023116fc917c010
128
py
Python
tests/test_kbfs_upload.py
da-code-a/KBFS-Upload-API
3269987b142b2352f7c7b66ffd8872416d01df5e
[ "MIT" ]
1
2021-10-05T04:54:27.000Z
2021-10-05T04:54:27.000Z
tests/test_kbfs_upload.py
da-code-a/KBFS-Upload-API
3269987b142b2352f7c7b66ffd8872416d01df5e
[ "MIT" ]
null
null
null
tests/test_kbfs_upload.py
da-code-a/KBFS-Upload-API
3269987b142b2352f7c7b66ffd8872416d01df5e
[ "MIT" ]
null
null
null
from kbfs_upload import __version__


def test_version():
    assert __version__ == "0.1.0"  # nosec Bandit should ignore this.
21.333333
69
0.734375
18
128
4.666667
0.833333
0
0
0
0
0
0
0
0
0
0
0.028571
0.179688
128
5
70
25.6
0.771429
0.25
0
0
0
0
0.053191
0
0
0
0
0
0.333333
1
0.333333
true
0
0.333333
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
1
0
1
0
0
6
f3c8be5781156149eb5fbb8e74d6a9b195f11f44
73
py
Python
funcy/__init__.py
tushushu/funcy
2bb4d10d7c98431a3bdc70c93552a12556c24341
[ "MIT" ]
null
null
null
funcy/__init__.py
tushushu/funcy
2bb4d10d7c98431a3bdc70c93552a12556c24341
[ "MIT" ]
null
null
null
funcy/__init__.py
tushushu/funcy
2bb4d10d7c98431a3bdc70c93552a12556c24341
[ "MIT" ]
null
null
null
""" Author: tushushu Date: 2021-09-20 13:10:45 """ from .src import Iter
12.166667
25
0.671233
13
73
3.769231
1
0
0
0
0
0
0
0
0
0
0
0.225806
0.150685
73
6
26
12.166667
0.564516
0.575342
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
341c134d3a85459dbfcd4de4c08752904972a82d
4,658
py
Python
sqlpie/models/model_classifier.py
lessaworld/sqlpie
22cac1fc7f9cb939e823058f84a68988e03ab239
[ "MIT" ]
3
2016-01-27T19:49:23.000Z
2020-08-18T13:59:02.000Z
sqlpie/models/model_classifier.py
lessaworld/sqlpie
22cac1fc7f9cb939e823058f84a68988e03ab239
[ "MIT" ]
null
null
null
sqlpie/models/model_classifier.py
lessaworld/sqlpie
22cac1fc7f9cb939e823058f84a68988e03ab239
[ "MIT" ]
1
2016-02-01T01:57:54.000Z
2016-02-01T01:57:54.000Z
# -*- coding: utf-8 -*-
"""
SQLpie License (MIT License)
Copyright (c) 2011-2016 André Lessa, http://sqlpie.com
See LICENSE file.
"""
from flask import g
import sqlpie


class ModelClassifier(object):

    __tablename = "model_classifiers"
    LABEL_TYPE = 0
    FEATURE_TYPE = 1
    LABEL_FEATURE_TYPE = 2
    NULL_MAGIC_VALUE = "nil"

    def __init__(self, model_id):
        self.model_id = model_id

    def increment_label(self, subject_id, label, incr=1):
        sql = "INSERT INTO " + self.__tablename
        sql += " (model_id, subject_id, score_type, label, feature) VALUES (UNHEX(%s), UNHEX(%s), %s, %s, %s)"
        sql += " ON DUPLICATE KEY UPDATE score = score + %s"
        g.cursor.execute(sql, (self.model_id, subject_id, ModelClassifier.LABEL_TYPE, label,
                               ModelClassifier.NULL_MAGIC_VALUE, incr))
        if sqlpie.Util.is_debug():
            print g.cursor._executed

    def increment_feature(self, subject_id, feature, incr):
        sql = "INSERT INTO " + self.__tablename
        sql += " (model_id, subject_id, score_type, label, feature) VALUES (UNHEX(%s), UNHEX(%s), %s, %s, %s)"
        sql += " ON DUPLICATE KEY UPDATE score = score + %s"
        g.cursor.execute(sql, (self.model_id, subject_id, ModelClassifier.FEATURE_TYPE,
                               ModelClassifier.NULL_MAGIC_VALUE, feature, incr))
        if sqlpie.Util.is_debug():
            print g.cursor._executed

    def increment_label_feature(self, subject_id, label, feature, incr):
        sql = "INSERT INTO " + self.__tablename
        sql += " (model_id, subject_id, score_type, label, feature) VALUES (UNHEX(%s), UNHEX(%s), %s, %s, %s)"
        sql += " ON DUPLICATE KEY UPDATE score = score + %s"
        g.cursor.execute(sql, (self.model_id, subject_id, ModelClassifier.LABEL_FEATURE_TYPE, label, feature, incr))
        if sqlpie.Util.is_debug():
            print g.cursor._executed

    def clear(self):
        sql = "DELETE FROM " + self.__tablename + " where model_id = UNHEX(%s)"
        g.cursor.execute(sql, (self.model_id,))
        if sqlpie.Util.is_debug():
            print g.cursor._executed

    def get_labels(self, subject_id):
        sql = "SELECT label, score FROM "
        sql += self.__tablename + " WHERE model_id = UNHEX(%s) and subject_id = UNHEX(%s) and score_type = 0"
        g.cursor.execute(sql, (self.model_id, subject_id,))
        if sqlpie.Util.is_debug():
            print g.cursor._executed
        data = g.cursor.fetchall()
        response = {}
        if data:
            for i in data:
                response[i[0]] = i[1]
        return response

    def get_document_features(self, subject_id, features):
        # todo : get top N features
        sql = "SELECT feature, score FROM "
        sql += self.__tablename + " WHERE model_id = UNHEX(%s) and subject_id = UNHEX(%s) and score_type = 1 and feature in %s"
        g.cursor.execute(sql, (self.model_id, subject_id, features))
        if sqlpie.Util.is_debug():
            print g.cursor._executed
        data = g.cursor.fetchall()
        response = {}
        if data:
            for i in data:
                response[i[0]] = i[1]
        return response

    def sum_all_features(self, subject_id):
        sql = "SELECT sum(score) FROM "
        sql += self.__tablename + " WHERE model_id = UNHEX(%s) and subject_id = UNHEX(%s) and score_type = 1"
        g.cursor.execute(sql, (self.model_id, subject_id,))
        if sqlpie.Util.is_debug():
            print g.cursor._executed
        data = g.cursor.fetchone()
        return data[0] or 0

    def get_label_features(self, subject_id, label, features):
        sql = "SELECT feature, score FROM "
        sql += self.__tablename + " WHERE model_id = UNHEX(%s) and subject_id = UNHEX(%s) and score_type = 2 and feature in %s "
        sql += " and label = %s "
        g.cursor.execute(sql, (self.model_id, subject_id, features, label))
        if sqlpie.Util.is_debug():
            print g.cursor._executed
        data = g.cursor.fetchall()
        response = {}
        if data:
            for i in data:
                response[i[0]] = i[1]
        return response

    def sum_feature_values(self, subject_id, label):
        sql = "SELECT sum(score) FROM "
        sql += self.__tablename + " WHERE model_id = UNHEX(%s) and subject_id = UNHEX(%s) and score_type = 2 and label = %s"
        g.cursor.execute(sql, (self.model_id, subject_id, label,))
        if sqlpie.Util.is_debug():
            print g.cursor._executed
        data = g.cursor.fetchone()
        return data[0] or 0

    @staticmethod
    def reset():
        sql = "TRUNCATE " + ModelClassifier.__tablename
        g.cursor.execute(sql)
40.155172
137
0.610992
625
4,658
4.3536
0.1408
0.079383
0.044469
0.064682
0.740169
0.727306
0.727306
0.716281
0.706358
0.706358
0
0.008199
0.266853
4,658
115
138
40.504348
0.78858
0.01009
0
0.591398
0
0.064516
0.237936
0
0
0
0
0.008696
0
0
null
null
0
0.021505
null
null
0.096774
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
6
341cbd6adc055cf143a2fc475c9a965a3a5f3cd9
67
py
Python
tools/jenkins-scripts/configs/jenkins-job-watchdog.py
wzhengsen/engine-x
f398b94a9a5bb9645c16d12d82d6366589db4e21
[ "MIT" ]
113
2020-02-25T03:19:32.000Z
2021-05-17T09:15:40.000Z
tools/jenkins-scripts/configs/jenkins-job-watchdog.py
wzhengsen/engine-x
f398b94a9a5bb9645c16d12d82d6366589db4e21
[ "MIT" ]
172
2020-02-21T08:56:42.000Z
2021-05-12T03:18:40.000Z
tools/jenkins-scripts/configs/jenkins-job-watchdog.py
wzhengsen/engine-x
f398b94a9a5bb9645c16d12d82d6366589db4e21
[ "MIT" ]
62
2020-02-23T14:10:16.000Z
2021-05-14T13:53:19.000Z
import os

os.system("python -u tools/jenkins-scripts/watchdog.py")
22.333333
56
0.776119
11
67
4.727273
0.909091
0
0
0
0
0
0
0
0
0
0
0
0.074627
67
2
57
33.5
0.83871
0
0
0
0
0
0.641791
0.492537
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
1
1
null
0
0
0
0
0
0
1
0
1
0
0
0
0
6
34326c02316106e96cb02248c362ab2d19c999eb
82
py
Python
welcomescreen.py
SarahSwilem/BabyCrying
9ff34b3baa684357daad4680f45cccade35c510b
[ "MIT" ]
57
2019-05-04T00:22:53.000Z
2022-03-29T22:21:08.000Z
welcomescreen.py
SarahSwilem/BabyCrying
9ff34b3baa684357daad4680f45cccade35c510b
[ "MIT" ]
7
2019-09-13T20:29:43.000Z
2022-03-15T02:55:16.000Z
welcomescreen.py
SarahSwilem/BabyCrying
9ff34b3baa684357daad4680f45cccade35c510b
[ "MIT" ]
25
2019-05-04T00:23:10.000Z
2022-03-30T12:06:32.000Z
from kivy.uix.screenmanager import Screen


class WelcomeScreen(Screen):
    pass
13.666667
41
0.780488
10
82
6.4
0.9
0
0
0
0
0
0
0
0
0
0
0
0.158537
82
5
42
16.4
0.927536
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.333333
0.333333
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
1
0
1
0
0
6
34345c3d9cc85df1d8d3fb19988ed1eddefc6dcd
48
py
Python
grid/app/main/routes/__init__.py
pedroespindula/PyGrid
7ef4c4d7720d86086166c1dc8d1af2329da70c3e
[ "Apache-2.0" ]
null
null
null
grid/app/main/routes/__init__.py
pedroespindula/PyGrid
7ef4c4d7720d86086166c1dc8d1af2329da70c3e
[ "Apache-2.0" ]
null
null
null
grid/app/main/routes/__init__.py
pedroespindula/PyGrid
7ef4c4d7720d86086166c1dc8d1af2329da70c3e
[ "Apache-2.0" ]
null
null
null
from .federated import *
from .general import *
16
24
0.75
6
48
6
0.666667
0
0
0
0
0
0
0
0
0
0
0
0.166667
48
2
25
24
0.9
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
344e6e071f40a481ef24adbdac827d73ca2fa76a
96
py
Python
venv/lib/python3.8/site-packages/cryptography/x509/extensions.py
Retraces/UkraineBot
3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71
[ "MIT" ]
1
2021-11-07T22:40:27.000Z
2021-11-07T22:40:27.000Z
venv/lib/python3.8/site-packages/cryptography/x509/extensions.py
Retraces/UkraineBot
3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71
[ "MIT" ]
19
2021-11-20T04:09:18.000Z
2022-03-23T15:05:55.000Z
venv/lib/python3.8/site-packages/cryptography/x509/extensions.py
Retraces/UkraineBot
3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71
[ "MIT" ]
null
null
null
/home/runner/.cache/pip/pool/0e/c5/77/3a6e4f49acfa84b22360845d4d07420cd17f6d2d84e6ce13af8699eb89
96
96
0.895833
9
96
9.555556
1
0
0
0
0
0
0
0
0
0
0
0.416667
0
96
1
96
96
0.479167
0
0
0
0
0
0
0
0
1
0
0
0
0
null
null
0
0
null
null
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
1
0
0
0
0
0
0
0
null
1
0
0
0
1
0
0
0
0
0
0
0
0
6
cabfaff5981750cecc1532c178ac848b2ca7eb76
1,099
py
Python
020_Valid_Parentheses.py
joshlyman/Josh-LeetCode
cc9e2cc406d2cbd5a90ee579efbcaeffb842c5ed
[ "MIT" ]
null
null
null
020_Valid_Parentheses.py
joshlyman/Josh-LeetCode
cc9e2cc406d2cbd5a90ee579efbcaeffb842c5ed
[ "MIT" ]
null
null
null
020_Valid_Parentheses.py
joshlyman/Josh-LeetCode
cc9e2cc406d2cbd5a90ee579efbcaeffb842c5ed
[ "MIT" ]
null
null
null
class Solution:
    def isValid(self, s: str) -> bool:
        stack = []
        lookup = {}
        lookup['('] = ')'
        lookup['['] = ']'
        lookup['{'] = '}'
        for para in s:
            if para in lookup:
                stack.append(para)
            elif len(stack) == 0:
                return False
            elif lookup[stack.pop()] != para:
                return False
        return len(stack) == 0

# Time: O(n)
# Space: O(n)

# V2
class Solution:
    def isValid(self, s: str) -> bool:
        stack = []
        lookup = {}
        lookup['('] = ')'
        lookup['['] = ']'
        lookup['{'] = '}'
        for para in s:
            if para in lookup:
                stack.append(para)
            elif len(stack) == 0:
                return False
            elif lookup[stack.pop()] != para:
                return False
        # check whether the stack popped all items; otherwise the result is False
        # return len(stack) == 0
        if len(stack) == 0:
            return True
        else:
            return False
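A quick sanity check of the stack-based matcher above; the test strings are illustrative, not from the source:

checker = Solution()
assert checker.isValid("()[]{}") is True   # every opener is closed in order
assert checker.isValid("(]") is False      # mismatched pair
assert checker.isValid("([)]") is False    # interleaving breaks the stack top
assert checker.isValid("(") is False       # leftover opener on the stack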
24.422222
67
0.406733
109
1,099
4.100917
0.330275
0.161074
0.100671
0.100671
0.715884
0.715884
0.715884
0.715884
0.715884
0.715884
0
0.010152
0.462238
1,099
45
68
24.422222
0.746193
0.094631
0
0.878788
0
0
0.012121
0
0
0
0
0
0
1
0.060606
false
0
0
0
0.333333
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
1b0f0b6095f1fc9eb7301216781b0667b2f05107
37
py
Python
app/ffmpeg/__init__.py
ihor-pyvovarnyk/oae-sound-processing-tool
602420cd9705997002b6cb9eb86bd09be899bd5d
[ "BSD-2-Clause" ]
null
null
null
app/ffmpeg/__init__.py
ihor-pyvovarnyk/oae-sound-processing-tool
602420cd9705997002b6cb9eb86bd09be899bd5d
[ "BSD-2-Clause" ]
null
null
null
app/ffmpeg/__init__.py
ihor-pyvovarnyk/oae-sound-processing-tool
602420cd9705997002b6cb9eb86bd09be899bd5d
[ "BSD-2-Clause" ]
null
null
null
from .facade import Facade as FFmpeg
18.5
36
0.810811
6
37
5
0.833333
0
0
0
0
0
0
0
0
0
0
0
0.162162
37
1
37
37
0.967742
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
1b425e8370f4d70d531eabc9dc8a55135b5c5daa
3,835
py
Python
src/figures.py
mikhail-vlasenko/Tetris-AI
4e9a7bfa02a486e1aa91282058fbee4a88d5ca11
[ "MIT" ]
7
2020-08-12T22:16:09.000Z
2021-12-29T12:20:06.000Z
src/figures.py
FangWenSheng1/Tetris-AI
fbfc6266bebe3e76407a299b1e64aa6d8aae35a3
[ "MIT" ]
6
2020-08-13T01:00:43.000Z
2022-01-17T10:30:51.000Z
src/figures.py
FangWenSheng1/Tetris-AI
fbfc6266bebe3e76407a299b1e64aa6d8aae35a3
[ "MIT" ]
4
2020-08-15T00:14:36.000Z
2022-01-04T01:37:31.000Z
import numpy as np
from config import CONFIG

# One 4x4 bitmap per rotation for each of the 7 tetromino types.
array_of_figures = np.array([
    [  # 0 - line
        [[1, 1, 1, 1], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
        [[0, 0, 1, 0], [0, 0, 1, 0], [0, 0, 1, 0], [0, 0, 1, 0]],
        [[0, 0, 0, 0], [1, 1, 1, 1], [0, 0, 0, 0], [0, 0, 0, 0]],
        [[0, 1, 0, 0], [0, 1, 0, 0], [0, 1, 0, 0], [0, 1, 0, 0]],
    ],
    [  # 1 - square (rotation-invariant)
        [[0, 1, 1, 0], [0, 1, 1, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
        [[0, 1, 1, 0], [0, 1, 1, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
        [[0, 1, 1, 0], [0, 1, 1, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
        [[0, 1, 1, 0], [0, 1, 1, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
    ],
    [  # 2 - T(flip)
        [[0, 1, 0, 0], [1, 1, 1, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
        [[0, 1, 0, 0], [0, 1, 1, 0], [0, 1, 0, 0], [0, 0, 0, 0]],
        [[0, 0, 0, 0], [1, 1, 1, 0], [0, 1, 0, 0], [0, 0, 0, 0]],
        [[0, 1, 0, 0], [1, 1, 0, 0], [0, 1, 0, 0], [0, 0, 0, 0]],
    ],
    [  # 3 - |__
        [[1, 0, 0, 0], [1, 1, 1, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
        [[0, 1, 1, 0], [0, 1, 0, 0], [0, 1, 0, 0], [0, 0, 0, 0]],
        [[0, 0, 0, 0], [1, 1, 1, 0], [0, 0, 1, 0], [0, 0, 0, 0]],
        [[0, 1, 0, 0], [0, 1, 0, 0], [1, 1, 0, 0], [0, 0, 0, 0]],
    ],
    [  # 4 - __|
        [[0, 0, 1, 0], [1, 1, 1, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
        [[0, 1, 0, 0], [0, 1, 0, 0], [0, 1, 1, 0], [0, 0, 0, 0]],
        [[0, 0, 0, 0], [1, 1, 1, 0], [1, 0, 0, 0], [0, 0, 0, 0]],
        [[1, 1, 0, 0], [0, 1, 0, 0], [0, 1, 0, 0], [0, 0, 0, 0]],
    ],
    [  # 5 - -|_
        [[1, 1, 0, 0], [0, 1, 1, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
        [[0, 0, 1, 0], [0, 1, 1, 0], [0, 1, 0, 0], [0, 0, 0, 0]],
        [[0, 0, 0, 0], [1, 1, 0, 0], [0, 1, 1, 0], [0, 0, 0, 0]],
        [[0, 1, 0, 0], [1, 1, 0, 0], [1, 0, 0, 0], [0, 0, 0, 0]],
    ],
    [  # 6 - _|-
        [[0, 1, 1, 0], [1, 1, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
        [[0, 1, 0, 0], [0, 1, 1, 0], [0, 0, 1, 0], [0, 0, 0, 0]],
        [[0, 0, 0, 0], [0, 1, 1, 0], [1, 1, 0, 0], [0, 0, 0, 0]],
        [[1, 0, 0, 0], [1, 1, 0, 0], [0, 1, 0, 0], [0, 0, 0, 0]],
    ],
])


# 0 - line, 1 - square, 2 - T(flip), 3 - |__, 4 - __|, 5 - -|_, 6 - _|-
def type_of_figure(arr):
    figure = [[arr[0][3], arr[0][4], arr[0][5], arr[0][6]],
              [arr[1][3], arr[1][4], arr[1][5], arr[1][6]]]
    if figure == [[1, 1, 1, 1], [0, 0, 0, 0]]:
        return 0
    elif figure == [[0, 1, 1, 0], [0, 1, 1, 0]]:
        return 1
    elif figure == [[0, 1, 0, 0], [1, 1, 1, 0]]:
        return 2
    elif figure == [[1, 0, 0, 0], [1, 1, 1, 0]]:
        return 3
    elif figure == [[0, 0, 1, 0], [1, 1, 1, 0]]:
        return 4
    elif figure == [[1, 1, 0, 0], [0, 1, 1, 0]]:
        return 5
    elif figure == [[0, 1, 1, 0], [1, 1, 0, 0]]:
        return 6


def type_figure_ext(field):
    piece_idx = type_of_figure(field)
    if piece_idx is None:
        piece_idx = type_of_figure(field[1:])
    if piece_idx is None:
        piece_idx = type_of_figure(field[2:])
    return piece_idx


def piece_weight(figure):
    weights = [0, 8, 7, 7, 7, 10, 10]  # additional score
    return weights[figure]


def find_figure(field, piece: int, exp_x_pos, up_to):
    possible = []
    if CONFIG['debug status'] >= 1:
        print(f'looking up to {up_to}')
    for rot in range(len(array_of_figures[piece])):
        for y_pos in range(up_to):
            for x_pos in range(exp_x_pos - 3, exp_x_pos + 4):
                flag = True
                for i in range(4):
                    for j in range(4):
                        if array_of_figures[piece][rot][i][j]:
                            if y_pos + i >= len(field) or x_pos + j >= len(field[0]) or y_pos + i < 0 or \
                                    x_pos + j < 0 or not field[y_pos + i][x_pos + j]:
                                flag = False
                if flag:
                    possible.append([rot, x_pos])
    return possible
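A small illustration of type_of_figure: it inspects only columns 3-6 of the first two rows it receives, assumed here to be the spawn area of the board. The 10-cell row width is a hypothetical board size, not taken from the source:

spawn_rows = [
    [0, 0, 0, 0, 1, 1, 0, 0, 0, 0],  # columns 4 and 5 occupied
    [0, 0, 0, 0, 1, 1, 0, 0, 0, 0],
]
assert type_of_figure(spawn_rows) == 1  # recognized as 1 - square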
37.598039
105
0.357497
757
3,835
1.747688
0.088507
0.42328
0.473923
0.471655
0.500378
0.500378
0.452759
0.443689
0.401361
0.401361
0
0.228229
0.368188
3,835
101
106
37.970297
0.317788
0.022164
0
0.122222
0
0
0.008807
0
0
0
0
0
0
1
0.044444
false
0
0.022222
0
0.177778
0.011111
0
0
1
null
1
1
1
0
0
0
0
0
0
0
1
0
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
1b4447883b410cc4233e7a4dc8495e9fbf0490bf
86
py
Python
tests/data/test_indicators.py
JeanMax/babao
65fac36fd726fc5d05d5d8cf7d25e916eae2a373
[ "Beerware" ]
8
2018-01-14T12:08:11.000Z
2021-12-19T22:43:38.000Z
tests/data/test_indicators.py
JeanMax/babao
65fac36fd726fc5d05d5d8cf7d25e916eae2a373
[ "Beerware" ]
5
2019-03-15T07:55:48.000Z
2019-10-01T15:57:14.000Z
tests/data/test_indicators.py
JeanMax/babao
65fac36fd726fc5d05d5d8cf7d25e916eae2a373
[ "Beerware" ]
3
2019-07-12T06:00:39.000Z
2020-02-01T04:41:20.000Z
import babao.utils.indicators as indic

# TODO: I'm not sure how to handle data files
21.5
45
0.767442
16
86
4.125
1
0
0
0
0
0
0
0
0
0
0
0
0.174419
86
3
46
28.666667
0.929577
0.5
0
0
0
0
0
0
0
0
0
0.333333
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
0
1
0
1
0
1
0
0
6
1b7f0e0f51f62ed4ff66c9bd67d8b83eed08b88b
123
py
Python
problem_20/factorial_digit_sum.py
plilja/project-euler
646d1989cf15e903ef7e3c6e487284847d522ec9
[ "Apache-2.0" ]
null
null
null
problem_20/factorial_digit_sum.py
plilja/project-euler
646d1989cf15e903ef7e3c6e487284847d522ec9
[ "Apache-2.0" ]
null
null
null
problem_20/factorial_digit_sum.py
plilja/project-euler
646d1989cf15e903ef7e3c6e487284847d522ec9
[ "Apache-2.0" ]
null
null
null
from common.functions import factorial


def factorial_digit_sum(n):
    return sum([int(x) for x in str(factorial(n))])
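A worked check, assuming common.functions.factorial is the ordinary integer factorial: 10! = 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27, so:

assert factorial_digit_sum(10) == 27

For the Project Euler problem 20 input, n = 100, the digit sum is 648.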
15.375
51
0.723577
20
123
4.35
0.75
0
0
0
0
0
0
0
0
0
0
0
0.162602
123
7
52
17.571429
0.84466
0
0
0
0
0
0
0
0
0
0
0
0
1
0.333333
false
0
0.333333
0.333333
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
1
1
1
0
0
6
1bd002d13f87d38ac876ce931b5d12fbcfe7bd69
189
py
Python
collections/nemo_nlp/nemo_nlp/transformer/__init__.py
harisankarh/NeMo
27bfb1aed24a786626e1c27c37417ebcd226ca8a
[ "Apache-2.0" ]
1
2019-09-17T03:42:14.000Z
2019-09-17T03:42:14.000Z
collections/nemo_nlp/nemo_nlp/transformer/__init__.py
harisankarh/NeMo
27bfb1aed24a786626e1c27c37417ebcd226ca8a
[ "Apache-2.0" ]
null
null
null
collections/nemo_nlp/nemo_nlp/transformer/__init__.py
harisankarh/NeMo
27bfb1aed24a786626e1c27c37417ebcd226ca8a
[ "Apache-2.0" ]
1
2020-08-25T06:43:34.000Z
2020-08-25T06:43:34.000Z
# Copyright (c) 2019 NVIDIA Corporation
from .modules import *
from .encoders import *
from .decoders import *
from .softmax_layers import *
from .losses import *
from .generators import *
23.625
39
0.761905
24
189
5.958333
0.583333
0.34965
0
0
0
0
0
0
0
0
0
0.025157
0.15873
189
7
40
27
0.874214
0.195767
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
59f204bde1e43f915ce8dd2712f4cbbcf0c82846
95
py
Python
yawhois/parser/whois1_nic_bi.py
huyphan/pyyawhois
77fb2f73a9c67989f1d41d98f37037406a69d136
[ "MIT" ]
null
null
null
yawhois/parser/whois1_nic_bi.py
huyphan/pyyawhois
77fb2f73a9c67989f1d41d98f37037406a69d136
[ "MIT" ]
null
null
null
yawhois/parser/whois1_nic_bi.py
huyphan/pyyawhois
77fb2f73a9c67989f1d41d98f37037406a69d136
[ "MIT" ]
null
null
null
from .base_cocca2 import BaseCocca2Parser


class Whois1NicBiParser(BaseCocca2Parser):
    pass
19
42
0.831579
9
95
8.666667
0.888889
0
0
0
0
0
0
0
0
0
0
0.048193
0.126316
95
4
43
23.75
0.891566
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.333333
0.333333
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
1
0
1
0
0
6
847209cf924174c7af3d5db1a4778ca156b3cae7
64
py
Python
tests/_compat.py
igor-simoes/metadata_parser
88fda041a7a65c9ca88c24ca515073219921b254
[ "MIT" ]
null
null
null
tests/_compat.py
igor-simoes/metadata_parser
88fda041a7a65c9ca88c24ca515073219921b254
[ "MIT" ]
null
null
null
tests/_compat.py
igor-simoes/metadata_parser
88fda041a7a65c9ca88c24ca515073219921b254
[ "MIT" ]
null
null
null
from six import PY2
from six.moves.urllib_parse import urlparse
21.333333
43
0.84375
11
64
4.818182
0.727273
0.264151
0
0
0
0
0
0
0
0
0
0.017857
0.125
64
2
44
32
0.928571
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
849beb456560816172ca2a3d7358f239f5b6a408
6,078
py
Python
db/tracker/migrations/0001_initial.py
sgowda/brain-python-interface
708e2a5229d0496a8ce9de32bda66f0925d366d9
[ "Apache-2.0" ]
7
2015-08-25T00:28:49.000Z
2020-04-14T22:58:51.000Z
db/tracker/migrations/0001_initial.py
sgowda/brain-python-interface
708e2a5229d0496a8ce9de32bda66f0925d366d9
[ "Apache-2.0" ]
89
2020-08-03T16:54:08.000Z
2022-03-09T19:56:19.000Z
db/tracker/migrations/0001_initial.py
sgowda/brain-python-interface
708e2a5229d0496a8ce9de32bda66f0925d366d9
[ "Apache-2.0" ]
4
2016-10-05T17:54:26.000Z
2020-08-06T15:37:09.000Z
# Generated by Django 2.2.1 on 2019-11-12 23:02

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='AutoAlignment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date', models.DateTimeField(auto_now_add=True)),
                ('name', models.TextField()),
            ],
        ),
        migrations.CreateModel(
            name='Feature',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=128)),
                ('visible', models.BooleanField(blank=True, default=True)),
            ],
        ),
        migrations.CreateModel(
            name='Generator',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=128)),
                ('params', models.TextField()),
                ('static', models.BooleanField()),
                ('visible', models.BooleanField(blank=True, default=True)),
            ],
        ),
        migrations.CreateModel(
            name='Sequence',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date', models.DateTimeField(auto_now_add=True)),
                ('name', models.CharField(max_length=128)),
                ('params', models.TextField()),
                ('sequence', models.TextField(blank=True)),
                ('generator', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='tracker.Generator')),
            ],
        ),
        migrations.CreateModel(
            name='Subject',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=128)),
            ],
        ),
        migrations.CreateModel(
            name='System',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=128)),
                ('path', models.TextField()),
                ('archive', models.TextField()),
            ],
        ),
        migrations.CreateModel(
            name='Task',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=128)),
                ('visible', models.BooleanField(blank=True, default=True)),
            ],
        ),
        migrations.CreateModel(
            name='TaskEntry',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date', models.DateTimeField(auto_now_add=True)),
                ('params', models.TextField()),
                ('report', models.TextField()),
                ('notes', models.TextField()),
                ('visible', models.BooleanField(blank=True, default=True)),
                ('backup', models.BooleanField(blank=True, default=False)),
                ('feats', models.ManyToManyField(to='tracker.Feature')),
                ('sequence', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='tracker.Sequence')),
                ('subject', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='tracker.Subject')),
                ('task', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='tracker.Task')),
            ],
        ),
        migrations.AddField(
            model_name='sequence',
            name='task',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='tracker.Task'),
        ),
        migrations.CreateModel(
            name='Decoder',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date', models.DateTimeField(auto_now_add=True)),
                ('name', models.CharField(max_length=128)),
                ('path', models.TextField()),
                ('entry', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='tracker.TaskEntry')),
            ],
        ),
        migrations.CreateModel(
            name='DataFile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date', models.DateTimeField(auto_now_add=True)),
                ('local', models.BooleanField(default=True)),
                ('archived', models.BooleanField(default=False)),
                ('path', models.CharField(max_length=256)),
                ('entry', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='tracker.TaskEntry')),
                ('system', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='tracker.System')),
            ],
        ),
        migrations.CreateModel(
            name='Calibration',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date', models.DateTimeField(auto_now_add=True)),
                ('name', models.CharField(max_length=128)),
                ('params', models.TextField()),
                ('subject', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='tracker.Subject')),
                ('system', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='tracker.System')),
            ],
        ),
    ]
46.396947
139
0.55742
570
6,078
5.829825
0.142105
0.02889
0.046344
0.072826
0.771893
0.739392
0.739392
0.72585
0.72585
0.708095
0
0.009772
0.292859
6,078
130
140
46.753846
0.763378
0.007404
0
0.682927
1
0
0.090532
0
0
0
0
0
0
1
0
false
0
0.01626
0
0.04878
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
ca21ee68b275c77a481e4b7da6c7b814b5c79f1c
20,152
py
Python
sdk/python/pulumi_gcp/diagflow/agent.py
dimpu47/pulumi-gcp
38355de300a5768e11c49d344a8165ba0735deed
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
sdk/python/pulumi_gcp/diagflow/agent.py
dimpu47/pulumi-gcp
38355de300a5768e11c49d344a8165ba0735deed
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
sdk/python/pulumi_gcp/diagflow/agent.py
dimpu47/pulumi-gcp
38355de300a5768e11c49d344a8165ba0735deed
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Dict, List, Mapping, Optional, Tuple, Union from .. import _utilities, _tables __all__ = ['Agent'] class Agent(pulumi.CustomResource): def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, api_version: Optional[pulumi.Input[str]] = None, avatar_uri: Optional[pulumi.Input[str]] = None, classification_threshold: Optional[pulumi.Input[float]] = None, default_language_code: Optional[pulumi.Input[str]] = None, description: Optional[pulumi.Input[str]] = None, display_name: Optional[pulumi.Input[str]] = None, enable_logging: Optional[pulumi.Input[bool]] = None, match_mode: Optional[pulumi.Input[str]] = None, project: Optional[pulumi.Input[str]] = None, supported_language_codes: Optional[pulumi.Input[List[pulumi.Input[str]]]] = None, tier: Optional[pulumi.Input[str]] = None, time_zone: Optional[pulumi.Input[str]] = None, __props__=None, __name__=None, __opts__=None): """ A Dialogflow agent is a virtual agent that handles conversations with your end-users. It is a natural language understanding module that understands the nuances of human language. Dialogflow translates end-user text or audio during a conversation to structured data that your apps and services can understand. You design and build a Dialogflow agent to handle the types of conversations required for your system. To get more information about Agent, see: * [API documentation](https://cloud.google.com/dialogflow/docs/reference/rest/v2/projects/agent) * How-to Guides * [Official Documentation](https://cloud.google.com/dialogflow/docs/) ## Example Usage :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] api_version: API version displayed in Dialogflow console. If not specified, V2 API is assumed. Clients are free to query different service endpoints for different API versions. However, bots connectors and webhook calls will follow the specified API version. * API_VERSION_V1: Legacy V1 API. * API_VERSION_V2: V2 API. * API_VERSION_V2_BETA_1: V2beta1 API. Possible values are `API_VERSION_V1`, `API_VERSION_V2`, and `API_VERSION_V2_BETA_1`. :param pulumi.Input[str] avatar_uri: The URI of the agent's avatar, which are used throughout the Dialogflow console. When an image URL is entered into this field, the Dialogflow will save the image in the backend. The address of the backend image returned from the API will be shown in the [avatarUriBackend] field. :param pulumi.Input[float] classification_threshold: To filter out false positive results and still get variety in matched natural language inputs for your agent, you can tune the machine learning classification threshold. If the returned score value is less than the threshold value, then a fallback intent will be triggered or, if there are no fallback intents defined, no intent will be triggered. The score values range from 0.0 (completely uncertain) to 1.0 (completely certain). If set to 0.0, the default of 0.3 is used. :param pulumi.Input[str] default_language_code: The default language of the agent as a language tag. [See Language Support](https://cloud.google.com/dialogflow/docs/reference/language) for a list of the currently supported language codes. This field cannot be updated after creation. 
:param pulumi.Input[str] description: The description of this agent. The maximum length is 500 characters. If exceeded, the request is rejected. :param pulumi.Input[str] display_name: The name of this agent. :param pulumi.Input[bool] enable_logging: Determines whether this agent should log conversation queries. :param pulumi.Input[str] match_mode: Determines how intents are detected from user queries. * MATCH_MODE_HYBRID: Best for agents with a small number of examples in intents and/or wide use of templates syntax and composite entities. * MATCH_MODE_ML_ONLY: Can be used for agents with a large number of examples in intents, especially the ones using @sys.any or very large developer entities. Possible values are `MATCH_MODE_HYBRID` and `MATCH_MODE_ML_ONLY`. :param pulumi.Input[str] project: The ID of the project in which the resource belongs. If it is not provided, the provider project is used. :param pulumi.Input[List[pulumi.Input[str]]] supported_language_codes: The list of all languages supported by this agent (except for the defaultLanguageCode). :param pulumi.Input[str] tier: The agent tier. If not specified, TIER_STANDARD is assumed. * TIER_STANDARD: Standard tier. * TIER_ENTERPRISE: Enterprise tier (Essentials). * TIER_ENTERPRISE_PLUS: Enterprise tier (Plus). NOTE: Due to consistency issues, the provider will not read this field from the API. Drift is possible between the the provider state and Dialogflow if the agent tier is changed outside of the provider. :param pulumi.Input[str] time_zone: The time zone of this agent from the [time zone database](https://www.iana.org/time-zones), e.g., America/New_York, Europe/Paris. """ if __name__ is not None: warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning) resource_name = __name__ if __opts__ is not None: warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning) opts = __opts__ if opts is None: opts = pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') if opts.version is None: opts.version = _utilities.get_version() if opts.id is None: if __props__ is not None: raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = dict() __props__['api_version'] = api_version __props__['avatar_uri'] = avatar_uri __props__['classification_threshold'] = classification_threshold if default_language_code is None: raise TypeError("Missing required property 'default_language_code'") __props__['default_language_code'] = default_language_code __props__['description'] = description if display_name is None: raise TypeError("Missing required property 'display_name'") __props__['display_name'] = display_name __props__['enable_logging'] = enable_logging __props__['match_mode'] = match_mode __props__['project'] = project __props__['supported_language_codes'] = supported_language_codes __props__['tier'] = tier if time_zone is None: raise TypeError("Missing required property 'time_zone'") __props__['time_zone'] = time_zone __props__['avatar_uri_backend'] = None super(Agent, __self__).__init__( 'gcp:diagflow/agent:Agent', resource_name, __props__, opts) @staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None, api_version: Optional[pulumi.Input[str]] = None, avatar_uri: Optional[pulumi.Input[str]] = None, avatar_uri_backend: Optional[pulumi.Input[str]] = None, 
classification_threshold: Optional[pulumi.Input[float]] = None, default_language_code: Optional[pulumi.Input[str]] = None, description: Optional[pulumi.Input[str]] = None, display_name: Optional[pulumi.Input[str]] = None, enable_logging: Optional[pulumi.Input[bool]] = None, match_mode: Optional[pulumi.Input[str]] = None, project: Optional[pulumi.Input[str]] = None, supported_language_codes: Optional[pulumi.Input[List[pulumi.Input[str]]]] = None, tier: Optional[pulumi.Input[str]] = None, time_zone: Optional[pulumi.Input[str]] = None) -> 'Agent': """ Get an existing Agent resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] api_version: API version displayed in Dialogflow console. If not specified, V2 API is assumed. Clients are free to query different service endpoints for different API versions. However, bots connectors and webhook calls will follow the specified API version. * API_VERSION_V1: Legacy V1 API. * API_VERSION_V2: V2 API. * API_VERSION_V2_BETA_1: V2beta1 API. Possible values are `API_VERSION_V1`, `API_VERSION_V2`, and `API_VERSION_V2_BETA_1`. :param pulumi.Input[str] avatar_uri: The URI of the agent's avatar, which are used throughout the Dialogflow console. When an image URL is entered into this field, the Dialogflow will save the image in the backend. The address of the backend image returned from the API will be shown in the [avatarUriBackend] field. :param pulumi.Input[str] avatar_uri_backend: The URI of the agent's avatar as returned from the API. Output only. To provide an image URL for the agent avatar, the [avatarUri] field can be used. :param pulumi.Input[float] classification_threshold: To filter out false positive results and still get variety in matched natural language inputs for your agent, you can tune the machine learning classification threshold. If the returned score value is less than the threshold value, then a fallback intent will be triggered or, if there are no fallback intents defined, no intent will be triggered. The score values range from 0.0 (completely uncertain) to 1.0 (completely certain). If set to 0.0, the default of 0.3 is used. :param pulumi.Input[str] default_language_code: The default language of the agent as a language tag. [See Language Support](https://cloud.google.com/dialogflow/docs/reference/language) for a list of the currently supported language codes. This field cannot be updated after creation. :param pulumi.Input[str] description: The description of this agent. The maximum length is 500 characters. If exceeded, the request is rejected. :param pulumi.Input[str] display_name: The name of this agent. :param pulumi.Input[bool] enable_logging: Determines whether this agent should log conversation queries. :param pulumi.Input[str] match_mode: Determines how intents are detected from user queries. * MATCH_MODE_HYBRID: Best for agents with a small number of examples in intents and/or wide use of templates syntax and composite entities. * MATCH_MODE_ML_ONLY: Can be used for agents with a large number of examples in intents, especially the ones using @sys.any or very large developer entities. Possible values are `MATCH_MODE_HYBRID` and `MATCH_MODE_ML_ONLY`. :param pulumi.Input[str] project: The ID of the project in which the resource belongs. 
If it is not provided, the provider project is used. :param pulumi.Input[List[pulumi.Input[str]]] supported_language_codes: The list of all languages supported by this agent (except for the defaultLanguageCode). :param pulumi.Input[str] tier: The agent tier. If not specified, TIER_STANDARD is assumed. * TIER_STANDARD: Standard tier. * TIER_ENTERPRISE: Enterprise tier (Essentials). * TIER_ENTERPRISE_PLUS: Enterprise tier (Plus). NOTE: Due to consistency issues, the provider will not read this field from the API. Drift is possible between the the provider state and Dialogflow if the agent tier is changed outside of the provider. :param pulumi.Input[str] time_zone: The time zone of this agent from the [time zone database](https://www.iana.org/time-zones), e.g., America/New_York, Europe/Paris. """ opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = dict() __props__["api_version"] = api_version __props__["avatar_uri"] = avatar_uri __props__["avatar_uri_backend"] = avatar_uri_backend __props__["classification_threshold"] = classification_threshold __props__["default_language_code"] = default_language_code __props__["description"] = description __props__["display_name"] = display_name __props__["enable_logging"] = enable_logging __props__["match_mode"] = match_mode __props__["project"] = project __props__["supported_language_codes"] = supported_language_codes __props__["tier"] = tier __props__["time_zone"] = time_zone return Agent(resource_name, opts=opts, __props__=__props__) @property @pulumi.getter(name="apiVersion") def api_version(self) -> pulumi.Output[str]: """ API version displayed in Dialogflow console. If not specified, V2 API is assumed. Clients are free to query different service endpoints for different API versions. However, bots connectors and webhook calls will follow the specified API version. * API_VERSION_V1: Legacy V1 API. * API_VERSION_V2: V2 API. * API_VERSION_V2_BETA_1: V2beta1 API. Possible values are `API_VERSION_V1`, `API_VERSION_V2`, and `API_VERSION_V2_BETA_1`. """ return pulumi.get(self, "api_version") @property @pulumi.getter(name="avatarUri") def avatar_uri(self) -> pulumi.Output[Optional[str]]: """ The URI of the agent's avatar, which are used throughout the Dialogflow console. When an image URL is entered into this field, the Dialogflow will save the image in the backend. The address of the backend image returned from the API will be shown in the [avatarUriBackend] field. """ return pulumi.get(self, "avatar_uri") @property @pulumi.getter(name="avatarUriBackend") def avatar_uri_backend(self) -> pulumi.Output[str]: """ The URI of the agent's avatar as returned from the API. Output only. To provide an image URL for the agent avatar, the [avatarUri] field can be used. """ return pulumi.get(self, "avatar_uri_backend") @property @pulumi.getter(name="classificationThreshold") def classification_threshold(self) -> pulumi.Output[Optional[float]]: """ To filter out false positive results and still get variety in matched natural language inputs for your agent, you can tune the machine learning classification threshold. If the returned score value is less than the threshold value, then a fallback intent will be triggered or, if there are no fallback intents defined, no intent will be triggered. The score values range from 0.0 (completely uncertain) to 1.0 (completely certain). If set to 0.0, the default of 0.3 is used. 
""" return pulumi.get(self, "classification_threshold") @property @pulumi.getter(name="defaultLanguageCode") def default_language_code(self) -> pulumi.Output[str]: """ The default language of the agent as a language tag. [See Language Support](https://cloud.google.com/dialogflow/docs/reference/language) for a list of the currently supported language codes. This field cannot be updated after creation. """ return pulumi.get(self, "default_language_code") @property @pulumi.getter def description(self) -> pulumi.Output[Optional[str]]: """ The description of this agent. The maximum length is 500 characters. If exceeded, the request is rejected. """ return pulumi.get(self, "description") @property @pulumi.getter(name="displayName") def display_name(self) -> pulumi.Output[str]: """ The name of this agent. """ return pulumi.get(self, "display_name") @property @pulumi.getter(name="enableLogging") def enable_logging(self) -> pulumi.Output[Optional[bool]]: """ Determines whether this agent should log conversation queries. """ return pulumi.get(self, "enable_logging") @property @pulumi.getter(name="matchMode") def match_mode(self) -> pulumi.Output[str]: """ Determines how intents are detected from user queries. * MATCH_MODE_HYBRID: Best for agents with a small number of examples in intents and/or wide use of templates syntax and composite entities. * MATCH_MODE_ML_ONLY: Can be used for agents with a large number of examples in intents, especially the ones using @sys.any or very large developer entities. Possible values are `MATCH_MODE_HYBRID` and `MATCH_MODE_ML_ONLY`. """ return pulumi.get(self, "match_mode") @property @pulumi.getter def project(self) -> pulumi.Output[str]: """ The ID of the project in which the resource belongs. If it is not provided, the provider project is used. """ return pulumi.get(self, "project") @property @pulumi.getter(name="supportedLanguageCodes") def supported_language_codes(self) -> pulumi.Output[Optional[List[str]]]: """ The list of all languages supported by this agent (except for the defaultLanguageCode). """ return pulumi.get(self, "supported_language_codes") @property @pulumi.getter def tier(self) -> pulumi.Output[Optional[str]]: """ The agent tier. If not specified, TIER_STANDARD is assumed. * TIER_STANDARD: Standard tier. * TIER_ENTERPRISE: Enterprise tier (Essentials). * TIER_ENTERPRISE_PLUS: Enterprise tier (Plus). NOTE: Due to consistency issues, the provider will not read this field from the API. Drift is possible between the the provider state and Dialogflow if the agent tier is changed outside of the provider. """ return pulumi.get(self, "tier") @property @pulumi.getter(name="timeZone") def time_zone(self) -> pulumi.Output[str]: """ The time zone of this agent from the [time zone database](https://www.iana.org/time-zones), e.g., America/New_York, Europe/Paris. """ return pulumi.get(self, "time_zone") def translate_output_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop def translate_input_property(self, prop): return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
57.908046
192
0.671249
2,590
20,152
5.05251
0.130502
0.047073
0.047073
0.028886
0.779765
0.750879
0.733456
0.715192
0.70541
0.70541
0
0.004916
0.253027
20,152
347
193
58.074928
0.864412
0.55786
0
0.272727
1
0
0.141229
0.039172
0
0
0
0
0
1
0.11039
false
0.006494
0.032468
0.012987
0.253247
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
ca27159be19ad65235e4fa6c130f3a1a14b6f8be
238
py
Python
brick_server/minimal/interfaces/actuation/dummy_actuation.py
BrickSchema/brick-example-server
2e184dd96ee14b4a4c14189b5bea9989a9befbbf
[ "BSD-3-Clause" ]
3
2021-12-10T17:08:30.000Z
2022-02-10T04:43:35.000Z
brick_server/minimal/interfaces/actuation/dummy_actuation.py
BrickSchema/brick-example-server
2e184dd96ee14b4a4c14189b5bea9989a9befbbf
[ "BSD-3-Clause" ]
13
2021-12-04T02:23:07.000Z
2022-02-07T23:49:51.000Z
brick_server/minimal/interfaces/actuation/dummy_actuation.py
BrickSchema/brick-example-server
2e184dd96ee14b4a4c14189b5bea9989a9befbbf
[ "BSD-3-Clause" ]
4
2021-12-30T21:59:02.000Z
2022-03-15T16:36:54.000Z
from brick_server.minimal.interfaces.actuation.base_actuation import BaseActuation


class DummyActuation(BaseActuation):
    def __init__(self, *args, **kwargs):
        pass

    def actuate(self, entity_id, value):
        return True
23.8
82
0.731092
27
238
6.185185
0.851852
0
0
0
0
0
0
0
0
0
0
0
0.184874
238
9
83
26.444444
0.860825
0
0
0
0
0
0
0
0
0
0
0
0
1
0.333333
false
0.166667
0.166667
0.166667
0.833333
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
1
1
0
0
6
ca6af42382df1dee755307eff1802eaeb4f71e77
10,024
py
Python
tests/test_state.py
clu-ling/clu-phontools
304510150c6f9a4b0e1372bc9275630b7f976aeb
[ "Apache-2.0" ]
null
null
null
tests/test_state.py
clu-ling/clu-phontools
304510150c6f9a4b0e1372bc9275630b7f976aeb
[ "Apache-2.0" ]
3
2021-06-15T23:32:30.000Z
2021-09-01T18:49:20.000Z
tests/test_state.py
clu-ling/clu-phontools
304510150c6f9a4b0e1372bc9275630b7f976aeb
[ "Apache-2.0" ]
1
2021-06-18T05:48:29.000Z
2021-06-18T05:48:29.000Z
# -*- coding: utf-8 -*-
import unittest
from clu.phontools.alignment.parser import *

"""
Test behavior of State
"""


class StateTests(unittest.TestCase):

    # for test purposes
    gold_a = Symbol(
        symbol="a",
        original_index=0,
        index=0,
        source=TranscriptTypes.GOLD,
    )
    gold_b = Symbol(
        symbol="b",
        original_index=1,
        index=1,
        source=TranscriptTypes.GOLD,
    )
    trans_a = Symbol(
        symbol="a",
        original_index=0,
        index=0,
        source=TranscriptTypes.TRANSCRIPT,
    )
    trans_b = Symbol(
        symbol="b",
        original_index=1,
        index=1,
        source=TranscriptTypes.TRANSCRIPT,
    )

    # Actions
    def test_ALIGN(self):
        """`clu.phontools.alignment.parser.state.State` should support Actions.ALIGN."""
        state = State(
            stack=Stack(
                [
                    # top of stack
                    StateTests.trans_a,
                    StateTests.gold_a,
                ]
            ),
            gold_queue=Queue([]),
            transcribed_queue=Queue([]),
            gold_graph=None,
            current_graph=Graph(edges=[]),
        )
        valid_actions = state.valid_actions()
        ACTION = Actions.ALIGN
        new_state = state.perform_action(ACTION)
        self.assertTrue(
            Actions.ALIGN in valid_actions,
            f"state should support Actions.ALIGN, but only the following were present: {valid_actions}.",
        )
        self.assertTrue(
            state.is_valid(ACTION),
            f"configured state should allow ALIGN action when top two items on stack are from GOLD and TRANSCRIPT",
        )
        self.assertTrue(
            len(new_state.current_graph.edges) == 1,
            f"new_state should contain a single edge, but {len(new_state.current_graph.edges)} found.",
        )
        self.assertEqual(
            new_state.last_action(),
            ACTION,
            f"new_state.last_action() should be {ACTION}, but was {new_state.last_action()}",
        )
        self.assertTrue(
            new_state.current_graph.edges[0].label == ACTION,
            f"label of single edge in new_state.current_state should be {ACTION}, but label was {new_state.current_graph.edges[0].label}",
        )
        edge = new_state.current_graph.edges[0]
        self.assertEqual(
            edge.destination.source,
            TranscriptTypes.GOLD,
            f"ALIGN must point from TRANSCRIPT -> GOLD",
        )
        problem_stack = Stack([StateTests.gold_a, StateTests.gold_b])
        bad_state = state.copy(stack=problem_stack)
        self.assertFalse(
            bad_state.is_valid(ACTION),
            f"state should NOT allow ALIGN action when top two items on stack are both from GOLD",
        )

    def test_STACK_SWAP(self):
        """`clu.phontools.alignment.parser.state.State` should support Actions.STACK_SWAP."""
        first_ps = StateTests.trans_b
        second_ps = StateTests.trans_a
        state = State(
            stack=Stack(
                [
                    # top of stack
                    first_ps,
                    second_ps,
                ]
            ),
            gold_queue=Queue(),
            transcribed_queue=Queue(),
            gold_graph=None,
            current_graph=Graph(edges=[]),
        )
        valid_actions = state.valid_actions()
        ACTION = Actions.STACK_SWAP
        new_state = state.perform_action(Actions.STACK_SWAP)
        self.assertTrue(
            ACTION in valid_actions,
            f"state should support {ACTION}, but only the following were present: {valid_actions}.",
        )
        self.assertTrue(
            state.is_valid(ACTION),
            f"configured state should allow STACK_SWAP action when there are >= 2 items on stack",
        )
        self.assertTrue(
            len(new_state.current_graph.edges) == 0,
            f"new_state should not contain any edges, but {len(new_state.current_graph.edges)} found.",
        )
        self.assertEqual(
            new_state.last_action(),
            ACTION,
            f"new_state.last_action() should be {ACTION}, but was {new_state.last_action()}",
        )
        top = new_state.stack.pop()
        self.assertEqual(
            top,
            second_ps,
            f"first item in stack of new_stack should now be 'a', but {top.symbol} found.",
        )
        problem_stack = Stack([StateTests.gold_a])
        bad_state = state.copy(stack=problem_stack)
        self.assertFalse(
            bad_state.is_valid(ACTION),
            f"state should NOT allow STACK_SWAP action when < 2 items on stack",
        )

    def test_SHIFT_G(self):
        """`clu.phontools.alignment.parser.state.State` should support Actions.SHIFT_G."""
        state = State(
            stack=Stack(),
            gold_queue=Queue([StateTests.gold_a]),
            transcribed_queue=Queue(),
            gold_graph=None,
            current_graph=Graph(edges=[]),
        )
        valid_actions = state.valid_actions()
        ACTION = Actions.SHIFT_G
        new_state = state.perform_action(ACTION)
        self.assertTrue(
            ACTION in valid_actions,
            f"state should support Actions.SHIFT_G, but only the following were present: {valid_actions}.",
        )
        self.assertTrue(
            state.is_valid(ACTION),
            f"configured state should allow SHIFT_G action when there are > 0 items on gold_queue",
        )
        self.assertTrue(
            len(new_state.current_graph.edges) == 0,
            f"new_state should not contain any edges, but {len(new_state.current_graph.edges)} found.",
        )
        self.assertEqual(
            new_state.last_action(),
            ACTION,
            f"new_state.last_action() should be {ACTION}, but was {new_state.last_action()}",
        )
        top = new_state.stack.pop()
        self.assertEqual(
            top,
            StateTests.gold_a,
            f"first item in stack of new_stack should now be 'a', but {top.symbol} found.",
        )
        problem_queue = Queue()
        bad_state = state.copy(gold_queue=problem_queue)
        self.assertFalse(
            bad_state.is_valid(ACTION),
            f"state should NOT allow SHIFT_G action when < 1 items on gold_queue",
        )

    def test_SHIFT_T(self):
        """`clu.phontools.alignment.parser.state.State` should support Actions.SHIFT_T."""
        state = State(
            stack=Stack(),
            gold_queue=Queue(),
            transcribed_queue=Queue([StateTests.trans_a]),
            gold_graph=None,
            current_graph=Graph(edges=[]),
        )
        valid_actions = state.valid_actions()
        ACTION = Actions.SHIFT_T
        new_state = state.perform_action(ACTION)
        self.assertTrue(
            ACTION in valid_actions,
            f"state should support {ACTION}, but only the following were present: {valid_actions}.",
        )
        self.assertTrue(
            state.is_valid(ACTION),
            f"configured state should allow {ACTION} action when there are > 0 items on transcribed_queue",
        )
        self.assertTrue(
            len(new_state.current_graph.edges) == 0,
            f"new_state should not contain any edges, but {len(new_state.current_graph.edges)} found.",
        )
        self.assertEqual(
            new_state.last_action(),
            ACTION,
            f"new_state.last_action() should be {ACTION}, but was {new_state.last_action()}",
        )
        top = new_state.stack.pop()
        self.assertEqual(
            top,
            StateTests.trans_a,
            f"first item in stack of new_stack should now be 'a', but {top.symbol} found.",
        )
        problem_queue = Queue()
        bad_state = state.copy(transcribed_queue=problem_queue)
        self.assertFalse(
            bad_state.is_valid(ACTION),
            f"state should NOT allow {ACTION} action when < 1 items on transcribed_queue",
        )

    # def test_INSERTION_PRESERVE_CHILD(self):
    #     """`clu.phontools.alignment.parser.state.State` should support Actions.INSERTION_PRESERVE_CHILD."""
    #     ACTION = Actions.INSERTION_PRESERVE_CHILD
    #     stack = Stack()
    #     stack.push(StateTests.trans_b)
    #     stack.push(StateTests.gold_a)
    #     state = State(
    #         stack=stack,
    #         gold_queue=Queue(),
    #         transcribed_queue=Queue(),
    #         gold_graph=None,
    #         current_graph=Graph(edges=[]),
    #     )
    #     valid_actions = state.valid_actions()
    #     new_state = state.perform_action(ACTION)
    #     self.assertTrue(
    #         ACTION in valid_actions,
    #         f"state should support {ACTION}, but only the following were present: {valid_actions}.",
    #     )
    #     self.assertTrue(
    #         state.is_valid(ACTION),
    #         f"configured state should allow {ACTION} action when there are > 0 items on Stack and both are from gold and transcribed",
    #     )
    #     self.assertTrue(
    #         len(new_state.current_graph.edges) == 1,
    #         f"new_state should contain 1 edge, but {len(new_state.current_graph.edges)} found.",
    #     )
    #     self.assertEqual(
    #         new_state.last_action(),
    #         ACTION,
    #         f"new_state.last_action() should be {ACTION}, but was {new_state.last_action()}",
    #     )
    #     top = new_state.stack.pop()
    #     self.assertEqual(
    #         top,
    #         StateTests.gold_a,
    #         f"first item in stack of new_stack should now be 'a', but {top.symbol} found.",
    #     )
    #     problem_stack = Stack([StateTests.gold_a, StateTests.gold_b])
    #     bad_state = state.copy(stack=problem_stack)
    #     self.assertFalse(
    #         bad_state.is_valid(ACTION),
    #         f"state should NOT allow {ACTION} action when stack is {problem_stack._symbols}",
    #     )
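Editor's note: a minimal usage sketch of the State API the tests above exercise. The names and constructor shapes are taken directly from the tests; anything beyond that (e.g., printing the edge list) is illustrative, not the library's documented surface.

# Hypothetical usage sketch; mirrors the API exercised by StateTests above.
from clu.phontools.alignment.parser import *

gold = Symbol(symbol="a", original_index=0, index=0, source=TranscriptTypes.GOLD)
trans = Symbol(symbol="a", original_index=0, index=0, source=TranscriptTypes.TRANSCRIPT)

state = State(
    stack=Stack([trans, gold]),  # top of stack first, as in the tests
    gold_queue=Queue([]),
    transcribed_queue=Queue([]),
    gold_graph=None,
    current_graph=Graph(edges=[]),
)
if state.is_valid(Actions.ALIGN):
    aligned = state.perform_action(Actions.ALIGN)
    # per the tests, expect exactly one ALIGN edge pointing TRANSCRIPT -> GOLD
    print(aligned.current_graph.edges)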
32.75817
138
0.573524
1,137
10,024
4.868074
0.091469
0.06215
0.03252
0.04878
0.830714
0.810117
0.804155
0.771274
0.754472
0.754472
0
0.003703
0.326417
10,024
305
139
32.865574
0.816055
0.205706
0
0.591133
0
0.004926
0.258934
0.050744
0
0
0
0
0.123153
1
0.019704
false
0
0.009852
0
0.054187
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
046cb1fb02943d2e78e05f6ed0b63afbe5163b26
22
py
Python
cocos/numerics/linalg/__init__.py
michaelnowotny/cocos
3c34940d7d9eb8592a97788a5df84b8d472f2928
[ "MIT" ]
101
2019-03-30T05:23:01.000Z
2021-11-27T09:09:40.000Z
cocos/numerics/linalg/__init__.py
michaelnowotny/cocos
3c34940d7d9eb8592a97788a5df84b8d472f2928
[ "MIT" ]
3
2019-04-17T06:04:12.000Z
2020-12-14T17:36:01.000Z
cocos/numerics/linalg/__init__.py
michaelnowotny/cocos
3c34940d7d9eb8592a97788a5df84b8d472f2928
[ "MIT" ]
5
2020-02-07T14:29:50.000Z
2020-12-09T17:54:07.000Z
from ._linalg import *
22
22
0.772727
3
22
5.333333
1
0
0
0
0
0
0
0
0
0
0
0
0.136364
22
1
22
22
0.842105
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
048afde21d26c9f0fdd6944fcb2104efd880b0b7
223
py
Python
django_simple_task/__init__.py
raratiru/django-simple-task
40af091916c88167cd85fadf76e85d3486b4061b
[ "MIT" ]
97
2019-12-29T17:59:26.000Z
2022-03-19T03:09:02.000Z
django_simple_task/__init__.py
raratiru/django-simple-task
40af091916c88167cd85fadf76e85d3486b4061b
[ "MIT" ]
13
2019-12-30T22:40:50.000Z
2021-09-22T18:19:40.000Z
django_simple_task/__init__.py
raratiru/django-simple-task
40af091916c88167cd85fadf76e85d3486b4061b
[ "MIT" ]
6
2020-01-03T09:39:06.000Z
2021-06-24T11:56:38.000Z
from .task import defer
from .middleware import django_simple_task_middlware

__all__ = ["defer", "django_simple_task_middlware"]

__version__ = "0.1.1"

default_app_config = "django_simple_task.apps.DjangoSimpleTaskConfig"
27.875
69
0.820628
29
223
5.689655
0.586207
0.218182
0.290909
0.30303
0
0
0
0
0
0
0
0.014778
0.089686
223
7
70
31.857143
0.79803
0
0
0
0
0
0.376682
0.331839
0
0
0
0
0
1
0
false
0
0.4
0
0.4
0
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
6
04933d91220a5948cf18a707ff4a9f4d1717bcc1
219
py
Python
src/quiltz/domain/id/testbuilders.py
qwaneu/quiltz-domain
3b487c8396c89f653b7aa42b9d34f59baa3ace09
[ "MIT" ]
null
null
null
src/quiltz/domain/id/testbuilders.py
qwaneu/quiltz-domain
3b487c8396c89f653b7aa42b9d34f59baa3ace09
[ "MIT" ]
null
null
null
src/quiltz/domain/id/testbuilders.py
qwaneu/quiltz-domain
3b487c8396c89f653b7aa42b9d34f59baa3ace09
[ "MIT" ]
null
null
null
from quiltz.domain.id import ID
import uuid


def aValidUUID(simpleIdValue):
    return uuid.UUID("{:>32}".format(simpleIdValue).replace(' ', '1'))


def aValidID(simpleIdValue):
    return ID(aValidUUID(simpleIdValue))
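Editor's note: a quick worked example of what these builders produce. The UUID values follow from the stdlib uuid module; the repr of ID itself depends on quiltz and is not shown.

# "{:>32}" right-aligns simpleIdValue in a 32-char field, then every pad
# space becomes '1', yielding a deterministic, valid UUID per input:
# aValidUUID('1')   -> UUID('11111111-1111-1111-1111-111111111111')
# aValidUUID('abc') -> UUID('11111111-1111-1111-1111-111111111abc')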
24.333333
70
0.730594
26
219
6.153846
0.576923
0.1
0
0
0
0
0
0
0
0
0
0.015625
0.123288
219
8
71
27.375
0.817708
0
0
0
0
0
0.03653
0
0
0
0
0
0
1
0.333333
false
0
0.333333
0.333333
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
1
1
1
0
0
6
0498f5d3a9aa46dbf6292c272f71ad5ec5778520
3,323
py
Python
pirates/leveleditor/worldData/CaveATemplate.py
Willy5s/Pirates-Online-Rewritten
7434cf98d9b7c837d57c181e5dabd02ddf98acb7
[ "BSD-3-Clause" ]
81
2018-04-08T18:14:24.000Z
2022-01-11T07:22:15.000Z
pirates/leveleditor/worldData/CaveATemplate.py
Willy5s/Pirates-Online-Rewritten
7434cf98d9b7c837d57c181e5dabd02ddf98acb7
[ "BSD-3-Clause" ]
4
2018-09-13T20:41:22.000Z
2022-01-08T06:57:00.000Z
pirates/leveleditor/worldData/CaveATemplate.py
Willy5s/Pirates-Online-Rewritten
7434cf98d9b7c837d57c181e5dabd02ddf98acb7
[ "BSD-3-Clause" ]
26
2018-05-26T12:49:27.000Z
2021-09-11T09:11:59.000Z
from pandac.PandaModules import Point3, VBase3
objectStruct = {'Objects': {'1172185213.66sdnaik': {'Type': 'Island Game Area','Name': 'CaveATemplate','File': '','Instanced': True,'Objects': {'1172185301.05sdnaik': {'Type': 'Locator Node','Name': 'portal_interior_1','Hpr': VBase3(-145.119, -1.51, 0.556),'Pos': Point3(295.633, 137.404, 2.838),'Scale': VBase3(1.0, 1.0, 1.0)},'1176742904.52dzlu': {'Type': 'Light - Dynamic','Attenuation': '0.005','ConeAngle': '120.0000','DropOff': '13.6364','Flickering': False,'Hpr': VBase3(4.687, -31.475, 59.072),'Intensity': '1.3636','LightType': 'SPOT','Pos': Point3(521.814, -432.777, 75.891),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (1, 1, 1, 1),'Model': 'models/props/light_tool_bulb'}},'1176743350.06dzlu': {'Type': 'Light - Dynamic','Attenuation': '0.005','ConeAngle': '120.0000','DropOff': '13.6364','Flickering': False,'Hpr': VBase3(161.03, -22.852, -75.005),'Intensity': '0.0000','LightType': 'SPOT','Pos': Point3(516.56, 93.97, 101.222),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (1, 1, 1, 1),'Model': 'models/props/light_tool_bulb'}},'1176743507.13dzlu': {'Type': 'Light - Dynamic','Attenuation': '0.005','ConeAngle': '120.0000','DropOff': '13.6364','Flickering': False,'Hpr': VBase3(-71.344, -32.039, 81.741),'Intensity': '1.5758','LightType': 'SPOT','Pos': Point3(100.408, -361.875, 30.748),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (1, 1, 1, 1),'Model': 'models/props/light_tool_bulb'}},'1176744538.92dzlu': {'Type': 'Light - Dynamic','Attenuation': '0.005','ConeAngle': '60.0000','DropOff': '12.2727','Flickering': False,'Hpr': VBase3(-10.265, 0.0, -1.456),'Intensity': '0.3030','LightType': 'POINT','Pos': Point3(541.627, -139.742, 90.96),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (1, 1, 1, 1),'Model': 'models/props/light_tool_bulb'}},'1176745540.34dzlu': {'Type': 'Light - Dynamic','Attenuation': '0.005','ConeAngle': '60.0000','DropOff': '0.0000','Flickering': False,'Hpr': VBase3(0.0, 0.0, 0.0),'Intensity': '0.1818','LightType': 'AMBIENT','Pos': Point3(391.643, -257.896, 17.21),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (1, 1, 1, 1),'Model': 'models/props/light_tool_bulb'}},'1176758375.78dzlu': {'Type': 'Light - Dynamic','Attenuation': '0.005','ConeAngle': '60.0000','DropOff': '0.0000','Flickering': False,'Hpr': VBase3(0.0, 0.0, 0.0),'Intensity': '0.1818','LightType': 'AMBIENT','Pos': Point3(435.448, -334.458, 6.826),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (1, 1, 1, 1),'Model': 'models/props/light_tool_bulb'}}},'Visual': {'Model': 'models/caves/cave_a_zero'}}},'Node Links': [],'Layers': {},'ObjectIds': {'1172185213.66sdnaik': '["Objects"]["1172185213.66sdnaik"]','1172185301.05sdnaik': '["Objects"]["1172185213.66sdnaik"]["Objects"]["1172185301.05sdnaik"]','1176742904.52dzlu': '["Objects"]["1172185213.66sdnaik"]["Objects"]["1176742904.52dzlu"]','1176743350.06dzlu': '["Objects"]["1172185213.66sdnaik"]["Objects"]["1176743350.06dzlu"]','1176743507.13dzlu': '["Objects"]["1172185213.66sdnaik"]["Objects"]["1176743507.13dzlu"]','1176744538.92dzlu': '["Objects"]["1172185213.66sdnaik"]["Objects"]["1176744538.92dzlu"]','1176745540.34dzlu': '["Objects"]["1172185213.66sdnaik"]["Objects"]["1176745540.34dzlu"]','1176758375.78dzlu': '["Objects"]["1172185213.66sdnaik"]["Objects"]["1176758375.78dzlu"]'}}
1,661.5
3,276
0.651821
467
3,323
4.603854
0.310493
0.019535
0.019535
0.026047
0.43814
0.43814
0.43814
0.43814
0.430233
0.430233
0
0.24984
0.05808
3,323
2
3,276
1,661.5
0.437061
0
0
0
0
0
0.573406
0.207581
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
1
1
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
6
b6d5af8361f1f610d25d386e5bb4ab71d1e4749a
33
py
Python
eod/models/heads/roi_head/__init__.py
Helicopt/EOD
b5db36f4ce267bf64d093b8174bde2c4097b4718
[ "Apache-2.0" ]
196
2021-10-30T05:15:36.000Z
2022-03-30T18:43:40.000Z
eod/tasks/det/models/heads/roi_head/__init__.py
YZW-explorer/EOD
f10e64de86c0f356ebf5c7e923f4042eec4207b1
[ "Apache-2.0" ]
12
2021-10-30T11:33:28.000Z
2022-03-31T14:22:58.000Z
eod/tasks/det/models/heads/roi_head/__init__.py
YZW-explorer/EOD
f10e64de86c0f356ebf5c7e923f4042eec4207b1
[ "Apache-2.0" ]
23
2021-11-01T07:26:17.000Z
2022-03-27T05:55:37.000Z
from .retina_head import * # noqa
33
33
0.757576
5
33
4.8
1
0
0
0
0
0
0
0
0
0
0
0
0.151515
33
1
33
33
0.857143
0.121212
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
b6dd4523f402111c25204b4a5556e2bad3f8a96e
41
py
Python
numsim/computer/__init__.py
ffernandoalves/NumSim
44544cfa6a451835efafbc847780fdcb8ad9081c
[ "MIT" ]
1
2021-05-26T07:14:21.000Z
2021-05-26T07:14:21.000Z
numsim/computer/__init__.py
ffernandoalves/NumSim
44544cfa6a451835efafbc847780fdcb8ad9081c
[ "MIT" ]
null
null
null
numsim/computer/__init__.py
ffernandoalves/NumSim
44544cfa6a451835efafbc847780fdcb8ad9081c
[ "MIT" ]
null
null
null
from .velocity_verlet import init_verlet
20.5
40
0.878049
6
41
5.666667
0.833333
0
0
0
0
0
0
0
0
0
0
0
0.097561
41
1
41
41
0.918919
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
f3f8c171466b7d420a086e8cf4441abbd54afddd
203
py
Python
app/gws/ext/layer/postgres/__init__.py
ewie/gbd-websuite
6f2814c7bb64d11cb5a0deec712df751718fb3e1
[ "Apache-2.0" ]
null
null
null
app/gws/ext/layer/postgres/__init__.py
ewie/gbd-websuite
6f2814c7bb64d11cb5a0deec712df751718fb3e1
[ "Apache-2.0" ]
null
null
null
app/gws/ext/layer/postgres/__init__.py
ewie/gbd-websuite
6f2814c7bb64d11cb5a0deec712df751718fb3e1
[ "Apache-2.0" ]
null
null
null
import gws.ext.db.provider.postgres.layer


class Config(gws.ext.db.provider.postgres.layer.Config):
    """Postgres layer"""
    pass


class Object(gws.ext.db.provider.postgres.layer.Object):
    pass
18.454545
56
0.729064
29
203
5.103448
0.37931
0.351351
0.162162
0.324324
0.587838
0.587838
0
0
0
0
0
0
0.128079
203
10
57
20.3
0.836158
0.068966
0
0.4
0
0
0
0
0
0
0
0
0
1
0
true
0.4
0.2
0
0.6
0
1
0
0
null
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
0
0
1
0
0
6
6d02e2d8fe53614e43174882389a0e2f26584819
4,874
py
Python
03_posenet/02_posenet_v2/01_float32/06_float16_quantization_resnet.py
khanfarhan10/PINTO_model_zoo
4cad2e506d8c0fb604aa7b5f84115a840ab59ba1
[ "MIT" ]
1,529
2019-12-11T13:36:23.000Z
2022-03-31T18:38:27.000Z
03_posenet/02_posenet_v2/01_float32/06_float16_quantization_resnet.py
khanfarhan10/PINTO_model_zoo
4cad2e506d8c0fb604aa7b5f84115a840ab59ba1
[ "MIT" ]
200
2020-01-06T09:24:42.000Z
2022-03-31T17:29:08.000Z
03_posenet/02_posenet_v2/01_float32/06_float16_quantization_resnet.py
khanfarhan10/PINTO_model_zoo
4cad2e506d8c0fb604aa7b5f84115a840ab59ba1
[ "MIT" ]
288
2020-02-21T14:56:02.000Z
2022-03-30T03:00:35.000Z
import tensorflow as tf
import tensorflow_datasets as tfds
import numpy as np
from PIL import Image
import os
import glob

# Integer Quantization - Input/Output=float32
converter = tf.lite.TFLiteConverter.from_saved_model('saved_model_posenet_resnet50_16_225')
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.target_spec.supported_types = [tf.float16]
tflite_quant_model = converter.convert()
with open('posenet_resnet50_16_225_float16_quant.tflite', 'wb') as w:
    w.write(tflite_quant_model)
print("Integer Quantization complete! - posenet_resnet50_16_225_float16_quant.tflite")

# Integer Quantization - Input/Output=float32
converter = tf.lite.TFLiteConverter.from_saved_model('saved_model_posenet_resnet50_16_257')
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.target_spec.supported_types = [tf.float16]
tflite_quant_model = converter.convert()
with open('posenet_resnet50_16_257_float16_quant.tflite', 'wb') as w:
    w.write(tflite_quant_model)
print("Integer Quantization complete! - posenet_resnet50_16_257_float16_quant.tflite")

# Integer Quantization - Input/Output=float32
converter = tf.lite.TFLiteConverter.from_saved_model('saved_model_posenet_resnet50_16_321')
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.target_spec.supported_types = [tf.float16]
tflite_quant_model = converter.convert()
with open('posenet_resnet50_16_321_float16_quant.tflite', 'wb') as w:
    w.write(tflite_quant_model)
print("Integer Quantization complete! - posenet_resnet50_16_321_float16_quant.tflite")

# Integer Quantization - Input/Output=float32
converter = tf.lite.TFLiteConverter.from_saved_model('saved_model_posenet_resnet50_16_385')
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.target_spec.supported_types = [tf.float16]
tflite_quant_model = converter.convert()
with open('posenet_resnet50_16_385_float16_quant.tflite', 'wb') as w:
    w.write(tflite_quant_model)
print("Integer Quantization complete! - posenet_resnet50_16_385_float16_quant.tflite")

# Integer Quantization - Input/Output=float32
converter = tf.lite.TFLiteConverter.from_saved_model('saved_model_posenet_resnet50_16_513')
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.target_spec.supported_types = [tf.float16]
tflite_quant_model = converter.convert()
with open('posenet_resnet50_16_513_float16_quant.tflite', 'wb') as w:
    w.write(tflite_quant_model)
print("Integer Quantization complete! - posenet_resnet50_16_513_float16_quant.tflite")

# Integer Quantization - Input/Output=float32
converter = tf.lite.TFLiteConverter.from_saved_model('saved_model_posenet_resnet50_32_225')
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.target_spec.supported_types = [tf.float16]
tflite_quant_model = converter.convert()
with open('posenet_resnet50_32_225_float16_quant.tflite', 'wb') as w:
    w.write(tflite_quant_model)
print("Integer Quantization complete! - posenet_resnet50_32_225_float16_quant.tflite")

# Integer Quantization - Input/Output=float32
converter = tf.lite.TFLiteConverter.from_saved_model('saved_model_posenet_resnet50_32_257')
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.target_spec.supported_types = [tf.float16]
tflite_quant_model = converter.convert()
with open('posenet_resnet50_32_257_float16_quant.tflite', 'wb') as w:
    w.write(tflite_quant_model)
print("Integer Quantization complete! - posenet_resnet50_32_257_float16_quant.tflite")

# Integer Quantization - Input/Output=float32
converter = tf.lite.TFLiteConverter.from_saved_model('saved_model_posenet_resnet50_32_321')
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.target_spec.supported_types = [tf.float16]
tflite_quant_model = converter.convert()
with open('posenet_resnet50_32_321_float16_quant.tflite', 'wb') as w:
    w.write(tflite_quant_model)
print("Integer Quantization complete! - posenet_resnet50_32_321_float16_quant.tflite")

# Integer Quantization - Input/Output=float32
converter = tf.lite.TFLiteConverter.from_saved_model('saved_model_posenet_resnet50_32_385')
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.target_spec.supported_types = [tf.float16]
tflite_quant_model = converter.convert()
with open('posenet_resnet50_32_385_float16_quant.tflite', 'wb') as w:
    w.write(tflite_quant_model)
print("Integer Quantization complete! - posenet_resnet50_32_385_float16_quant.tflite")

# Integer Quantization - Input/Output=float32
converter = tf.lite.TFLiteConverter.from_saved_model('saved_model_posenet_resnet50_32_513')
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.target_spec.supported_types = [tf.float16]
tflite_quant_model = converter.convert()
with open('posenet_resnet50_32_513_float16_quant.tflite', 'wb') as w:
    w.write(tflite_quant_model)
print("Integer Quantization complete! - posenet_resnet50_32_513_float16_quant.tflite")
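Editor's note: the script repeats one identical conversion block per model. A behavior-preserving refactor sketch (same outputs and messages, just parameterized; the suffix list is read off the script above):

import tensorflow as tf

MODELS = ['16_225', '16_257', '16_321', '16_385', '16_513',
          '32_225', '32_257', '32_321', '32_385', '32_513']

for suffix in MODELS:
    # identical float16 post-training quantization settings for every model
    converter = tf.lite.TFLiteConverter.from_saved_model(
        'saved_model_posenet_resnet50_{}'.format(suffix))
    converter.optimizations = [tf.lite.Optimize.DEFAULT]
    converter.target_spec.supported_types = [tf.float16]
    out_path = 'posenet_resnet50_{}_float16_quant.tflite'.format(suffix)
    with open(out_path, 'wb') as w:
        w.write(converter.convert())
    print("Integer Quantization complete! - {}".format(out_path))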
49.232323
91
0.829298
661
4,874
5.765507
0.075643
0.118079
0.083967
0.07872
0.973498
0.973498
0.973498
0.9672
0.9672
0.9672
0
0.064131
0.07222
4,874
99
92
49.232323
0.778638
0.09007
0
0.526316
0
0
0.357062
0.277966
0
0
0
0
0
1
0
false
0
0.078947
0
0.078947
0.131579
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
6d26e2e022d1bc0adfc5d8d6f758de3c3a397266
31
py
Python
plugins/encrypt-content/encrypt_content/__init__.py
mohnjahoney/website_source
edc86a869b90ae604f32e736d9d5ecd918088e6a
[ "MIT" ]
23
2015-05-15T18:44:27.000Z
2021-10-09T16:35:47.000Z
plugins/encrypt-content/encrypt_content/__init__.py
mohnjahoney/website_source
edc86a869b90ae604f32e736d9d5ecd918088e6a
[ "MIT" ]
29
2020-03-22T06:57:57.000Z
2022-01-24T22:46:42.000Z
plugins/encrypt-content/encrypt_content/__init__.py
mohnjahoney/website_source
edc86a869b90ae604f32e736d9d5ecd918088e6a
[ "MIT" ]
11
2015-09-17T12:04:33.000Z
2021-08-03T01:21:05.000Z
from .encrypt_content import *
15.5
30
0.806452
4
31
6
1
0
0
0
0
0
0
0
0
0
0
0
0.129032
31
1
31
31
0.888889
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
6d27f8ff791055d730edc3c616e88a01c9992ff6
21
py
Python
packages/python/b3api/__init__.py
mariotaddeucci/b3api
f2a7fd926b4f38cf43f8632a63bd7fbddcab6caf
[ "MIT" ]
null
null
null
packages/python/b3api/__init__.py
mariotaddeucci/b3api
f2a7fd926b4f38cf43f8632a63bd7fbddcab6caf
[ "MIT" ]
null
null
null
packages/python/b3api/__init__.py
mariotaddeucci/b3api
f2a7fd926b4f38cf43f8632a63bd7fbddcab6caf
[ "MIT" ]
null
null
null
from . import assets
10.5
20
0.761905
3
21
5.333333
1
0
0
0
0
0
0
0
0
0
0
0
0.190476
21
1
21
21
0.941176
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
6d4a8328758d9de01e6ec45467f00c89d12ec6fe
7,761
py
Python
AutomatedTesting/Gem/PythonTests/largeworlds/gradient_signal/test_GradientSurfaceTagEmitter.py
cypherdotXd/o3de
bb90c4ddfe2d495e9c00ebf1e2650c6d603a5676
[ "Apache-2.0", "MIT" ]
1
2021-08-08T19:54:51.000Z
2021-08-08T19:54:51.000Z
AutomatedTesting/Gem/PythonTests/largeworlds/gradient_signal/test_GradientSurfaceTagEmitter.py
cypherdotXd/o3de
bb90c4ddfe2d495e9c00ebf1e2650c6d603a5676
[ "Apache-2.0", "MIT" ]
2
2022-01-13T04:29:38.000Z
2022-03-12T01:05:31.000Z
AutomatedTesting/Gem/PythonTests/largeworlds/gradient_signal/test_GradientSurfaceTagEmitter.py
cypherdotXd/o3de
bb90c4ddfe2d495e9c00ebf1e2650c6d603a5676
[ "Apache-2.0", "MIT" ]
null
null
null
""" Copyright (c) Contributors to the Open 3D Engine Project. For complete copyright and license terms please see the LICENSE at the root of this distribution. SPDX-License-Identifier: Apache-2.0 OR MIT """ import os import pytest import logging # Bail on the test if ly_test_tools doesn't exist. pytest.importorskip("ly_test_tools") import ly_test_tools.environment.file_system as file_system import editor_python_test_tools.hydra_test_utils as hydra logger = logging.getLogger(__name__) test_directory = os.path.join(os.path.dirname(__file__), "EditorScripts") @pytest.mark.parametrize("project", ["AutomatedTesting"]) @pytest.mark.parametrize("level", ["tmp_level"]) @pytest.mark.usefixtures("automatic_process_killer") @pytest.mark.parametrize("launcher_platform", ['windows_editor']) class TestGradientSurfaceTagEmitter(object): @pytest.fixture(autouse=True) def setup_teardown(self, request, workspace, project, level): # Cleanup temp level before and after test runs def teardown(): file_system.delete([os.path.join(workspace.paths.engine_root(), project, "Levels", level)], True, True) request.addfinalizer(teardown) file_system.delete([os.path.join(workspace.paths.engine_root(), project, "Levels", level)], True, True) @pytest.mark.test_case_id("C3297302") @pytest.mark.SUITE_periodic def test_GradientSurfaceTagEmitter_ComponentDependencies(self, request, editor, level, workspace, launcher_platform): cfg_args = [level] expected_lines = [ "GradientSurfaceTagEmitter_ComponentDependencies: test started", "GradientSurfaceTagEmitter_ComponentDependencies: Gradient Surface Tag Emitter is Disabled", "GradientSurfaceTagEmitter_ComponentDependencies: Dither Gradient Modifier and Gradient Surface Tag Emitter are enabled", "GradientSurfaceTagEmitter_ComponentDependencies: Gradient Mixer and Gradient Surface Tag Emitter are enabled", "GradientSurfaceTagEmitter_ComponentDependencies: Invert Gradient Modifier and Gradient Surface Tag Emitter are enabled", "GradientSurfaceTagEmitter_ComponentDependencies: Levels Gradient Modifier and Gradient Surface Tag Emitter are enabled", "GradientSurfaceTagEmitter_ComponentDependencies: Posterize Gradient Modifier and Gradient Surface Tag Emitter are enabled", "GradientSurfaceTagEmitter_ComponentDependencies: Smooth-Step Gradient Modifier and Gradient Surface Tag Emitter are enabled", "GradientSurfaceTagEmitter_ComponentDependencies: Threshold Gradient Modifier and Gradient Surface Tag Emitter are enabled", "GradientSurfaceTagEmitter_ComponentDependencies: Altitude Gradient and Gradient Surface Tag Emitter are enabled", "GradientSurfaceTagEmitter_ComponentDependencies: Constant Gradient and Gradient Surface Tag Emitter are enabled", "GradientSurfaceTagEmitter_ComponentDependencies: FastNoise Gradient and Gradient Surface Tag Emitter are enabled", "GradientSurfaceTagEmitter_ComponentDependencies: Image Gradient and Gradient Surface Tag Emitter are enabled", "GradientSurfaceTagEmitter_ComponentDependencies: Perlin Noise Gradient and Gradient Surface Tag Emitter are enabled", "GradientSurfaceTagEmitter_ComponentDependencies: Random Noise Gradient and Gradient Surface Tag Emitter are enabled", "GradientSurfaceTagEmitter_ComponentDependencies: Reference Gradient and Gradient Surface Tag Emitter are enabled", "GradientSurfaceTagEmitter_ComponentDependencies: Shape Falloff Gradient and Gradient Surface Tag Emitter are enabled", "GradientSurfaceTagEmitter_ComponentDependencies: Slope Gradient and Gradient Surface Tag Emitter are enabled", 
"GradientSurfaceTagEmitter_ComponentDependencies: Surface Mask Gradient and Gradient Surface Tag Emitter are enabled", "GradientSurfaceTagEmitter_ComponentDependencies: result=SUCCESS", ] unexpected_lines = [ "GradientSurfaceTagEmitter_ComponentDependencies: Gradient Surface Tag Emitter is Enabled, but should be Disabled without dependencies met", "GradientSurfaceTagEmitter_ComponentDependencies: Dither Gradient Modifier and Gradient Surface Tag Emitter are disabled", "GradientSurfaceTagEmitter_ComponentDependencies: Gradient Mixer and Gradient Surface Tag Emitter are disabled", "GradientSurfaceTagEmitter_ComponentDependencies: Invert Gradient Modifier and Gradient Surface Tag Emitter are disabled", "GradientSurfaceTagEmitter_ComponentDependencies: Levels Gradient Modifier and Gradient Surface Tag Emitter are disabled", "GradientSurfaceTagEmitter_ComponentDependencies: Posterize Gradient Modifier and Gradient Surface Tag Emitter are disabled", "GradientSurfaceTagEmitter_ComponentDependencies: Smooth-Step Gradient Modifier and Gradient Surface Tag Emitter are disabled", "GradientSurfaceTagEmitter_ComponentDependencies: Threshold Gradient Modifier and Gradient Surface Tag Emitter are disabled", "GradientSurfaceTagEmitter_ComponentDependencies: Altitude Gradient and Gradient Surface Tag Emitter are disabled", "GradientSurfaceTagEmitter_ComponentDependencies: Constant Gradient and Gradient Surface Tag Emitter are disabled", "GradientSurfaceTagEmitter_ComponentDependencies: FastNoise Gradient and Gradient Surface Tag Emitter are disabled", "GradientSurfaceTagEmitter_ComponentDependencies: Image Gradient and Gradient Surface Tag Emitter are disabled", "GradientSurfaceTagEmitter_ComponentDependencies: Perlin Noise Gradient and Gradient Surface Tag Emitter are disabled", "GradientSurfaceTagEmitter_ComponentDependencies: Random Noise Gradient and Gradient Surface Tag Emitter are disabled", "GradientSurfaceTagEmitter_ComponentDependencies: Reference Gradient and Gradient Surface Tag Emitter are disabled", "GradientSurfaceTagEmitter_ComponentDependencies: Shape Falloff Gradient and Gradient Surface Tag Emitter are disabled", "GradientSurfaceTagEmitter_ComponentDependencies: Slope Gradient and Gradient Surface Tag Emitter are disabled", "GradientSurfaceTagEmitter_ComponentDependencies: Surface Mask Gradient and Gradient Surface Tag Emitter are disabled", ] hydra.launch_and_validate_results( request, test_directory, editor, "GradientSurfaceTagEmitter_ComponentDependencies.py", expected_lines=expected_lines, unexpected_lines=unexpected_lines, cfg_args=cfg_args ) @pytest.mark.test_case_id("C3297303") @pytest.mark.SUITE_periodic def test_GradientSurfaceTagEmitter_SurfaceTagsAddRemoveSuccessfully(self, request, editor, level, launcher_platform): expected_lines = [ "Entity has a Gradient Surface Tag Emitter component", "Entity has a Reference Gradient component", "Added SurfaceTag: container count is 1", "Removed SurfaceTag: container count is 0", "GradientSurfaceTagEmitter_SurfaceTagsAddRemoveSucessfully: result=SUCCESS" ] hydra.launch_and_validate_results( request, test_directory, editor, "GradientSurfaceTagEmitter_SurfaceTagsAddRemoveSuccessfully.py", expected_lines, cfg_args=[level] )
66.333333
153
0.744234
719
7,761
7.887344
0.218359
0.324458
0.11744
0.163111
0.739023
0.73197
0.73197
0.686828
0.685417
0.682772
0
0.003078
0.204613
7,761
116
154
66.905172
0.9156
0.038011
0
0.191489
0
0
0.644495
0.270484
0
0
0
0
0
1
0.042553
false
0
0.06383
0
0.117021
0
0
0
0
null
1
0
1
0
1
1
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
ed9199e4186e4d39f642aeed0ba50d666d892379
34,394
py
Python
differentiable_filters/utils/push_utils.py
akloss/differentiable_filters
821889dec411927658c6ef7dd01c9028d2f28efd
[ "MIT" ]
14
2021-01-10T10:44:31.000Z
2022-03-28T07:46:49.000Z
differentiable_filters/utils/push_utils.py
brentyi/differentiable_filters
7ae1f5022a9f5cf9485cb7748cadf0f0d65c01bd
[ "MIT" ]
null
null
null
differentiable_filters/utils/push_utils.py
brentyi/differentiable_filters
7ae1f5022a9f5cf9485cb7748cadf0f0d65c01bd
[ "MIT" ]
7
2021-01-13T12:38:36.000Z
2022-03-06T16:49:43.000Z
""" Analytical functions for computing the process model of the planar pushing task and for projection between pixels and 3d world-coordinates """ import tensorflow as tf import numpy as np def physical_model(xos, contact_points, normals, actions, friction, mu, contact): """ Predict the outcome of a single pushing action given the current state of the object Parameters ---------- xos : tensor The position of the object's center of mass contact_points : tensor The position of the contact point between pusher and object normals : tensor The normal to the object surface at the contact point actions : tensor The pusher movement (in x and y direction) friction : tensor A friction-related parameter mu : tensor Friction coefficient between pusher and object contact : tensor Indicates if the pusher is in contact with the object at all Returns ------- tr : tensor The translation of the object in x and y direction rot : tensor The rotation of the object keep_contact : tensor If the pusher will still be in contact with the object after the push """ # softly binarize the contact contact = tf.where(tf.greater_equal(contact, 0.5), tf.ones_like(contact), tf.zeros_like(contact)) # contact = tf.nn.sigmoid((40*contact)-20) contact = tf.reshape(contact, [-1, 1]) # upscale the friction parameter to its coorect value fr = friction * 100. with tf.variable_scope('prediction'): # first calculate the distance between the contact point and # the object r = contact_points - xos rx = tf.slice(r, [0, 0], [-1, 1]) ry = tf.slice(r, [0, 1], [-1, 1]) vp, keep_contact = \ get_contact_mode(rx, ry, actions, fr, mu, normals, contact) dx, dy, rot = get_vel_model(vp, rx, ry, fr) tr = tf.concat([dx, dy], axis=-1) * contact rot = rot * contact return tr, rot, tf.reshape(keep_contact, [-1, 1]) def get_vel_model(vp, rx, ry, fr): """ Given an effective push, predict the translation and rotation of the object Parameters ---------- vp : tensor Effective push rx : tensor x-coordinate of the contact point ry : tensor y-coordinate of the contact point fr : tensor Friction related parameter Returns ------- tx : tensor Translation in x ty : tensor Translation in y rot : tensor Object rotation. 
""" with tf.variable_scope('calculate_velocity'): ux = tf.slice(vp, [0, 0], [-1, 1]) uy = tf.slice(vp, [0, 1], [-1, 1]) rx2 = tf.square(rx) ry2 = tf.square(ry) div = fr + rx2 + ry2 tx_tmp = tf.multiply((fr + rx2), ux) + \ tf.multiply(rx, tf.multiply(ry, uy)) tx = tf.divide(tx_tmp, div) ty_tmp = tf.multiply((fr + ry2), uy) + \ tf.multiply(rx, tf.multiply(ry, ux)) ty = tf.divide(ty_tmp, div) rot_tmp = tf.multiply(rx, ty) - tf.multiply(ry, tx) rot = tf.divide(rot_tmp, fr) return tx, ty, rot def get_contact_mode(rx, ry, action, fr2, mu, normal, contact): """ Determines the contact mode (sticking or sliding) and the effective push Parameters ---------- rx : tensor x-coordinate of the contact point ry : tensor y-coordinate of the contact point action : tensor The pusher movement (in x and y direction) fr2 : tensor Squared version of the friction related parameter friction : tensor A friction-related parameter mu : tensor Friction coefficient between pusher and object contact : tensor Indicates if the pusher is in contact with the object at all Returns ------- vp_out : tensor the effective push keep_contact : tensor If the pusher will still be in contact with the object after the push """ # for calculate the boundary forces of the friction cone ang = mu/180.*np.pi normal_norm = tf.linalg.norm(normal, axis=-1) ok_normal = tf.greater(normal_norm, 1e-6) # if we don't have a normal, we simulate one to prevent nans normal_t = tf.where(ok_normal, normal, normal + tf.ones_like(normal)) normal_t = normal_t/tf.linalg.norm(normal_t, axis=-1, keepdims=True) nx = tf.slice(normal_t, [0, 0], [-1, 1]) ny = tf.slice(normal_t, [0, 1], [-1, 1]) # check if the normal points towards the object dir_center = - tf.concat([rx, ry], axis=-1) dir_center_norm = tf.linalg.norm(dir_center, axis=-1, keepdims=True) dir_center = tf.where(tf.greater(tf.squeeze(dir_center_norm), 0.), dir_center/dir_center_norm, dir_center) prod = tf.matmul(tf.reshape(dir_center, [-1, 1, 2]), tf.reshape(normal_t, [-1, 2, 1])) # prevent nans if prod is slightly higher than 1 due to numerics prod = tf.clip_by_value(prod, -0.999999999, 0.999999999) n_ang = tf.acos(tf.reshape(prod, [-1])) # if the angle is greater than 90 degree, the normal is incorrect ok_normal = tf.logical_and(ok_normal, tf.less(tf.abs(n_ang), np.pi/2.+0.1)) # same for the push push_norm = tf.linalg.norm(action, axis=-1) push = tf.greater(push_norm, 1e-6) action_t = tf.where(push, tf.identity(action), action + tf.ones_like(action)) action_t = action_t/tf.linalg.norm(action_t, axis=-1, keepdims=True) ux_normed = tf.slice(action_t, [0, 0], [-1, 1]) uy_normed = tf.slice(action_t, [0, 1], [-1, 1]) sin1 = tf.sin(ang) cos = tf.cos(ang) t11 = tf.concat([cos[:, :, None], -sin1[:, :, None]], axis=-1) t12 = tf.concat([sin1[:, :, None], cos[:, :, None]], axis=-1) rot_mat1 = tf.concat(axis=1, values=[t11, t12]) sin2 = tf.sin(-ang) t21 = tf.concat([cos[:, :, None], -sin2[:, :, None]], axis=-1) t22 = tf.concat([sin2[:, :, None], cos[:, :, None]], axis=-1) rot_mat2 = tf.concat(axis=1, values=[t21, t22]) # rotate the normal to get the boundary forces fb1 = tf.matmul(rot_mat1, tf.reshape(normal_t, [-1, 2, 1])) fb2 = tf.matmul(rot_mat2, tf.reshape(normal_t, [-1, 2, 1])) fbx1, fby1 = tf.unstack(tf.reshape(fb1, [-1, 2]), axis=-1) fbx2, fby2 = tf.unstack(tf.reshape(fb2, [-1, 2]), axis=-1) # torque m1 = tf.multiply(rx, fby1[:, None]) - tf.multiply(ry, fbx1[:, None]) m2 = tf.multiply(rx, fby2[:, None]) - tf.multiply(ry, fbx2[:, None]) # calculate the velocity at the contact point induced by the # 
boundary-forces vx_tmp1 = tf.multiply(fr2, fbx1[:, None]) vy_tmp1 = tf.multiply(fr2, fby1[:, None]) vx_tmp2 = tf.multiply(fr2, fbx2[:, None]) vy_tmp2 = tf.multiply(fr2, fby2[:, None]) vbx1 = vx_tmp1 - tf.multiply(m1, ry) vby1 = vy_tmp1 + tf.multiply(m1, rx) vbx2 = vx_tmp2 - tf.multiply(m2, ry) vby2 = vy_tmp2 + tf.multiply(m2, rx) n1 = tf.sqrt(tf.square(vbx1)+tf.square(vby1)) n2 = tf.sqrt(tf.square(vbx2)+tf.square(vby2)) # if we have the slipping case, we need to find the correct # boundary velocity and the scaling factor ang1 = tf.divide(vbx1 * ux_normed + vby1 * uy_normed, n1) ang2 = tf.divide(vbx2 * ux_normed + vby2 * uy_normed, n2) # if the angle between the push and one of the boundarie # velocities is greater than the angle between the two # boundary velocities, the push is sliding ang3 = tf.divide(vbx2 * vbx1 + vby2 * vby1, n1 * n2) b1 = tf.concat([vbx1, vby1], axis=1) b2 = tf.concat([vbx2, vby2], axis=1) vb = tf.where(tf.squeeze(tf.greater_equal(ang1, ang2)), b1, b2) vbx = tf.slice(vb, [0, 0], [-1, 1]) vby = tf.slice(vb, [0, 1], [-1, 1]) kappa = tf.divide(nx * action[:, 0:1] + ny * action[:, 1:], tf.multiply(nx, vbx) + tf.multiply(ny, vby)) sticking = tf.logical_and(tf.less_equal(ang3, ang1), tf.less_equal(ang3, ang2)) vp = tf.multiply(kappa, vb) # check sticking or sliding vp_out = tf.where(tf.squeeze(sticking), action, vp) # if the normal or action were not properly defined, return the action # to not create any dependencies vp_out = tf.where(tf.logical_and(tf.squeeze(ok_normal), tf.squeeze(push)), vp_out, action) # check if the pusher moves away from the contact normed_a = action_t/tf.linalg.norm(action_t, axis=-1, keepdims=True) push_angle = tf.squeeze(tf.matmul(normed_a[:, None, :], normal_t[:, :, None])) lose_contact = tf.squeeze(tf.less(push_angle, -1e-2)) # we can only break contact if there was contact in the first place lose_contact = tf.logical_and(lose_contact, tf.squeeze(tf.greater(contact, 0.))) # and if both normal and action were properly defined lose_contact = tf.logical_and(lose_contact, tf.squeeze(ok_normal)) lose_contact = tf.logical_and(lose_contact, tf.squeeze(push)) # in this case, the resulting push velocity is zero vp_out = tf.where(lose_contact, 0*vp_out, vp_out) keep_contact = tf.logical_not(lose_contact) return vp_out, keep_contact def physical_model_derivative(xos, contact_points, normals, actions, friction, mu, contact): """ Predict the outcome of a single pushing action given the current state of the object. 
In addition, computes derivatives for constructing the jacobian of the process model Parameters ---------- xos : tensor The position of the object's center of mass contact_points : tensor The position of the contact point between pusher and object normals : tensor The normal to the object surface at the contact point actions : tensor The pusher movement (in x and y direction) friction : tensor A friction-related parameter mu : tensor Friction coefficient between pusher and object contact : tensor Indicates if the pusher is in contact with the object at all Returns ------- tr : tensor The translation of the object in x and y direction rot : tensor The rotation of the object keep_contact : tensor If the pusher will still be in contact with the object after the push ddx : tensor Derivative of the object x-translation with respect to the input values ddy : tensor Derivative of the object y-translation with respect to the input values dor : tensor Derivative of the object rotation with respect to the input values """ bs = contact.get_shape()[0].value dim_x = 10 # binarize the contact cont = tf.where(tf.greater_equal(contact, 0.5), tf.ones_like(contact), tf.zeros_like(contact)) with tf.variable_scope('prediction'): # first calculate the distance between the contact point and # the object r = contact_points - xos rx = tf.slice(r, [0, 0], [-1, 1]) ry = tf.slice(r, [0, 1], [-1, 1]) nx = tf.slice(normals, [0, 0], [-1, 1]) ny = tf.slice(normals, [0, 1], [-1, 1]) vp, dvpx, dvpy, keep_contact = \ get_contact_mode_derivative(rx, ry, actions, friction, mu, nx, ny, cont) dvpxs = tf.unstack(dvpx, dim_x, axis=2) dvpys = tf.unstack(dvpy, dim_x, axis=2) dx, dy, rot, dddx, dddy, ddrot = \ get_vel_model_derivative(vp, r, friction) # dvpx, dvpy, drx, dry, df dxs = tf.unstack(dddx, 5, axis=-1) dys = tf.unstack(dddy, 5, axis=-1) drs = tf.unstack(ddrot, 5, axis=-1) ddx = \ tf.stack([cont*(-dxs[2] + dxs[0]*dvpxs[0] + dxs[1]*dvpys[0]), cont*(-dxs[3] + dxs[0]*dvpxs[1] + dxs[1]*dvpys[1]), tf.zeros([bs, 1], dtype=tf.float32), cont*( dxs[4] + dxs[0]*dvpxs[3] + dxs[1]*dvpys[3]), cont*( dxs[0]*dvpxs[4] + dxs[1]*dvpys[4]), cont*( dxs[2] + dxs[0]*dvpxs[5] + dxs[1]*dvpys[5]), cont*( dxs[3] + dxs[0]*dvpxs[6] + dxs[1]*dvpys[6]), cont*( dxs[0]*dvpxs[7] + dxs[1]*dvpys[7]), cont*( dxs[0]*dvpxs[8] + dxs[1]*dvpys[8]), #dcont*dx], axis=-1) tf.zeros([bs, 1], dtype=tf.float32)], axis=-1) ddy = \ tf.stack([cont*(-dys[2] + dys[0]*dvpxs[0] + dys[1]*dvpys[0]), cont*(-dys[3] + dys[0]*dvpxs[1] + dys[1]*dvpys[1]), tf.zeros([bs, 1], dtype=tf.float32), cont*( dys[4] + dys[0]*dvpxs[3] + dys[1]*dvpys[3]), cont*( dys[0]*dvpxs[4] + dys[1]*dvpys[4]), cont*( dys[2] + dys[0]*dvpxs[5] + dys[1]*dvpys[5]), cont*( dys[3] + dys[0]*dvpxs[6] + dys[1]*dvpys[6]), cont*( dys[0]*dvpxs[7] + dys[1]*dvpys[7]), cont*( dys[0]*dvpxs[8] + dys[1]*dvpys[8]), #dcont*dy], axis=-1) tf.zeros([bs, 1], dtype=tf.float32)], axis=-1) dor = \ tf.stack([cont*(-drs[2] + drs[0]*dvpxs[0] + drs[1]*dvpys[0]), cont*(-drs[3] + drs[0]*dvpxs[1] + drs[1]*dvpys[1]), tf.zeros([bs, 1], dtype=tf.float32), cont*( drs[4] + drs[0]*dvpxs[3] + drs[1]*dvpys[3]), cont*( drs[0]*dvpxs[4] + drs[1]*dvpys[4]), cont*( drs[2] + drs[0]*dvpxs[5] + drs[1]*dvpys[5]), cont*( drs[3] + drs[0]*dvpxs[6] + drs[1]*dvpys[6]), cont*( drs[0]*dvpxs[7] + drs[1]*dvpys[7]), cont*( drs[0]*dvpxs[8] + drs[1]*dvpys[8]), #dcont*rot], axis=-1) tf.zeros([bs, 1], dtype=tf.float32)], axis=-1) tr = tf.concat([dx, dy], axis=-1) * cont rot = rot * cont return tr, rot, tf.reshape(keep_contact, [-1, 1]), ddx, ddy, dor def 
get_vel_model_derivative(vp, contact_points, fr): """ Given an effective push, predict the translation and rotation of the object. In addition, computes derivatives for constructing the jacobian of the process model Parameters ---------- vp : tensor Effective push contact_points : tensor contact point fr : tensor Friction related parameter Returns ------- tx : tensor Translation in x ty : tensor Translation in y rot : tensor Object rotation. dx : tensor Derivative of tx with respect to the input values dy : tensor Derivative of ty with respect to the input values drot : tensor Derivative of rot with respect to the input values """ with tf.variable_scope('calculate_velocity'): rx = tf.slice(contact_points, [0, 0], [-1, 1]) rz = tf.slice(contact_points, [0, 1], [-1, 1]) ux = tf.slice(vp, [0, 0], [-1, 1]) uz = tf.slice(vp, [0, 1], [-1, 1]) rx2 = tf.square(rx) rz2 = tf.square(rz) div = 100*fr + rx2 + rz2 tx_tmp = tf.multiply((100*fr + rx2), ux) + \ tf.multiply(rx, tf.multiply(rz, uz)) tx = tf.divide(tx_tmp, div) tz_tmp = tf.multiply((100*fr + rz2), uz) + \ tf.multiply(rx, tf.multiply(rz, ux)) tz = tf.divide(tz_tmp, div) rot_tmp = tf.multiply(rx, tz) - tf.multiply(rz, tx) rot = tf.divide(rot_tmp, 100*fr) dxdf = (100*ux*div - 100*tx_tmp)/div**2 dydf = (100*uz*div - 100*tz_tmp)/div**2 dxdrx = ((2*rx*ux + rz*uz)*div - 2*rx*tx_tmp)/div**2 dxdrz = (rx*uz*div - 2*rz*tx_tmp)/div**2 dydrz = ((2*rz*uz + rx*ux)*div - 2*rz*tz_tmp)/div**2 dydrx = (rz*ux*div - 2*rx*tz_tmp)/div**2 dxdux = (100*fr + rx2)/div dxduz = (rx*rz)/div dyduz = (100*fr + rz2)/div dydux = (rx*rz)/div drdf = ((rx*dydf - rz*dxdf)*100*fr - 100*rot_tmp)/(100*fr)**2 drdrx = (tz + rx*dydrx - rz*dxdrx)/(100*fr) drdrz = (rx*dydrz - tx - rz*dxdrz)/(100*fr) drdux = (rx*dydux - rz*dxdux)/(100*fr) drduz = (rx*dyduz - rz*dxduz)/(100*fr) # dvpx, dvpy, drx, dry, df dx = tf.stack([dxdux, dxduz, dxdrx, dxdrz, dxdf], axis=-1) dy = tf.stack([dydux, dyduz, dydrx, dydrz, dydf], axis=-1) drot = tf.stack([drdux, drduz, drdrx, drdrz, drdf], axis=-1) return tx, tz, rot, dx, dy, drot def get_contact_mode_derivative(rx, ry, action, fr, mu, nnx, nny, contact): """ Determines the contact mode (sticking or sliding) and the effective push. 
In addition, computes derivatives for constructing the jacobian of the process model Parameters ---------- rx : tensor x-coordinate of the contact point ry : tensor y-coordinate of the contact point action : tensor The pusher movement (in x and y direction) fr : tensor A friction related parameter mu : tensor Friction coefficient between pusher and object nx : tensor x-component of the normal ny : tensor y-component of the normal contact : tensor Indicates if the pusher is in contact with the object at all Returns ------- vp_out : tensor the effective push dvpx : tensor Derivative of the x-component of vp_out with respect to the input values dvpx : tensor Derivative of the y-component of vp_out with respect to the input values keep_contact : tensor If the pusher will still be in contact with the object after the push """ # dim_x = 10 bs = rx.get_shape()[0] fri = 100 * fr # for calculate the boundary forces of the friction cone ang = mu/180.*np.pi # ang = tf.math.atan(mu) normal = tf.concat([nnx, nny], axis=-1) normal_norm = tf.linalg.norm(normal, axis=-1) ok_normal = tf.greater(normal_norm, 1e-6) # if we don't have a normal, we simulate one to prevent nans normal_t = tf.where(ok_normal, normal, normal + tf.ones_like(normal)) normal_t = normal_t/tf.norm(normal_t, axis=-1, keepdims=True) nx = tf.slice(normal_t, [0, 0], [-1, 1]) nz = tf.slice(normal_t, [0, 1], [-1, 1]) # check if the normal points towards the object dir_center = - tf.concat([rx, ry], axis=-1) dir_center_norm = tf.linalg.norm(dir_center, axis=-1, keepdims=True) dir_center = tf.where(tf.greater(tf.squeeze(dir_center_norm), 0.), dir_center/dir_center_norm, dir_center) prod = tf.matmul(tf.reshape(dir_center, [-1, 1, 2]), tf.reshape(normal_t, [-1, 2, 1])) prod = tf.clip_by_value(prod, -0.999999999, 0.999999999) n_ang = tf.acos(tf.reshape(prod, [-1])) # # correct values over 180 deg. # n_ang = tf.where(tf.greater(tf.abs(n_ang), np.pi), # 2*np.pi - tf.abs(n_ang), tf.abs(n_ang)) # if the angle is greater than 90 degree, the normal is incorrect ok_normal = tf.logical_and(ok_normal, tf.less(tf.abs(n_ang), np.pi/2. 
+ 0.1)) # same for the push push_norm = tf.linalg.norm(action, axis=-1) push = tf.greater(push_norm, 1e-6) action_t = tf.where(push, tf.identity(action), action + tf.ones_like(action)) uux = tf.slice(action_t, [0, 0], [-1, 1]) uuz = tf.slice(action_t, [0, 1], [-1, 1]) # new method using rotation matrix sin1 = tf.sin(ang) cos = tf.cos(ang) sin1 = tf.sin(ang) cos = tf.cos(ang) t11 = tf.concat([cos[:, :, None], -sin1[:, :, None]], axis=-1) t12 = tf.concat([sin1[:, :, None], cos[:, :, None]], axis=-1) rot_mat1 = tf.concat(axis=1, values=[t11, t12]) sin2 = tf.sin(-ang) t21 = tf.concat([cos[:, :, None], -sin2[:, :, None]], axis=-1) t22 = tf.concat([sin2[:, :, None], cos[:, :, None]], axis=-1) rot_mat2 = tf.concat(axis=1, values=[t21, t22]) # rotate the normal to get the boundary forces fb1 = tf.matmul(rot_mat1, tf.reshape(normal_t, [-1, 2, 1])) fb2 = tf.matmul(rot_mat2, tf.reshape(normal_t, [-1, 2, 1])) fbx1, fbz1 = tf.unstack(tf.reshape(fb1, [-1, 2]), axis=-1) fbx2, fbz2 = tf.unstack(tf.reshape(fb2, [-1, 2]), axis=-1) # torque m1 = tf.multiply(rx, fbz1[:, None]) - tf.multiply(ry, fbx1[:, None]) m2 = tf.multiply(rx, fbz2[:, None]) - tf.multiply(ry, fbx2[:, None]) # calculate the velocity at the contact point induced by the # boundary-forces vx_tmp1 = tf.multiply(fri, fbx1[:, None]) vz_tmp1 = tf.multiply(fri, fbz1[:, None]) vx_tmp2 = tf.multiply(fri, fbx2[:, None]) vz_tmp2 = tf.multiply(fri, fbz2[:, None]) omega1 = m1 omega2 = m2 vbx1 = vx_tmp1 - tf.multiply(omega1, ry) vbz1 = vz_tmp1 + tf.multiply(omega1, rx) vbx2 = vx_tmp2 - tf.multiply(omega2, ry) vbz2 = vz_tmp2 + tf.multiply(omega2, rx) # if we have the slipping case, we need to find the correct # boundary velocity and the scaling factor ang1 = tf.divide(tf.multiply(vbx1, uux)+tf.multiply(vbz1, uuz), tf.multiply(tf.sqrt(tf.square(uux)+tf.square(uuz)), tf.sqrt(tf.square(vbx1)+tf.square(vbz1)))) ang2 = tf.divide(tf.multiply(vbx2, uux)+tf.multiply(vbz2, uuz), tf.multiply(tf.sqrt(tf.square(uux)+tf.square(uuz)), tf.sqrt(tf.square(vbx2)+tf.square(vbz2)))) # if the angle between the push and one of the boundarie # velocities is greater than the angle between the two # boundary velocities, the push is sliding ang3 = tf.divide(tf.multiply(vbx2, vbx1)+tf.multiply(vbz2, vbz1), tf.multiply(tf.sqrt(tf.square(vbx1)+tf.square(vbz1)), tf.sqrt(tf.square(vbx2)+tf.square(vbz2)))) vb = tf.where(tf.squeeze(tf.greater_equal(ang1, ang2)), tf.concat([vbx1, vbz1], axis=1), tf.concat([vbx2, vbz2], axis=1)) vbx = tf.slice(vb, [0, 0], [-1, 1]) vbz = tf.slice(vb, [0, 1], [-1, 1]) kappa = tf.divide(tf.multiply(nx, uux) + tf.multiply(nz, uuz), tf.multiply(nx, vbx) + tf.multiply(nz, vbz)) sticking = tf.logical_and(tf.less_equal(ang3, ang1), tf.less_equal(ang3, ang2)) vp = tf.multiply(kappa, vb) # check sticking or sliding vp_out = tf.where(tf.squeeze(sticking), action, vp) # if the normal or action were not properly defined, return the action # to not create any dependencies vp_out = tf.where(tf.logical_and(tf.squeeze(ok_normal), tf.squeeze(push)), vp_out, action) # check if the pusher moves away from the contact normed_a = action_t/tf.linalg.norm(action_t, axis=-1, keepdims=True) push_angle = tf.squeeze(tf.matmul(normed_a[:, None, :], normal_t[:, :, None])) # happens at an angle of greater than 91 deg lose_contact = tf.squeeze(tf.less(push_angle, -1e-2)) # we can only break contact if there was contact in the first place lose_contact = tf.logical_and(lose_contact, tf.squeeze(tf.greater(contact, 0.))) # and if both normal and action were properly defined lose_contact = 
tf.logical_and(lose_contact, tf.squeeze(ok_normal)) lose_contact = tf.logical_and(lose_contact, tf.squeeze(push)) # in this case, the resulting push velocity is zero vp_out = tf.where(lose_contact, 0*vp_out, vp_out) vpx = tf.slice(vp_out, [0, 0], [-1, 1]) vpy = tf.slice(vp_out, [0, 1], [-1, 1]) # gradients dvpx = tf.stack([tf.reshape(-tf.gradients(vpx, rx)[0], [bs, 1]), tf.reshape(-tf.gradients(vpx, ry)[0], [bs, 1]), tf.zeros([bs, 1]), tf.reshape(tf.gradients(vpx, fr)[0], [bs, 1]), tf.reshape(tf.gradients(vpx, mu)[0], [bs, 1]), tf.reshape(tf.gradients(vpx, rx)[0], [bs, 1]), tf.reshape(tf.gradients(vpx, ry)[0], [bs, 1]), tf.reshape(tf.gradients(vpx, nnx)[0], [bs, 1]), tf.reshape(tf.gradients(vpx, nny)[0], [bs, 1]), tf.zeros([bs, 1])], axis=-1) dvpy = tf.stack([tf.reshape(-tf.gradients(vpy, rx)[0], [bs, 1]), tf.reshape(-tf.gradients(vpy, ry)[0], [bs, 1]), tf.zeros([bs, 1]), tf.reshape(tf.gradients(vpy, fr)[0], [bs, 1]), tf.reshape(tf.gradients(vpy, mu)[0], [bs, 1]), tf.reshape(tf.gradients(vpy, rx)[0], [bs, 1]), tf.reshape(tf.gradients(vpy, ry)[0], [bs, 1]), tf.reshape(tf.gradients(vpy, nnx)[0], [bs, 1]), tf.reshape(tf.gradients(vpy, nny)[0], [bs, 1]), tf.zeros([bs, 1])], axis=-1) return vp_out, dvpx, dvpy, tf.logical_not(lose_contact) ########################################################################### # projections between 2d and 3d ########################################################################### def _to_2d(point, in_frame='world'): w2c = np.array([[0., 1., 0., 0.], [0.66896468, -0., -0.74329412, -0.], [-0.74329412, -0., -0.66896468, 0.67268115], [0.0, 0.00, 0.0, 1.0]], dtype=np.float32) fx = 231.764480591 fy = 231.76448822021484 if in_frame != 'camera': point = tf.slice(point, [0, 0], [-1, 3]) point = _to_cam_frame(point, w2c) xs = tf.slice(point, [0, 0], [-1, 1]) ys = tf.slice(point, [0, 1], [-1, 1]) zs = tf.slice(point, [0, 2], [-1, 1]) # project out = [tf.divide(xs, zs) * fx, tf.divide(ys, zs)*fy] out = tf.concat(out, axis=1) return out def _to_3d(point, image): w2c = np.array([[ 0., 1., 0., 0.], [ 0.66896468, -0., -0.74329412, -0.], [-0.74329412, -0., -0.66896468, 0.67268115], [0.0, 0.00, 0.0, 1.0]], dtype=np.float32) c2w = np.linalg.inv(w2c) fx = 231.764480591 fy = 231.76448822021484 # fx = 289.7056007385254 # fy = 289.70561027526855 width = image.get_shape()[2].value height = image.get_shape()[1].value shape = point.get_shape() # get the z-value # grab 4 nearest corner points around the pixel coordinates coords_x = tf.slice(point, [0, 0], [-1, 1]) coords_y = tf.slice(point, [0, 1], [-1, 1]) x = coords_x + (width / 2.) y = coords_y + (height / 2.) 
x0s = tf.cast(tf.floor(x), 'int32') x1s = x0s + 1 y0s = tf.cast(tf.floor(y), 'int32') y1s = y0s + 1 # Limit the coordinates to be inside of the image x0s = tf.clip_by_value(x0s, 0, width-1) x1s = tf.clip_by_value(x1s, 0, width-1) y0s = tf.clip_by_value(y0s, 0, height-1) y1s = tf.clip_by_value(y1s, 0, height-1) zs = [] for ind, b in enumerate(tf.unstack(image)): x_c = tf.unstack(x)[ind] y_c = tf.unstack(y)[ind] x0 = tf.unstack(x0s)[ind] x1 = tf.unstack(x1s)[ind] y0 = tf.unstack(y0s)[ind] y1 = tf.unstack(y1s)[ind] # transform the 4 corner points to indices in the # flattened source image base_y0 = y0*width base_y1 = y1*width idx_a = base_y1 + x1 idx_b = base_y0 + x1 idx_c = base_y1 + x0 idx_d = base_y0 + x0 # weighten each corner point according to its distance # to the actual target point x0_f = tf.cast(x0, 'float32') x1_f = tf.cast(x1, 'float32') y0_f = tf.cast(y0, 'float32') y1_f = tf.cast(y1, 'float32') wa = tf.multiply((x1_f - x_c), (y1_f - y_c)) wb = tf.multiply((x1_f - x_c), (y_c - y0_f)) wc = tf.multiply((x_c - x0_f), (y1_f - y_c)) wd = tf.multiply((x_c - x0_f), (y_c - y0_f)) # the interpolation weights should sum up to one (or zero) # so we normalize them norm = tf.add_n([wa, wb, wc, wd]) binary_mask = tf.logical_and(tf.greater(norm, 0.), tf.less(norm, 1.)) wa = tf.divide(wa, tf.where(binary_mask, norm, tf.ones_like(norm, dtype=tf.float32))) wb = tf.divide(wb, tf.where(binary_mask, norm, tf.ones_like(norm, dtype=tf.float32))) wc = tf.divide(wc, tf.where(binary_mask, norm, tf.ones_like(norm, dtype=tf.float32))) wd = tf.divide(wd, tf.where(binary_mask, norm, tf.ones_like(norm, dtype=tf.float32))) # use indices to lookup pixels in the flattened images flat = tf.reshape(b, [-1]) flat = tf.cast(flat, 'float32') a = tf.gather(flat, idx_a) b = tf.gather(flat, idx_b) c = tf.gather(flat, idx_c) d = tf.gather(flat, idx_d) zs += [tf.math.abs(tf.reshape(tf.add_n([wa*a, wb*b, wc*c, wd*d]), [1]))] zs = tf.stop_gradient(tf.stack(zs)) # unproject out = tf.concat([coords_x*zs/fx, coords_y*zs/fy, zs, tf.constant(1., shape=[shape[0].value, 1])], axis=1) # transform to world frame out = tf.matmul(c2w, tf.expand_dims(out, -1)) out = tf.reshape(out, [shape[0].value, 4]) return tf.slice(out, [0, 0], [-1, 3]) def _to_3d_d(point, image, target): w2c = np.array([[0., 1., 0., 0.], [0.66896468, -0., -0.74329412, -0.], [-0.74329412, -0., -0.66896468, 0.67268115], [0.0, 0.00, 0.0, 1.0]], dtype=np.float32) c2w = np.linalg.inv(w2c) fx = 231.764480591 fy = 231.76448822021484 width = image.get_shape()[2].value height = image.get_shape()[1].value target_cam = _to_cam_frame(target, w2c) shape = point.get_shape() # get the z-value # grab 4 nearest corner points around the pixel coordinates coords_x = tf.slice(point, [0, 0], [-1, 1]) coords_y = tf.slice(point, [0, 1], [-1, 1]) x = coords_x + (width / 2.) y = coords_y + (height / 2.) 
x0s = tf.cast(tf.floor(x), 'int32') x1s = x0s + 1 y0s = tf.cast(tf.floor(y), 'int32') y1s = y0s + 1 # Limit the coordinates to be inside of the image x0s = tf.clip_by_value(x0s, 0, width-1) x1s = tf.clip_by_value(x1s, 0, width-1) y0s = tf.clip_by_value(y0s, 0, height-1) y1s = tf.clip_by_value(y1s, 0, height-1) zs = [] for ind, b in enumerate(tf.unstack(image)): x_c = tf.unstack(x)[ind] y_c = tf.unstack(y)[ind] x0 = tf.unstack(x0s)[ind] x1 = tf.unstack(x1s)[ind] y0 = tf.unstack(y0s)[ind] y1 = tf.unstack(y1s)[ind] # transform the 4 corner points to indices in the # flattened source image base_y0 = y0*width base_y1 = y1*width idx_a = base_y1 + x1 idx_b = base_y0 + x1 idx_c = base_y1 + x0 idx_d = base_y0 + x0 # weighten each corner point according to its distance # to the actual target point x0_f = tf.cast(x0, 'float32') x1_f = tf.cast(x1, 'float32') y0_f = tf.cast(y0, 'float32') y1_f = tf.cast(y1, 'float32') wa = tf.multiply((x1_f - x_c), (y1_f - y_c)) wb = tf.multiply((x1_f - x_c), (y_c - y0_f)) wc = tf.multiply((x_c - x0_f), (y1_f - y_c)) wd = tf.multiply((x_c - x0_f), (y_c - y0_f)) # the interpolation weights should sum up to one (or zero) # so we normalize them norm = tf.add_n([wa, wb, wc, wd]) binary_mask = tf.logical_and(tf.greater(norm, 0.), tf.less(norm, 1.)) wa = tf.divide(wa, tf.where(binary_mask, norm, tf.ones_like(norm, dtype=tf.float32))) wb = tf.divide(wb, tf.where(binary_mask, norm, tf.ones_like(norm, dtype=tf.float32))) wc = tf.divide(wc, tf.where(binary_mask, norm, tf.ones_like(norm, dtype=tf.float32))) wd = tf.divide(wd, tf.where(binary_mask, norm, tf.ones_like(norm, dtype=tf.float32))) # use indices to lookup pixels in the flattened images flat = tf.reshape(b, [-1]) flat = tf.cast(flat, 'float32') a = tf.gather(flat, idx_a) b = tf.gather(flat, idx_b) c = tf.gather(flat, idx_c) d = tf.gather(flat, idx_d) zs += [tf.math.abs(tf.reshape(tf.add_n([wa*a, wb*b, wc*c, wd*d]), [1]))] zs = tf.stop_gradient(tf.stack(zs)) diff = tf.abs(zs - target_cam[:, 2:]) zs = tf.where(tf.greater(diff, 0.05), target_cam[:, 2:], zs) # unproject out = tf.concat([coords_x*zs/fx, coords_y*zs/fy, zs, tf.constant(1., shape=[shape[0].value, 1])], axis=1) # transform to world frame out = tf.matmul(c2w, tf.expand_dims(out, -1)) out = tf.reshape(out, [shape[0].value, 4]) return tf.slice(out, [0, 0], [-1, 3]) def _to_world_frame(point, c2w): shape = point.get_shape() if shape[-1] < 4: point = tf.concat([point, tf.ones(shape=[shape[0].value, 1])], axis=1) out = tf.matmul(c2w, tf.expand_dims(point, -1)) out = tf.reshape(out, [shape[0].value, 4]) return tf.slice(out, [0, 0], [-1, 3]) def _to_cam_frame(point, w2c): shape = point.get_shape() if shape[-1] < 4: point = tf.concat([point, tf.ones(shape=[shape[0].value, 1])], axis=1) out = tf.matmul(w2c, tf.expand_dims(point, -1)) out = tf.reshape(out, [shape[0].value, 4]) return tf.slice(out, [0, 0], [-1, 3])
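Editor's note: the velocity model above is easier to sanity-check outside a TF graph. A minimal NumPy restatement of get_vel_model (the function name and scalar-friendly signature are my own; the algebra is copied from the code above):

import numpy as np

def vel_model_numpy(ux, uy, rx, ry, fr):
    """Plain-NumPy analogue of get_vel_model for scalar or ndarray inputs."""
    div = fr + rx**2 + ry**2
    tx = ((fr + rx**2) * ux + rx * ry * uy) / div   # object translation in x
    ty = ((fr + ry**2) * uy + rx * ry * ux) / div   # object translation in y
    rot = (rx * ty - ry * tx) / fr                  # object rotation
    return tx, ty, rot

# e.g. a push along +x applied off-center (ry != 0) also rotates the object:
# vel_model_numpy(1.0, 0.0, 0.0, 0.1, 1.0) -> (~0.990, 0.0, ~-0.099)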
37.303688
80
0.554893
5,067
34,394
3.681468
0.087823
0.034845
0.005146
0.003431
0.818538
0.7838
0.743916
0.726761
0.701083
0.697545
0
0.055388
0.297116
34,394
921
81
37.344191
0.716236
0.242746
0
0.578093
0
0
0.006257
0
0
0
0
0
0
1
0.022312
false
0
0.004057
0
0.048682
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
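The module above bilinearly samples a depth image and then unprojects pixels through a pinhole camera model. Below is a minimal NumPy sketch of that unprojection math; fx, fy and the w2c matrix are copied from the record, while the sample pixel and depth are invented:

import numpy as np

# Intrinsics and world-to-camera extrinsics copied from the record above.
fx = fy = 231.76448
w2c = np.array([[0., 1., 0., 0.],
                [0.66896468, -0., -0.74329412, -0.],
                [-0.74329412, -0., -0.66896468, 0.67268115],
                [0., 0., 0., 1.]], dtype=np.float32)
c2w = np.linalg.inv(w2c)

def unproject(px, py, z):
    """Lift a centred pixel (px, py) with depth z into world coordinates."""
    cam = np.array([px * z / fx, py * z / fy, z, 1.0], dtype=np.float32)
    return (c2w @ cam)[:3]

print(unproject(10.0, -4.0, 1.5))

The same x*z/fx, y*z/fy, z construction appears in the "# unproject" step of both functions in the record.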
ed9e43c35b17827f2be8e68588df029645aae17c
1,147
py
Python
fabtools/tests/test_vagrant_version.py
bfolliot/fabtools
e9744c282144c225563d915571de4f52cd772fa9
[ "BSD-2-Clause" ]
null
null
null
fabtools/tests/test_vagrant_version.py
bfolliot/fabtools
e9744c282144c225563d915571de4f52cd772fa9
[ "BSD-2-Clause" ]
null
null
null
fabtools/tests/test_vagrant_version.py
bfolliot/fabtools
e9744c282144c225563d915571de4f52cd772fa9
[ "BSD-2-Clause" ]
null
null
null
import unittest

from mock import patch


class TestVagrantVersion(unittest.TestCase):

    def test_vagrant_version_1_3_0(self):
        with patch('fabtools.vagrant.local') as mock_local:
            mock_local.return_value = "Vagrant version 1.3.0\n"
            from fabtools.vagrant import version
            self.assertEqual(version(), (1, 3, 0))

    def test_vagrant_version_1_3_1(self):
        with patch('fabtools.vagrant.local') as mock_local:
            mock_local.return_value = "Vagrant v1.3.1\n"
            from fabtools.vagrant import version
            self.assertEqual(version(), (1, 3, 1))

    def test_vagrant_version_1_4_3(self):
        with patch('fabtools.vagrant.local') as mock_local:
            mock_local.return_value = "Vagrant 1.4.3\n"
            from fabtools.vagrant import version
            self.assertEqual(version(), (1, 4, 3))

    def test_vagrant_version_1_5_0_dev(self):
        with patch('fabtools.vagrant.local') as mock_local:
            mock_local.return_value = "Vagrant 1.5.0.dev\n"
            from fabtools.vagrant import version
            self.assertEqual(version(), (1, 5, 0, 'dev'))
37
63
0.650392
158
1,147
4.512658
0.177215
0.100982
0.105189
0.117812
0.873773
0.782609
0.718093
0.718093
0.718093
0.718093
0
0.04157
0.244987
1,147
30
64
38.233333
0.781755
0
0
0.347826
0
0
0.142982
0.076722
0
0
0
0
0.173913
1
0.173913
false
0
0.26087
0
0.478261
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
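The fabtools function under test is not included in this record, so here is a hypothetical parser that would satisfy all four tests above (parse_vagrant_version is an invented name, not the real fabtools implementation):

def parse_vagrant_version(output):
    # Take the last whitespace-separated token, drop a leading 'v', and
    # split on dots, keeping non-numeric suffixes such as 'dev' as strings.
    token = output.strip().split()[-1].lstrip('v')
    return tuple(int(p) if p.isdigit() else p for p in token.split('.'))

assert parse_vagrant_version("Vagrant version 1.3.0\n") == (1, 3, 0)
assert parse_vagrant_version("Vagrant v1.3.1\n") == (1, 3, 1)
assert parse_vagrant_version("Vagrant 1.4.3\n") == (1, 4, 3)
assert parse_vagrant_version("Vagrant 1.5.0.dev\n") == (1, 5, 0, 'dev')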
edb464b47678a1a0fe0df210e41b0c8f7ba90fa8
213
py
Python
indico/queries/__init__.py
IndicoDataSolutions/indico-client-python
8184199ac2047166afcf246f94f2126dbd5c72ff
[ "MIT" ]
2
2021-08-17T12:59:27.000Z
2022-02-11T18:19:50.000Z
indico/queries/__init__.py
IndicoDataSolutions/indico-client-python
8184199ac2047166afcf246f94f2126dbd5c72ff
[ "MIT" ]
31
2020-03-24T12:02:24.000Z
2022-02-07T15:01:20.000Z
indico/queries/__init__.py
IndicoDataSolutions/indico-client-python
8184199ac2047166afcf246f94f2126dbd5c72ff
[ "MIT" ]
1
2020-10-19T16:18:48.000Z
2020-10-19T16:18:48.000Z
from .datasets import *
from .model_groups import *
from .jobs import *
from .documents import *
from .storage import *
from .submission import *
from .workflow import *
from .forms import *
from .export import *
21.3
27
0.746479
28
213
5.642857
0.428571
0.506329
0
0
0
0
0
0
0
0
0
0
0.169014
213
9
28
23.666667
0.892655
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
b6a3b317a028d2058d2f80c05393a9db376f6dd1
39
py
Python
pyrism/Closures/__init__.py
2AUK/pyrism
7067fa7a261adc2faabcffbcb2d40d395e42a3c8
[ "MIT" ]
4
2020-10-26T14:32:08.000Z
2021-03-26T01:23:37.000Z
pyrism/Closures/__init__.py
2AUK/pyrism
7067fa7a261adc2faabcffbcb2d40d395e42a3c8
[ "MIT" ]
1
2021-09-17T18:21:19.000Z
2021-11-22T00:01:46.000Z
pyrism/Closures/__init__.py
2AUK/pyrism
7067fa7a261adc2faabcffbcb2d40d395e42a3c8
[ "MIT" ]
1
2022-03-08T12:00:35.000Z
2022-03-08T12:00:35.000Z
from .closure_dispatcher import Closure
39
39
0.897436
5
39
6.8
0.8
0
0
0
0
0
0
0
0
0
0
0
0.076923
39
1
39
39
0.944444
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
fcd569c0f6a830193aed1126cd76dae13cd8ce6d
198
py
Python
Trakttv.bundle/Contents/Libraries/Shared/plugin/sync/modes/fast_pull/lists/__init__.py
disrupted/Trakttv.bundle
24712216c71f3b22fd58cb5dd89dad5bb798ed60
[ "RSA-MD" ]
1,346
2015-01-01T14:52:24.000Z
2022-03-28T12:50:48.000Z
Trakttv.bundle/Contents/Libraries/Shared/plugin/sync/modes/fast_pull/lists/__init__.py
alcroito/Plex-Trakt-Scrobbler
4f83fb0860dcb91f860d7c11bc7df568913c82a6
[ "RSA-MD" ]
474
2015-01-01T10:27:46.000Z
2022-03-21T12:26:16.000Z
Trakttv.bundle/Contents/Libraries/Shared/plugin/sync/modes/fast_pull/lists/__init__.py
alcroito/Plex-Trakt-Scrobbler
4f83fb0860dcb91f860d7c11bc7df568913c82a6
[ "RSA-MD" ]
191
2015-01-02T18:27:22.000Z
2022-03-29T10:49:48.000Z
from plugin.sync.modes.fast_pull.lists.liked import LikedLists
from plugin.sync.modes.fast_pull.lists.personal import PersonalLists
from plugin.sync.modes.fast_pull.lists.watchlist import Watchlist
49.5
68
0.863636
30
198
5.6
0.433333
0.178571
0.25
0.339286
0.571429
0.571429
0.571429
0
0
0
0
0
0.060606
198
3
69
66
0.903226
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
1e07393c0279fc81e738bc4622959783410211bb
8,182
py
Python
hyperion/dust/tests/test_optical_properties.py
bluescarni/hyperion
4a0d33fcbd3943b5bcfbd318f11e199d2498956d
[ "BSD-2-Clause" ]
2
2015-05-14T17:26:16.000Z
2019-03-13T17:33:18.000Z
hyperion/dust/tests/test_optical_properties.py
bluescarni/hyperion
4a0d33fcbd3943b5bcfbd318f11e199d2498956d
[ "BSD-2-Clause" ]
null
null
null
hyperion/dust/tests/test_optical_properties.py
bluescarni/hyperion
4a0d33fcbd3943b5bcfbd318f11e199d2498956d
[ "BSD-2-Clause" ]
null
null
null
from __future__ import print_function, division

from astropy.tests.helper import pytest
import numpy as np
from numpy.testing import assert_array_almost_equal_nulp

from ..optical_properties import OpticalProperties
from ...util.constants import c


def test_init():
    OpticalProperties()


VECTOR_ATTRIBUTES = ['nu', 'chi', 'albedo', 'mu']
ARRAY_ATTRIBUTES = ['P1', 'P2', 'P3', 'P4']


@pytest.mark.parametrize(('attribute'), VECTOR_ATTRIBUTES)
def test_set_vector_list(attribute):
    o = OpticalProperties()
    setattr(o, attribute, [0.1, 0.2, 0.3])


@pytest.mark.parametrize(('attribute'), VECTOR_ATTRIBUTES)
def test_set_vector_array(attribute):
    o = OpticalProperties()
    setattr(o, attribute, np.array([0.1, 0.2, 0.3]))


@pytest.mark.parametrize(('attribute'), VECTOR_ATTRIBUTES)
def test_set_vector_invalid_type1(attribute):
    o = OpticalProperties()
    with pytest.raises(ValueError) as exc:
        setattr(o, attribute, 'hello')
    assert exc.value.args[0] == attribute + ' should be a 1-D sequence'


@pytest.mark.parametrize(('attribute'), VECTOR_ATTRIBUTES)
def test_set_vector_invalid_type2(attribute):
    o = OpticalProperties()
    with pytest.raises(ValueError) as exc:
        setattr(o, attribute, 0.5)
    assert exc.value.args[0] == attribute + ' should be a 1-D sequence'


@pytest.mark.parametrize(('attribute'), VECTOR_ATTRIBUTES)
def test_set_vector_invalid_shape1(attribute):
    o = OpticalProperties()
    with pytest.raises(ValueError) as exc:
        setattr(o, attribute, [[0., 1.], [0.5, 1.]])
    assert exc.value.args[0] == attribute + ' should be a 1-D sequence'


@pytest.mark.parametrize(('attribute'), VECTOR_ATTRIBUTES)
def test_set_vector_invalid_shape2(attribute):
    o = OpticalProperties()
    with pytest.raises(ValueError) as exc:
        setattr(o, attribute, np.array([[0., 1.], [0.5, 1.]]))
    assert exc.value.args[0] == attribute + ' should be a 1-D sequence'


@pytest.mark.parametrize(('attribute'), ['nu', 'mu'])
def test_set_vector_invalid_order(attribute):
    o = OpticalProperties()
    with pytest.raises(ValueError) as exc:
        setattr(o, attribute, [0.3, 0.1, 0.2])
    assert exc.value.args[0] == attribute + ' should be monotonically increasing'


def test_range_nu_valid1():
    o = OpticalProperties()
    o.nu = [0.1, 0.5, 0.8]


def test_range_nu_invalid1():
    o = OpticalProperties()
    with pytest.raises(ValueError) as exc:
        o.nu = [0., 0.5, 0.8]
    assert exc.value.args[0] == 'nu should be strictly positive'


def test_range_nu_invalid2():
    o = OpticalProperties()
    with pytest.raises(ValueError) as exc:
        o.nu = [-1., 0.5, 0.8]
    assert exc.value.args[0] == 'nu should be strictly positive'


def test_range_chi_valid1():
    o = OpticalProperties()
    o.chi = [0.1, 0.5, 0.8]


def test_range_chi_valid2():
    o = OpticalProperties()
    o.chi = [0., 0.5, 0.8]


def test_range_chi_invalid1():
    o = OpticalProperties()
    with pytest.raises(ValueError) as exc:
        o.chi = [-1., 0.5, 0.8]
    assert exc.value.args[0] == 'chi should be positive'


def test_range_albedo_valid1():
    o = OpticalProperties()
    o.albedo = [0., 0.5, 1.]


def test_range_albedo_invalid1():
    o = OpticalProperties()
    with pytest.raises(ValueError) as exc:
        o.albedo = [-1., 0.5, 0.8]
    assert exc.value.args[0] == 'albedo should be in the range [0:1]'


def test_range_albedo_invalid2():
    o = OpticalProperties()
    with pytest.raises(ValueError) as exc:
        o.albedo = [0., 0.5, 1.1]
    assert exc.value.args[0] == 'albedo should be in the range [0:1]'


def test_range_mu_valid1():
    o = OpticalProperties()
    o.mu = [-0.5, 0., 0.5]


def test_range_mu_valid2():
    o = OpticalProperties()
    o.mu = [-1., 0., 1.]


def test_range_mu_invalid1():
    o = OpticalProperties()
    with pytest.raises(ValueError) as exc:
        o.mu = [-1.3, 0., 1.]
    assert exc.value.args[0] == 'mu should be in the range [-1:1]'


def test_range_mu_invalid2():
    o = OpticalProperties()
    with pytest.raises(ValueError) as exc:
        o.mu = [-1., 0., 1.3]
    assert exc.value.args[0] == 'mu should be in the range [-1:1]'


@pytest.mark.parametrize(('attribute'), ARRAY_ATTRIBUTES)
def test_set_array_list(attribute):
    o = OpticalProperties()
    o.nu = [0.1, 0.2, 0.3]
    o.mu = [-0.5, 0.5]
    setattr(o, attribute, [[1., 2.], [0., 1.], [3., 4.]])


@pytest.mark.parametrize(('attribute'), ARRAY_ATTRIBUTES)
def test_set_array_array(attribute):
    o = OpticalProperties()
    o.nu = [0.1, 0.2, 0.3]
    o.mu = [-0.5, 0.5]
    setattr(o, attribute, np.ones((3, 2)))


@pytest.mark.parametrize(('attribute'), ARRAY_ATTRIBUTES)
def test_set_array_invalid_type1(attribute):
    o = OpticalProperties()
    o.nu = [0.1, 0.2, 0.3]
    o.mu = [-0.5, 0.5]
    with pytest.raises(ValueError) as exc:
        setattr(o, attribute, 'hello')
    assert exc.value.args[0] == attribute + ' should be a 2-D array'


@pytest.mark.parametrize(('attribute'), ARRAY_ATTRIBUTES)
def test_set_array_invalid_type2(attribute):
    o = OpticalProperties()
    o.nu = [0.1, 0.2, 0.3]
    o.mu = [-0.5, 0.5]
    with pytest.raises(ValueError) as exc:
        setattr(o, attribute, 2.123)
    assert exc.value.args[0] == attribute + ' should be a 2-D array'


@pytest.mark.parametrize(('attribute'), ARRAY_ATTRIBUTES)
def test_set_array_invalid_shape1(attribute):
    o = OpticalProperties()
    o.nu = [0.1, 0.2, 0.3]
    o.mu = [-0.5, 0.5]
    with pytest.raises(ValueError) as exc:
        setattr(o, attribute, [1., 2., 3.])
    assert exc.value.args[0] == attribute + ' should be a 2-D array'


@pytest.mark.parametrize(('attribute'), ARRAY_ATTRIBUTES)
def test_set_array_invalid_shape2(attribute):
    o = OpticalProperties()
    o.nu = [0.1, 0.2, 0.3]
    o.mu = [-0.5, 0.5]
    with pytest.raises(ValueError) as exc:
        setattr(o, attribute, np.ones((4, 5)))
    assert exc.value.args[0] == attribute + ' has an incorrect shape: (4, 5) but expected (3, 2)'


@pytest.mark.parametrize(('attribute'), ARRAY_ATTRIBUTES)
def test_set_array_invalid_order1(attribute):
    o = OpticalProperties()
    o.nu = [0.1, 0.2, 0.3]
    with pytest.raises(ValueError) as exc:
        setattr(o, attribute, np.ones((3, 2)))
    assert exc.value.args[0] == 'mu needs to be set before ' + attribute


@pytest.mark.parametrize(('attribute'), ARRAY_ATTRIBUTES)
def test_set_array_invalid_order2(attribute):
    o = OpticalProperties()
    o.mu = [-0.5, 0.5]
    with pytest.raises(ValueError) as exc:
        setattr(o, attribute, np.ones((3, 2)))
    assert exc.value.args[0] == 'nu needs to be set before ' + attribute


def test_extrapolate_inner_range():
    o = OpticalProperties()
    o.nu = np.logspace(8., 10., 100)
    o.albedo = np.repeat(0.5, 100)
    o.chi = np.ones(100)
    o.mu = [-1., 1.]
    o.initialize_scattering_matrix()
    o.extrapolate_nu(1e9, 2e9)
    assert o.nu[0] == 1.e8 and o.nu[-1] == 1.e10


def test_extrapolate_upper():
    o = OpticalProperties()
    o.nu = np.logspace(8., 10., 100)
    o.albedo = np.repeat(0.5, 100)
    o.chi = np.ones(100)
    o.mu = [-1., 1.]
    o.initialize_scattering_matrix()
    o.extrapolate_nu(1e9, 1e11)
    assert o.nu[0] == 1.e8 and o.nu[-1] == 1.e11


def test_extrapolate_lower():
    o = OpticalProperties()
    o.nu = np.logspace(8., 10., 100)
    o.albedo = np.repeat(0.5, 100)
    o.chi = np.ones(100)
    o.mu = [-1., 1.]
    o.initialize_scattering_matrix()
    o.extrapolate_nu(1e7, 1e9)
    assert o.nu[0] == 1.e7 and o.nu[-1] == 1.e10


def test_extrapolate_both():
    o = OpticalProperties()
    o.nu = np.logspace(8., 10., 100)
    o.albedo = np.repeat(0.5, 100)
    o.chi = np.ones(100)
    o.mu = [-1., 1.]
    o.initialize_scattering_matrix()
    o.extrapolate_nu(1e7, 1e11)
    assert o.nu[0] == 1.e7 and o.nu[-1] == 1.e11


def test_extrapolate_wav():
    o = OpticalProperties()
    o.nu = np.logspace(8., 10., 100)
    o.albedo = np.repeat(0.5, 100)
    o.chi = np.ones(100)
    o.mu = [-1., 1.]
    o.initialize_scattering_matrix()
    o.extrapolate_wav(1., 1.e20)
    assert_array_almost_equal_nulp(o.nu[0], c / 1.e16, 2)
    assert_array_almost_equal_nulp(o.nu[-1], c / 1.e-4, 2)
29.537906
97
0.647397
1,245
8,182
4.129317
0.095582
0.046295
0.07022
0.091033
0.884264
0.84789
0.813849
0.783311
0.766388
0.743046
0
0.058343
0.191396
8,182
276
98
29.644928
0.718712
0
0
0.595122
0
0
0.084331
0
0
0
0
0
0.121951
1
0.165854
false
0
0.029268
0
0.195122
0.004878
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
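These tests pin down a validation contract for OpticalProperties setters. Below is a hypothetical re-creation of that contract for the nu attribute, matching the error messages asserted above (the real hyperion implementation is not shown in this record):

import numpy as np

class Validated(object):
    # Hypothetical sketch: nu must be a 1-D, monotonically increasing,
    # strictly positive frequency grid, as the tests above demand.
    @property
    def nu(self):
        return self._nu

    @nu.setter
    def nu(self, value):
        try:
            arr = np.asarray(value, dtype=float)
        except (TypeError, ValueError):
            raise ValueError("nu should be a 1-D sequence")
        if arr.ndim != 1:
            raise ValueError("nu should be a 1-D sequence")
        if np.any(np.diff(arr) <= 0):
            raise ValueError("nu should be monotonically increasing")
        if np.any(arr <= 0):
            raise ValueError("nu should be strictly positive")
        self._nu = arr

o = Validated()
o.nu = [0.1, 0.5, 0.8]       # accepted
try:
    o.nu = [0.3, 0.1, 0.2]   # rejected: not increasing
except ValueError as err:
    print(err)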
1e32394715d52cb308656166442b7bce8dac15b6
38
py
Python
src/polaris_follower/planners/__init__.py
jaskirat1208/turtlebot-polaris
fe40b0bcccaffab2ea2ba204905989ed81d69d14
[ "BSD-2-Clause" ]
null
null
null
src/polaris_follower/planners/__init__.py
jaskirat1208/turtlebot-polaris
fe40b0bcccaffab2ea2ba204905989ed81d69d14
[ "BSD-2-Clause" ]
null
null
null
src/polaris_follower/planners/__init__.py
jaskirat1208/turtlebot-polaris
fe40b0bcccaffab2ea2ba204905989ed81d69d14
[ "BSD-2-Clause" ]
null
null
null
from .base_planner import BasePlanner
19
37
0.868421
5
38
6.4
1
0
0
0
0
0
0
0
0
0
0
0
0.105263
38
1
38
38
0.941176
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
1e4d6d4d18b7a0551435b27f0037f456ad02809a
7,245
py
Python
aeronet_visu/data_loading.py
Tristanovsk/aeronet_visu
03905aa3f9aacae501f0af378afd885ca25cd981
[ "MIT" ]
null
null
null
aeronet_visu/data_loading.py
Tristanovsk/aeronet_visu
03905aa3f9aacae501f0af378afd885ca25cd981
[ "MIT" ]
null
null
null
aeronet_visu/data_loading.py
Tristanovsk/aeronet_visu
03905aa3f9aacae501f0af378afd885ca25cd981
[ "MIT" ]
null
null
null
import os
import pandas as pd
import numpy as np
import re


class read:
    def __init__(self, file):
        self.file = file

    def read_aeronet_ocv3(self, skiprows=8):
        '''
        Read and format in pandas data.frame the standard AERONET-OC data
        '''
        dateparse = lambda x: pd.datetime.strptime(x, "%d:%m:%Y %H:%M:%S")
        ifile = self.file
        h1 = pd.read_csv(ifile, skiprows=skiprows - 1, nrows=1).columns[3:]
        h1 = np.insert(h1, 0, 'site')
        data_type = h1.str.replace('\[.*\]', '')
        data_type = data_type.str.replace('Exact_Wave.*', 'wavelength')
        # convert into float to order the dataframe with increasing wavelength
        h2 = h1.str.replace('.*\[', '')
        h2 = h2.str.replace('nm\].*', '')
        h2 = h2.str.replace('Exact_Wavelengths\(um\)_', '')
        h2 = pd.to_numeric(h2, errors='coerce')  # h2.str.extract('(\d+)').astype('float')
        h2 = h2.fillna('').T
        df = pd.read_csv(ifile, skiprows=skiprows, na_values=['N/A', -999.0, -9.999999],
                         parse_dates={'date': [1, 2]}, date_parser=dateparse, index_col=False)
        # df['site'] = site
        # df.set_index(['site', 'date'],inplace=True)
        df.set_index('date', inplace=True)
        tuples = list(zip(h1, data_type, h2))
        df.columns = pd.MultiIndex.from_tuples(tuples, names=['l0', 'l1', 'l2'])
        df = df.dropna(axis=1, how='all').dropna(axis=0, how='all')
        df.columns = pd.MultiIndex.from_tuples([(x[0], x[1], x[2]) for x in df.columns])
        df.sort_index(axis=1, level=2, inplace=True)
        return df

    def read_aeronet_oc(self, skiprows=13):
        '''
        Read and format in pandas data.frame the standard AERONET-OC data
        '''
        dateparse = lambda x: pd.datetime.strptime(x, "%d:%m:%Y %H:%M:%S")
        ifile = self.file
        h1 = pd.read_csv(ifile, skiprows=skiprows - 2, nrows=1).columns[2:]
        h2 = pd.read_csv(ifile, skiprows=skiprows - 1, nrows=1).columns[2:]
        h1 = h1.append(h2[len(h1):])
        data_type = h1.str.replace('\(.*\)', '')
        data_type = data_type.str.replace('ExactWave.*', 'oc_wavelength')
        # convert into float to order the dataframe with increasing wavelength
        h2 = h2.str.extract('(\d+)').astype('float')
        h2 = h2.fillna('')
        df = pd.read_csv(ifile, skiprows=skiprows, na_values=['N/A', -999.0, -9.999999],
                         parse_dates={'date': [0, 1]}, date_parser=dateparse, index_col=False)
        # df['site'] = site
        # df.set_index(['site', 'date'],inplace=True)
        df.set_index('date', inplace=True)
        tuples = list(zip(h1, data_type, h2))
        df.columns = pd.MultiIndex.from_tuples(tuples, names=['l0', 'l1', 'l2'])
        df = df.dropna(axis=1, how='all').dropna(axis=0, how='all')
        df.sort_index(axis=1, level=2, inplace=True)
        return df

    def read_aeronet(self, skiprows=6):
        '''
        Read and format in pandas data.frame the V3 AERONET data
        '''
        ifile = self.file
        df = pd.read_csv(ifile, skiprows=skiprows, nrows=1)  # read just first line for columns
        columns = df.columns.tolist()  # get the columns
        cols_to_use = columns[:len(columns) - 1]  # drop the last one
        df = pd.read_csv(ifile, skiprows=skiprows, usecols=cols_to_use, index_col=False,
                         na_values=['N/A', -999.0])
        df = df.dropna(axis=1, how='all').dropna(axis=0, how='all')
        df.rename(columns={'AERONET_Site_Name': 'site',
                           'Last_Processing_Date(dd/mm/yyyy)': 'Last_Processing_Date'}, inplace=True)
        format = "%d:%m:%Y %H:%M:%S"
        df['date'] = pd.to_datetime(df[df.columns[0]] + ' ' + df[df.columns[1]], format=format)
        # df.set_index(['site','date'], inplace=True)
        df.set_index('date', inplace=True)
        df = df.drop(df.columns[[0, 1]], axis=1)
        # df['year'] = df.index.get_level_values(1).year

        # cleaning up
        df.drop(list(df.filter(regex='Input')), axis=1, inplace=True)
        df.drop(list(df.filter(regex='Empty')), axis=1, inplace=True)
        df.drop(list(df.filter(regex='Day')), axis=1, inplace=True)

        # indexing columns with spectral values
        data_type = df.columns.str.replace('AOD.*nm', 'aot')
        data_type = data_type.str.replace('Exact_Wave.*', 'wavelength')
        data_type = data_type.str.replace('Triplet.*[0-9]', 'std')
        data_type = data_type.str.replace(r'^(?!aot|std|wavelength).*$', '')
        wl_type = df.columns.str.extract('(\d+)').astype('float')
        wl_type = wl_type.fillna('')
        tuples = list(zip(df.columns, data_type, wl_type))
        df.columns = pd.MultiIndex.from_tuples(tuples, names=['l0', 'l1', 'l2'])
        if 'wavelength' in df.columns.levels[1]:
            df.loc[:, (slice(None), 'wavelength',)] = df.loc[:, (slice(None), 'wavelength')] * 1000  # convert into nm
        df = df.dropna(axis=1, how='all').dropna(axis=0, how='all')
        df.sort_index(axis=1, level=2, inplace=True)
        return df

    def read_aeronet_inv(self, skiprows=6):
        '''
        Read and format in pandas data.frame the V3 Aerosol Inversion AERONET data
        '''
        ifile = self.file
        df = pd.read_csv(ifile, skiprows=skiprows, nrows=1)  # read just first line for columns
        columns = df.columns.tolist()  # get the columns
        cols_to_use = columns[:len(columns) - 1]  # drop the last one
        df = pd.read_csv(ifile, skiprows=skiprows, usecols=cols_to_use, index_col=False,
                         na_values=['N/A', -999.0])
        df = df.dropna(axis=1, how='all').dropna(axis=0, how='all')
        df.rename(columns={'AERONET_Site_Name': 'site',
                           'Last_Processing_Date(dd/mm/yyyy)': 'Last_Processing_Date'}, inplace=True)
        format = "%d:%m:%Y %H:%M:%S"
        df['date'] = pd.to_datetime(df[df.columns[1]] + ' ' + df[df.columns[2]], format=format)
        # df.set_index(['site','date'], inplace=True)
        df.set_index('date', inplace=True)
        df = df.drop(df.columns[[0, 1]], axis=1)
        # df['year'] = df.index.get_level_values(1).year

        # cleaning up
        df.drop(list(df.filter(regex='Input')), axis=1, inplace=True)
        df.drop(list(df.filter(regex='Empty')), axis=1, inplace=True)
        df.drop(list(df.filter(regex='Day')), axis=1, inplace=True)
        df.drop(list(df.filter(regex='Angle_Bin')), axis=1, inplace=True)

        # indexing columns with spectral values
        data_type = df.columns.str.replace('AOD.*nm', 'aot')
        data_type = data_type.str.replace('Exact_Wave.*', 'wavelength')
        data_type = data_type.str.replace('Triplet.*[0-9]', 'std')
        data_type = data_type.str.replace(r'^(?!aot|std|wavelength).*$', '')
        wl_type = df.columns.str.extract('(\d+)').astype('float')
        wl_type = wl_type.fillna('')
        tuples = list(zip(df.columns, data_type, wl_type))
        df.columns = pd.MultiIndex.from_tuples(tuples, names=['l0', 'l1', 'l2'])
        if 'wavelength' in df.columns.levels[1]:
            df.loc[:, (slice(None), 'wavelength',)] = df.loc[:, (slice(None), 'wavelength')] * 1000  # convert into nm
        df = df.dropna(axis=1, how='all').dropna(axis=0, how='all')
        df.sort_index(axis=1, level=2, inplace=True)
        return df
48.624161
118
0.595721
1,052
7,245
3.993346
0.14924
0.045703
0.03404
0.029993
0.914306
0.914306
0.907403
0.907403
0.907403
0.889074
0
0.02731
0.22167
7,245
149
119
48.624161
0.71768
0.140097
0
0.67619
0
0
0.11271
0.022639
0
0
0
0
0
1
0.047619
false
0
0.038095
0
0.133333
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
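The readers above hinge on tagging each raw column with a (name, data_type, wavelength) triple. A toy pandas illustration of that MultiIndex pattern, with invented column names:

import pandas as pd

# AOD columns get a three-level MultiIndex so they can be sorted and
# sliced spectrally, as in the readers above.
df = pd.DataFrame({'AOD_440nm': [0.31], 'AOD_675nm': [0.18]})
wavelengths = df.columns.str.extract(r'(\d+)')[0].astype(float)
df.columns = pd.MultiIndex.from_tuples(
    [(name, 'aot', wl) for name, wl in zip(df.columns, wavelengths)],
    names=['l0', 'l1', 'l2'])
print(df.xs('aot', axis=1, level='l1'))  # select all optical-depth columns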
1e7d7248defc3787d71c7f0cfc021a1bee12e7e8
419
py
Python
src/api/domain/connection/LookupConnectorType/LookupConnectorTypeQuery.py
PythonDataIntegrator/pythondataintegrator
6167778c36c2295e36199ac0d4d256a4a0c28d7a
[ "MIT" ]
14
2020-12-19T15:06:13.000Z
2022-01-12T19:52:17.000Z
src/api/domain/connection/LookupConnectorType/LookupConnectorTypeQuery.py
PythonDataIntegrator/pythondataintegrator
6167778c36c2295e36199ac0d4d256a4a0c28d7a
[ "MIT" ]
43
2021-01-06T22:05:22.000Z
2022-03-10T10:30:30.000Z
src/api/domain/connection/LookupConnectorType/LookupConnectorTypeQuery.py
PythonDataIntegrator/pythondataintegrator
6167778c36c2295e36199ac0d4d256a4a0c28d7a
[ "MIT" ]
4
2020-12-18T23:10:09.000Z
2021-04-02T13:03:12.000Z
from dataclasses import dataclass

from infrastructure.cqrs.IQuery import IQuery
from domain.connection.LookupConnectorType.LookupConnectorTypeRequest import LookupConnectorTypeRequest
from domain.connection.LookupConnectorType.LookupConnectorTypeResponse import LookupConnectorTypeResponse


@dataclass
class LookupConnectorTypeQuery(IQuery[LookupConnectorTypeResponse]):
    request: LookupConnectorTypeRequest = None
41.9
105
0.892601
32
419
11.6875
0.5
0.053476
0.106952
0.208556
0
0
0
0
0
0
0
0
0.069212
419
9
106
46.555556
0.958974
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.571429
0
0.857143
0
0
0
1
null
0
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
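This record is a thin CQRS query object. A minimal self-contained sketch of the generic IQuery pattern it relies on, since infrastructure.cqrs.IQuery itself is not shown here:

from dataclasses import dataclass
from typing import Generic, TypeVar

TResponse = TypeVar('TResponse')

class IQuery(Generic[TResponse]):
    """Marker base class: a query parameterised by its response type."""

@dataclass
class LookupQuery(IQuery[dict]):
    # Hypothetical query carrying its request payload, mirroring the record above.
    request: str = None

q = LookupQuery(request="connector-types")
print(q)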
1ea964e0ec8351dd1b2dea693320851e948ed4e9
142
py
Python
src/wai/annotations/festvox/specifier/__init__.py
waikato-ufdl/wai-annotations-festvox
b42216325758e4304e3b85be1cf00f037cfea201
[ "Apache-2.0" ]
null
null
null
src/wai/annotations/festvox/specifier/__init__.py
waikato-ufdl/wai-annotations-festvox
b42216325758e4304e3b85be1cf00f037cfea201
[ "Apache-2.0" ]
null
null
null
src/wai/annotations/festvox/specifier/__init__.py
waikato-ufdl/wai-annotations-festvox
b42216325758e4304e3b85be1cf00f037cfea201
[ "Apache-2.0" ]
null
null
null
from ._FestVoxInputFormatSpecifier import FestVoxInputFormatSpecifier
from ._FestVoxOutputFormatSpecifier import FestVoxOutputFormatSpecifier
47.333333
71
0.929577
8
142
16.25
0.5
0
0
0
0
0
0
0
0
0
0
0
0.056338
142
2
72
71
0.970149
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
94992465cf0bfba22c94c36899690060761a9d5d
4,359
py
Python
src/tweet_nlp/text_expressions/slang.py
StevenVuong/twitter_scraper_sentiment_analysis
6306dcb7e43d53da8d53c9d90d81d70dae442665
[ "MIT" ]
2
2020-05-11T16:48:40.000Z
2020-05-11T21:03:10.000Z
src/tweet_nlp/text_expressions/slang.py
StevenVuong/twitter_scraper_sentiment_analysis
6306dcb7e43d53da8d53c9d90d81d70dae442665
[ "MIT" ]
null
null
null
src/tweet_nlp/text_expressions/slang.py
StevenVuong/twitter_scraper_sentiment_analysis
6306dcb7e43d53da8d53c9d90d81d70dae442665
[ "MIT" ]
null
null
null
from typing import Dict, List, Tuple

# Ref: https://github.com/rishabhverma17/sms_slang_translator/blob/master/slang.txt

SLANG = \
"""
AFAIK=As Far As I Know
AFK=Away From Keyboard
ASAP=As Soon As Possible
ATK=At The Keyboard
ATM=At The Moment
A3=Anytime, Anywhere, Anyplace
BAK=Back At Keyboard
BBL=Be Back Later
BBS=Be Back Soon
BFN=Bye For Now
B4N=Bye For Now
BRB=Be Right Back
BRT=Be Right There
BTW=By The Way
B4=Before
B4N=Bye For Now
CU=See You
CUL8R=See You Later
CYA=See You
FAQ=Frequently Asked Questions
FC=Fingers Crossed
FWIW=For What It's Worth
FYI=For Your Information
GAL=Get A Life
GG=Good Game
GN=Good Night
GMTA=Great Minds Think Alike
GR8=Great!
G9=Genius
IC=I See
ICQ=I Seek you (also a chat program)
ILU=ILU: I Love You
IMHO=In My Honest/Humble Opinion
IMO=In My Opinion
IOW=In Other Words
IRL=In Real Life
KISS=Keep It Simple, Stupid
LDR=Long Distance Relationship
LMAO=Laugh My A.. Off
LOL=Laughing Out Loud
LTNS=Long Time No See
L8R=Later
MTE=My Thoughts Exactly
M8=Mate
NRN=No Reply Necessary
OIC=Oh I See
PITA=Pain In The A..
PRT=Party
PRW=Parents Are Watching
QPSA?=Que Pasa?
ROFL=Rolling On The Floor Laughing
ROFLOL=Rolling On The Floor Laughing Out Loud
ROTFLMAO=Rolling On The Floor Laughing My A.. Off
SK8=Skate
STATS=Your sex and age
ASL=Age, Sex, Location
THX=Thank You
TTFN=Ta-Ta For Now!
TTYL=Talk To You Later
U=You
U2=You Too
U4E=Yours For Ever
WB=Welcome Back
WTF=What The F...
WTG=Way To Go!
WUF=Where Are You From?
W8=Wait...
7K=Sick:-D Laugher
"""

SLANG_LIST = ['FWIW', 'L8R', 'M8', 'TTYL', 'WB', 'U4E', '7K', 'THX', 'BAK', 'U2',
              'ILU', 'ASL', 'SK8', 'LMAO', 'WTF', 'ATK', 'A3', 'GG', 'U', 'ATM',
              'NRN', 'GAL', 'GMTA', 'PRW', 'BTW', 'ASAP', 'FAQ', 'OIC', 'ROFL',
              'CUL8R', 'MTE', 'LTNS', 'FYI', 'BRB', 'BBS', 'B4', 'IRL', 'KISS',
              'GN', 'IC', 'IMO', 'ROTFLMAO', 'AFAIK', 'B4N', 'BRT', 'GR8', 'PRT',
              'TTFN', 'WTG', 'WUF', 'BFN', 'W8', 'CU', 'G9', 'FC', 'LOL', 'PITA',
              'BBL', 'ICQ', 'AFK', 'LDR', 'QPSA?', 'STATS', 'ROFLOL', 'IMHO',
              'CYA', 'IOW']

SLANG_DICT = {
    'AFAIK': 'As Far As I Know', 'AFK': 'Away From Keyboard',
    'ASAP': 'As Soon As Possible', 'ATK': 'At The Keyboard',
    'ATM': 'At The Moment', 'A3': 'Anytime, Anywhere, Anyplace',
    'BAK': 'Back At Keyboard', 'BBL': 'Be Back Later', 'BBS': 'Be Back Soon',
    'BFN': 'Bye For Now', 'B4N': 'Bye For Now', 'BRB': 'Be Right Back',
    'BRT': 'Be Right There', 'BTW': 'By The Way', 'B4': 'Before',
    'CU': 'See You', 'CUL8R': 'See You Later', 'CYA': 'See You',
    'FAQ': 'Frequently Asked Questions', 'FC': 'Fingers Crossed',
    'FWIW': "For What It's Worth", 'FYI': 'For Your Information',
    'GAL': 'Get A Life', 'GG': 'Good Game', 'GN': 'Good Night',
    'GMTA': 'Great Minds Think Alike', 'GR8': 'Great!', 'G9': 'Genius',
    'IC': 'I See', 'ICQ': 'I Seek you (also a chat program)',
    'ILU': 'ILU: I Love You', 'IMHO': 'In My Honest/Humble Opinion',
    'IMO': 'In My Opinion', 'IOW': 'In Other Words', 'IRL': 'In Real Life',
    'KISS': 'Keep It Simple, Stupid', 'LDR': 'Long Distance Relationship',
    'LMAO': 'Laugh My A.. Off', 'LOL': 'Laughing Out Loud',
    'LTNS': 'Long Time No See', 'L8R': 'Later', 'MTE': 'My Thoughts Exactly',
    'M8': 'Mate', 'NRN': 'No Reply Necessary', 'OIC': 'Oh I See',
    'PITA': 'Pain In The A..', 'PRT': 'Party', 'PRW': 'Parents Are Watching',
    'QPSA?': 'Que Pasa?', 'ROFL': 'Rolling On The Floor Laughing',
    'ROFLOL': 'Rolling On The Floor Laughing Out Loud',
    'ROTFLMAO': 'Rolling On The Floor Laughing My A.. Off', 'SK8': 'Skate',
    'STATS': 'Your sex and age', 'ASL': 'Age, Sex, Location',
    'THX': 'Thank You', 'TTFN': 'Ta-Ta For Now!', 'TTYL': 'Talk To You Later',
    'U': 'You', 'U2': 'You Too', 'U4E': 'Yours For Ever', 'WB': 'Welcome Back',
    'WTF': 'What The F...', 'WTG': 'Way To Go!', 'WUF': 'Where Are You From?',
    'W8': 'Wait...', '7K': 'Sick:-D Laugher',
}


def get_slang_list(slang_docstring: str = SLANG) -> Tuple[List, Dict]:
    """Get a list of slang words and a slang-to-expansion dict from a single docstring.

    Args:
        - slang_docstring(str)
    Return:
        - SLANG_LIST (List(str))
        - SLANG_MAP_DICT (Dict(str:str))
    """
    slang_map_dict = {}
    slang_list = []

    for line in slang_docstring.split("\n"):
        if line != "":
            cw = line.split("=")[0]
            cw_expanded = line.split("=")[1]
            slang_list.append(cw)
            slang_map_dict[cw] = cw_expanded

    slang_list = list(set(slang_list))
    return slang_list, slang_map_dict
44.479592
1,751
0.638449
719
4,359
3.835883
0.296245
0.026106
0.026106
0.036983
0.751994
0.751994
0.751994
0.751994
0.751994
0.751994
0
0.012141
0.168617
4,359
97
1,752
44.938144
0.748896
0.056206
0
0
0
0
0.520925
0
0
0
0
0
0
1
0.066667
false
0
0.066667
0
0.2
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
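A quick usage sketch for get_slang_list from the module above, assuming it is importable as slang; the sample sentence is invented:

from slang import get_slang_list

slang_list, slang_map = get_slang_list()
words = "BRB AFAIK the show starts ASAP".split()
print(" ".join(slang_map.get(w, w) for w in words))
# -> Be Right Back As Far As I Know the show starts As Soon As Possible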
94a8416146e998b44cd1aedf8c66839797b6dfbd
10,943
py
Python
pyson/pyson/pysonLexer.py
ZizhouJia/pyson
ba80336e6ec43456c0d1bf3e71109609b9489181
[ "MIT" ]
2
2019-10-15T14:05:18.000Z
2019-12-02T05:58:31.000Z
pyson/pyson/pysonLexer.py
ZizhouJia/pyson
ba80336e6ec43456c0d1bf3e71109609b9489181
[ "MIT" ]
null
null
null
pyson/pyson/pysonLexer.py
ZizhouJia/pyson
ba80336e6ec43456c0d1bf3e71109609b9489181
[ "MIT" ]
null
null
null
# Generated from pyson.g4 by ANTLR 4.7.1
from antlr4 import *
from io import StringIO
from typing.io import TextIO
import sys


def serializedATN():
    with StringIO() as buf:
        buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\2\26")
        buf.write("\u0107\b\1\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7")
        buf.write("\t\7\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r\t\r")
        buf.write("\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22\4\23")
        buf.write("\t\23\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\3\2\5\2")
        buf.write("\61\n\2\3\2\6\2\64\n\2\r\2\16\2\65\3\3\5\39\n\3\3\3\6")
        buf.write("\3<\n\3\r\3\16\3=\3\3\3\3\7\3B\n\3\f\3\16\3E\13\3\5\3")
        buf.write("G\n\3\3\3\3\3\6\3K\n\3\r\3\16\3L\5\3O\n\3\3\3\3\3\5\3")
        buf.write("S\n\3\3\3\6\3V\n\3\r\3\16\3W\5\3Z\n\3\3\4\3\4\3\4\3\4")
        buf.write("\3\4\3\4\3\4\3\4\5\4d\n\4\3\5\3\5\3\5\3\5\3\5\3\5\3\5")
        buf.write("\3\5\3\5\3\5\5\5p\n\5\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6")
        buf.write("\5\6z\n\6\3\7\3\7\3\7\3\7\3\7\3\b\6\b\u0082\n\b\r\b\16")
        buf.write("\b\u0083\3\b\7\b\u0087\n\b\f\b\16\b\u008a\13\b\3\t\3\t")
        buf.write("\3\t\3\n\3\n\3\n\3\13\3\13\3\13\3\13\3\13\7\13\u0097\n")
        buf.write("\13\f\13\16\13\u009a\13\13\3\13\3\13\3\13\3\13\3\13\3")
        buf.write("\13\7\13\u00a2\n\13\f\13\16\13\u00a5\13\13\3\13\5\13\u00a8")
        buf.write("\n\13\3\f\5\f\u00ab\n\f\3\f\6\f\u00ae\n\f\r\f\16\f\u00af")
        buf.write("\3\f\7\f\u00b3\n\f\f\f\16\f\u00b6\13\f\3\f\3\f\6\f\u00ba")
        buf.write("\n\f\r\f\16\f\u00bb\3\f\7\f\u00bf\n\f\f\f\16\f\u00c2\13")
        buf.write("\f\3\f\3\f\3\f\7\f\u00c7\n\f\f\f\16\f\u00ca\13\f\5\f\u00cc")
        buf.write("\n\f\7\f\u00ce\n\f\f\f\16\f\u00d1\13\f\3\r\3\r\3\16\3")
        buf.write("\16\3\17\3\17\3\20\3\20\3\21\3\21\3\22\3\22\3\23\3\23")
        buf.write("\3\24\3\24\3\25\3\25\3\25\3\25\7\25\u00e7\n\25\f\25\16")
        buf.write("\25\u00ea\13\25\3\25\5\25\u00ed\n\25\3\25\3\25\3\25\3")
        buf.write("\25\3\26\3\26\3\26\3\26\7\26\u00f7\n\26\f\26\16\26\u00fa")
        buf.write("\13\26\3\26\3\26\3\26\3\26\3\26\3\27\6\27\u0102\n\27\r")
        buf.write("\27\16\27\u0103\3\27\3\27\6\u0098\u00a3\u00e8\u00f8\2")
        buf.write("\30\3\3\5\4\7\5\t\6\13\7\r\b\17\t\21\2\23\2\25\n\27\13")
        buf.write("\31\f\33\r\35\16\37\17!\20#\21%\22\'\23)\24+\25-\26\3")
        buf.write("\2\n\4\2--//\3\2\62;\4\2GGgg\5\2C\\aac|\6\2\62;C\\aac")
        buf.write("|\b\2$$^^ddppttvv\3\2\63;\5\2\13\f\17\17\"\"\2\u0128\2")
        buf.write("\3\3\2\2\2\2\5\3\2\2\2\2\7\3\2\2\2\2\t\3\2\2\2\2\13\3")
        buf.write("\2\2\2\2\r\3\2\2\2\2\17\3\2\2\2\2\25\3\2\2\2\2\27\3\2")
        buf.write("\2\2\2\31\3\2\2\2\2\33\3\2\2\2\2\35\3\2\2\2\2\37\3\2\2")
        buf.write("\2\2!\3\2\2\2\2#\3\2\2\2\2%\3\2\2\2\2\'\3\2\2\2\2)\3\2")
        buf.write("\2\2\2+\3\2\2\2\2-\3\2\2\2\3\60\3\2\2\2\58\3\2\2\2\7c")
        buf.write("\3\2\2\2\to\3\2\2\2\13y\3\2\2\2\r{\3\2\2\2\17\u0081\3")
        buf.write("\2\2\2\21\u008b\3\2\2\2\23\u008e\3\2\2\2\25\u00a7\3\2")
        buf.write("\2\2\27\u00aa\3\2\2\2\31\u00d2\3\2\2\2\33\u00d4\3\2\2")
        buf.write("\2\35\u00d6\3\2\2\2\37\u00d8\3\2\2\2!\u00da\3\2\2\2#\u00dc")
        buf.write("\3\2\2\2%\u00de\3\2\2\2\'\u00e0\3\2\2\2)\u00e2\3\2\2\2")
        buf.write("+\u00f2\3\2\2\2-\u0101\3\2\2\2/\61\t\2\2\2\60/\3\2\2\2")
        buf.write("\60\61\3\2\2\2\61\63\3\2\2\2\62\64\t\3\2\2\63\62\3\2\2")
        buf.write("\2\64\65\3\2\2\2\65\63\3\2\2\2\65\66\3\2\2\2\66\4\3\2")
        buf.write("\2\2\679\t\2\2\28\67\3\2\2\289\3\2\2\29N\3\2\2\2:<\t\3")
        buf.write("\2\2;:\3\2\2\2<=\3\2\2\2=;\3\2\2\2=>\3\2\2\2>F\3\2\2\2")
        buf.write("?C\7\60\2\2@B\t\3\2\2A@\3\2\2\2BE\3\2\2\2CA\3\2\2\2CD")
        buf.write("\3\2\2\2DG\3\2\2\2EC\3\2\2\2F?\3\2\2\2FG\3\2\2\2GO\3\2")
        buf.write("\2\2HJ\7\60\2\2IK\t\3\2\2JI\3\2\2\2KL\3\2\2\2LJ\3\2\2")
        buf.write("\2LM\3\2\2\2MO\3\2\2\2N;\3\2\2\2NH\3\2\2\2OY\3\2\2\2P")
        buf.write("R\t\4\2\2QS\t\2\2\2RQ\3\2\2\2RS\3\2\2\2SU\3\2\2\2TV\t")
        buf.write("\3\2\2UT\3\2\2\2VW\3\2\2\2WU\3\2\2\2WX\3\2\2\2XZ\3\2\2")
        buf.write("\2YP\3\2\2\2YZ\3\2\2\2Z\6\3\2\2\2[\\\7V\2\2\\]\7t\2\2")
        buf.write("]^\7w\2\2^d\7g\2\2_`\7v\2\2`a\7t\2\2ab\7w\2\2bd\7g\2\2")
        buf.write("c[\3\2\2\2c_\3\2\2\2d\b\3\2\2\2ef\7H\2\2fg\7c\2\2gh\7")
        buf.write("n\2\2hi\7u\2\2ip\7g\2\2jk\7h\2\2kl\7c\2\2lm\7n\2\2mn\7")
        buf.write("u\2\2np\7g\2\2oe\3\2\2\2oj\3\2\2\2p\n\3\2\2\2qr\7P\2\2")
        buf.write("rs\7q\2\2st\7p\2\2tz\7g\2\2uv\7p\2\2vw\7q\2\2wx\7p\2\2")
        buf.write("xz\7g\2\2yq\3\2\2\2yu\3\2\2\2z\f\3\2\2\2{|\7u\2\2|}\7")
        buf.write("g\2\2}~\7n\2\2~\177\7h\2\2\177\16\3\2\2\2\u0080\u0082")
        buf.write("\t\5\2\2\u0081\u0080\3\2\2\2\u0082\u0083\3\2\2\2\u0083")
        buf.write("\u0081\3\2\2\2\u0083\u0084\3\2\2\2\u0084\u0088\3\2\2\2")
        buf.write("\u0085\u0087\t\6\2\2\u0086\u0085\3\2\2\2\u0087\u008a\3")
        buf.write("\2\2\2\u0088\u0086\3\2\2\2\u0088\u0089\3\2\2\2\u0089\20")
        buf.write("\3\2\2\2\u008a\u0088\3\2\2\2\u008b\u008c\7^\2\2\u008c")
        buf.write("\u008d\7$\2\2\u008d\22\3\2\2\2\u008e\u008f\7^\2\2\u008f")
        buf.write("\u0090\7)\2\2\u0090\24\3\2\2\2\u0091\u0098\7$\2\2\u0092")
        buf.write("\u0097\5\21\t\2\u0093\u0097\13\2\2\2\u0094\u0095\7^\2")
        buf.write("\2\u0095\u0097\t\7\2\2\u0096\u0092\3\2\2\2\u0096\u0093")
        buf.write("\3\2\2\2\u0096\u0094\3\2\2\2\u0097\u009a\3\2\2\2\u0098")
        buf.write("\u0099\3\2\2\2\u0098\u0096\3\2\2\2\u0099\u009b\3\2\2\2")
        buf.write("\u009a\u0098\3\2\2\2\u009b\u00a8\7$\2\2\u009c\u00a3\7")
        buf.write(")\2\2\u009d\u00a2\5\23\n\2\u009e\u00a2\13\2\2\2\u009f")
        buf.write("\u00a0\7^\2\2\u00a0\u00a2\t\7\2\2\u00a1\u009d\3\2\2\2")
        buf.write("\u00a1\u009e\3\2\2\2\u00a1\u009f\3\2\2\2\u00a2\u00a5\3")
        buf.write("\2\2\2\u00a3\u00a4\3\2\2\2\u00a3\u00a1\3\2\2\2\u00a4\u00a6")
        buf.write("\3\2\2\2\u00a5\u00a3\3\2\2\2\u00a6\u00a8\7)\2\2\u00a7")
        buf.write("\u0091\3\2\2\2\u00a7\u009c\3\2\2\2\u00a8\26\3\2\2\2\u00a9")
        buf.write("\u00ab\7B\2\2\u00aa\u00a9\3\2\2\2\u00aa\u00ab\3\2\2\2")
        buf.write("\u00ab\u00ad\3\2\2\2\u00ac\u00ae\t\5\2\2\u00ad\u00ac\3")
        buf.write("\2\2\2\u00ae\u00af\3\2\2\2\u00af\u00ad\3\2\2\2\u00af\u00b0")
        buf.write("\3\2\2\2\u00b0\u00b4\3\2\2\2\u00b1\u00b3\t\6\2\2\u00b2")
        buf.write("\u00b1\3\2\2\2\u00b3\u00b6\3\2\2\2\u00b4\u00b2\3\2\2\2")
        buf.write("\u00b4\u00b5\3\2\2\2\u00b5\u00cf\3\2\2\2\u00b6\u00b4\3")
        buf.write("\2\2\2\u00b7\u00cb\7\60\2\2\u00b8\u00ba\t\5\2\2\u00b9")
        buf.write("\u00b8\3\2\2\2\u00ba\u00bb\3\2\2\2\u00bb\u00b9\3\2\2\2")
        buf.write("\u00bb\u00bc\3\2\2\2\u00bc\u00c0\3\2\2\2\u00bd\u00bf\t")
        buf.write("\6\2\2\u00be\u00bd\3\2\2\2\u00bf\u00c2\3\2\2\2\u00c0\u00be")
        buf.write("\3\2\2\2\u00c0\u00c1\3\2\2\2\u00c1\u00cc\3\2\2\2\u00c2")
        buf.write("\u00c0\3\2\2\2\u00c3\u00cc\7\62\2\2\u00c4\u00c8\t\b\2")
        buf.write("\2\u00c5\u00c7\t\3\2\2\u00c6\u00c5\3\2\2\2\u00c7\u00ca")
        buf.write("\3\2\2\2\u00c8\u00c6\3\2\2\2\u00c8\u00c9\3\2\2\2\u00c9")
        buf.write("\u00cc\3\2\2\2\u00ca\u00c8\3\2\2\2\u00cb\u00b9\3\2\2\2")
        buf.write("\u00cb\u00c3\3\2\2\2\u00cb\u00c4\3\2\2\2\u00cc\u00ce\3")
        buf.write("\2\2\2\u00cd\u00b7\3\2\2\2\u00ce\u00d1\3\2\2\2\u00cf\u00cd")
        buf.write("\3\2\2\2\u00cf\u00d0\3\2\2\2\u00d0\30\3\2\2\2\u00d1\u00cf")
        buf.write("\3\2\2\2\u00d2\u00d3\7.\2\2\u00d3\32\3\2\2\2\u00d4\u00d5")
        buf.write("\7<\2\2\u00d5\34\3\2\2\2\u00d6\u00d7\7}\2\2\u00d7\36\3")
        buf.write("\2\2\2\u00d8\u00d9\7\177\2\2\u00d9 \3\2\2\2\u00da\u00db")
        buf.write("\7]\2\2\u00db\"\3\2\2\2\u00dc\u00dd\7_\2\2\u00dd$\3\2")
        buf.write("\2\2\u00de\u00df\7*\2\2\u00df&\3\2\2\2\u00e0\u00e1\7+")
        buf.write("\2\2\u00e1(\3\2\2\2\u00e2\u00e3\7\61\2\2\u00e3\u00e4\7")
        buf.write("\61\2\2\u00e4\u00e8\3\2\2\2\u00e5\u00e7\13\2\2\2\u00e6")
        buf.write("\u00e5\3\2\2\2\u00e7\u00ea\3\2\2\2\u00e8\u00e9\3\2\2\2")
        buf.write("\u00e8\u00e6\3\2\2\2\u00e9\u00ec\3\2\2\2\u00ea\u00e8\3")
        buf.write("\2\2\2\u00eb\u00ed\7\17\2\2\u00ec\u00eb\3\2\2\2\u00ec")
        buf.write("\u00ed\3\2\2\2\u00ed\u00ee\3\2\2\2\u00ee\u00ef\7\f\2\2")
        buf.write("\u00ef\u00f0\3\2\2\2\u00f0\u00f1\b\25\2\2\u00f1*\3\2\2")
        buf.write("\2\u00f2\u00f3\7\61\2\2\u00f3\u00f4\7,\2\2\u00f4\u00f8")
        buf.write("\3\2\2\2\u00f5\u00f7\13\2\2\2\u00f6\u00f5\3\2\2\2\u00f7")
        buf.write("\u00fa\3\2\2\2\u00f8\u00f9\3\2\2\2\u00f8\u00f6\3\2\2\2")
        buf.write("\u00f9\u00fb\3\2\2\2\u00fa\u00f8\3\2\2\2\u00fb\u00fc\7")
        buf.write(",\2\2\u00fc\u00fd\7\61\2\2\u00fd\u00fe\3\2\2\2\u00fe\u00ff")
        buf.write("\b\26\2\2\u00ff,\3\2\2\2\u0100\u0102\t\t\2\2\u0101\u0100")
        buf.write("\3\2\2\2\u0102\u0103\3\2\2\2\u0103\u0101\3\2\2\2\u0103")
        buf.write("\u0104\3\2\2\2\u0104\u0105\3\2\2\2\u0105\u0106\b\27\2")
        buf.write("\2\u0106.\3\2\2\2$\2\60\658=CFLNRWYcoy\u0083\u0088\u0096")
        buf.write("\u0098\u00a1\u00a3\u00a7\u00aa\u00af\u00b4\u00bb\u00c0")
        buf.write("\u00c8\u00cb\u00cf\u00e8\u00ec\u00f8\u0103\3\b\2\2")
        return buf.getvalue()


class pysonLexer(Lexer):

    atn = ATNDeserializer().deserialize(serializedATN())

    decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]

    INT = 1
    FLOAT = 2
    TRUE = 3
    FALSE = 4
    NONE = 5
    SELF = 6
    KEY = 7
    STRING = 8
    OBJECT = 9
    COLON = 10
    COMMA = 11
    LEFT_DICT = 12
    RIGHT_DICT = 13
    LEFT_LIST = 14
    RIGHT_LIST = 15
    LEFT_BUKKET = 16
    RIGHT_BUKKEFT = 17
    LINE_COMMENT = 18
    COMMENT = 19
    WS = 20

    channelNames = [ u"DEFAULT_TOKEN_CHANNEL", u"HIDDEN" ]

    modeNames = [ "DEFAULT_MODE" ]

    literalNames = [ "<INVALID>",
            "'self'", "','", "':'", "'{'", "'}'", "'['", "']'", "'('", "')'" ]

    symbolicNames = [ "<INVALID>",
            "INT", "FLOAT", "TRUE", "FALSE", "NONE", "SELF", "KEY", "STRING",
            "OBJECT", "COLON", "COMMA", "LEFT_DICT", "RIGHT_DICT", "LEFT_LIST",
            "RIGHT_LIST", "LEFT_BUKKET", "RIGHT_BUKKEFT", "LINE_COMMENT",
            "COMMENT", "WS" ]

    ruleNames = [ "INT", "FLOAT", "TRUE", "FALSE", "NONE", "SELF", "KEY",
                  "ESC_DOUBLE", "ESC_SINGLE", "STRING", "OBJECT", "COLON",
                  "COMMA", "LEFT_DICT", "RIGHT_DICT", "LEFT_LIST", "RIGHT_LIST",
                  "LEFT_BUKKET", "RIGHT_BUKKEFT", "LINE_COMMENT", "COMMENT",
                  "WS" ]

    grammarFileName = "pyson.g4"

    def __init__(self, input=None, output:TextIO = sys.stdout):
        super().__init__(input, output)
        self.checkVersion("4.7.1")
        self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache())
        self._actions = None
        self._predicates = None
58.207447
103
0.561181
2,653
10,943
2.299661
0.139465
0.147189
0.093919
0.097689
0.279626
0.170956
0.088182
0.066055
0.056056
0.054253
0
0.321952
0.146212
10,943
187
104
58.518717
0.33105
0.003473
0
0
1
0.588235
0.622145
0.584534
0
0
0
0
0
1
0.011765
false
0
0.023529
0
0.211765
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
1
0
0
0
1
1
1
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
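A hypothetical driver for the generated lexer above, using the standard antlr4-python3-runtime entry points (InputStream, CommonTokenStream); the input string is invented:

from antlr4 import InputStream, CommonTokenStream
# from pyson.pysonLexer import pysonLexer  # module path as laid out in the repo above

lexer = pysonLexer(InputStream("{key: [1, 2.5, true]}"))
stream = CommonTokenStream(lexer)
stream.fill()
for tok in stream.tokens:
    print(tok.type, repr(tok.text))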
94b43b82a4c3973de43f127e6ed4a25ea4b5e598
31
py
Python
importscan/__init__.py
RonnyPfannschmidt-RedHat/importscan
ae54da040611e44e36615b7855b7b82436a8b624
[ "BSD-3-Clause" ]
2
2019-08-23T07:10:04.000Z
2019-11-15T13:13:06.000Z
importscan/__init__.py
RonnyPfannschmidt-RedHat/importscan
ae54da040611e44e36615b7855b7b82436a8b624
[ "BSD-3-Clause" ]
3
2016-05-12T09:28:14.000Z
2018-08-16T08:26:36.000Z
importscan/__init__.py
RonnyPfannschmidt-RedHat/importscan
ae54da040611e44e36615b7855b7b82436a8b624
[ "BSD-3-Clause" ]
3
2016-05-04T07:08:23.000Z
2018-08-16T08:26:53.000Z
from .scan import scan # noqa
15.5
30
0.709677
5
31
4.4
0.8
0
0
0
0
0
0
0
0
0
0
0
0.225806
31
1
31
31
0.916667
0.129032
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
94d3b37adfd476a0d8bd5f7caf4fd7e2923563de
114
py
Python
__OTHER__/MonitorIT/AppLauncher/backend/accounts.py
APetrishchev/Launcher
be0600997db9d0573acaa3339206c299a5fa5d40
[ "Apache-2.0" ]
null
null
null
__OTHER__/MonitorIT/AppLauncher/backend/accounts.py
APetrishchev/Launcher
be0600997db9d0573acaa3339206c299a5fa5d40
[ "Apache-2.0" ]
null
null
null
__OTHER__/MonitorIT/AppLauncher/backend/accounts.py
APetrishchev/Launcher
be0600997db9d0573acaa3339206c299a5fa5d40
[ "Apache-2.0" ]
null
null
null
from AppLauncher.backend.models.accounts import Account as ModelsAccount


class Account(ModelsAccount):
    pass
22.8
73
0.815789
13
114
7.153846
0.846154
0
0
0
0
0
0
0
0
0
0
0
0.131579
114
4
74
28.5
0.939394
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.333333
0.333333
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
1
0
1
0
0
6
94f530efcf32213ff94b19f1d115506ff5ee32a4
62
py
Python
pyxb/bundles/opengis/iso19139/v20070417/gco.py
eLBati/pyxb
14737c23a125fd12c954823ad64fc4497816fae3
[ "Apache-2.0" ]
123
2015-01-12T06:43:22.000Z
2022-03-20T18:06:46.000Z
pyxb/bundles/opengis/iso19139/v20070417/gco.py
eLBati/pyxb
14737c23a125fd12c954823ad64fc4497816fae3
[ "Apache-2.0" ]
103
2015-01-08T18:35:57.000Z
2022-01-18T01:44:14.000Z
pyxb/bundles/opengis/iso19139/v20070417/gco.py
eLBati/pyxb
14737c23a125fd12c954823ad64fc4497816fae3
[ "Apache-2.0" ]
54
2015-02-15T17:12:00.000Z
2022-03-07T23:02:32.000Z
from pyxb.bundles.opengis.iso19139.v20070417.raw.gco import *
31
61
0.822581
9
62
5.666667
1
0
0
0
0
0
0
0
0
0
0
0.224138
0.064516
62
1
62
62
0.655172
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
a21659e0c7831644718cbd7c3483d6da7a720315
1,774
py
Python
tests/forecast_strategy/test_naive_fs.py
vshulyak/ts-eval
2049b1268cf4272f5fa1471851523f8da14dd84c
[ "MIT" ]
1
2021-07-12T08:58:07.000Z
2021-07-12T08:58:07.000Z
tests/forecast_strategy/test_naive_fs.py
vshulyak/ts-eval
2049b1268cf4272f5fa1471851523f8da14dd84c
[ "MIT" ]
null
null
null
tests/forecast_strategy/test_naive_fs.py
vshulyak/ts-eval
2049b1268cf4272f5fa1471851523f8da14dd84c
[ "MIT" ]
null
null
null
import pytest

from ts_eval.forecast_strategy.naive import (
    NaiveForecastStrategy,
    SNaiveForecastStrategy,
)

H = 24
TRAIN_TEST_SPLIT = 100


@pytest.mark.parametrize(
    "endog",
    ["dataset_1d", "dataset_1d__pd_index_ordinal", "dataset_1d__pd_index_datetime"],
    indirect=["endog"],
)
def test_fc_strategy__naive(endog):
    """
    Tests iterative prediction on different input data (numpy/pandas/None)
    """
    preds_3d = (
        NaiveForecastStrategy(endog, train_test_split_index=TRAIN_TEST_SPLIT)
        .forecast(h=H)
        .numpy()
    )
    assert preds_3d.shape[0] == endog.shape[0] - TRAIN_TEST_SPLIT - H
    assert preds_3d.shape[1] == H
    assert preds_3d.shape[2] == 3


@pytest.mark.parametrize(
    "endog",
    ["dataset_1d", "dataset_1d__pd_index_ordinal", "dataset_1d__pd_index_datetime"],
    indirect=["endog"],
)
def test_fc_strategy__snaive(endog):
    """
    Tests iterative prediction on different input data (numpy/pandas/None)
    """
    preds_3d = (
        SNaiveForecastStrategy(endog, train_test_split_index=TRAIN_TEST_SPLIT)
        .forecast(h=H)
        .numpy()
    )
    assert preds_3d.shape[0] == endog.shape[0] - TRAIN_TEST_SPLIT - H
    assert preds_3d.shape[1] == H
    assert preds_3d.shape[2] == 3


@pytest.mark.parametrize("endog", ["dataset_1d__pd_index_datetime"], indirect=["endog"])
def test_fc_strategy__xarray_dt(endog):
    """
    Tests iterative prediction on different input data (numpy/pandas/None)
    """
    preds_3d = (
        SNaiveForecastStrategy(endog, train_test_split_index=TRAIN_TEST_SPLIT)
        .forecast(h=H)
        .xarray()
    )
    assert preds_3d.dt.shape[0] == endog.shape[0] - TRAIN_TEST_SPLIT
    assert preds_3d.dt[0] == endog[TRAIN_TEST_SPLIT:].index[0]
25.710145
88
0.682638
229
1,774
4.947598
0.213974
0.087379
0.135922
0.095322
0.832304
0.811121
0.811121
0.811121
0.78376
0.78376
0
0.025892
0.194476
1,774
68
89
26.088235
0.76697
0.121195
0
0.545455
0
0
0.127561
0.094514
0
0
0
0
0.181818
1
0.068182
false
0
0.045455
0
0.113636
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
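The strategy classes under test are not included in this record; the following is a rough NumPy sketch of what a (seasonal) naive forecast computes, ignoring the interval components that the tests' third dimension suggests:

import numpy as np

def naive_forecast(y, h):
    # Naive strategy: every step of the horizon is the last observed value.
    return np.repeat(y[-1], h)

def snaive_forecast(y, h, season=24):
    # Seasonal variant: repeat the last full season across the horizon.
    return np.resize(y[-season:], h)

y = np.arange(200.0)
print(naive_forecast(y, 3))   # [199. 199. 199.]
print(snaive_forecast(y, 5))  # [176. 177. 178. 179. 180.]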
a2268bca2fa8941b621a96b4e155f151f60a3198
94
py
Python
office365/sharepoint/fields/fieldMultiChoice.py
wreiner/Office365-REST-Python-Client
476bbce4f5928a140b4f5d33475d0ac9b0783530
[ "MIT" ]
null
null
null
office365/sharepoint/fields/fieldMultiChoice.py
wreiner/Office365-REST-Python-Client
476bbce4f5928a140b4f5d33475d0ac9b0783530
[ "MIT" ]
null
null
null
office365/sharepoint/fields/fieldMultiChoice.py
wreiner/Office365-REST-Python-Client
476bbce4f5928a140b4f5d33475d0ac9b0783530
[ "MIT" ]
null
null
null
from office365.sharepoint.fields.field import Field


class FieldMultiChoice(Field):
    pass
15.666667
51
0.797872
11
94
6.818182
0.818182
0
0
0
0
0
0
0
0
0
0
0.037037
0.138298
94
5
52
18.8
0.888889
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.333333
0.333333
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
1
0
1
0
0
6
bf6727bf91c6d2a92532fe5df66638eacf9c69d9
1,337
py
Python
irida_sistr_results/tests/unit/test_irida_sistr_workflow.py
phac-nml/irida-sistr-results
c3994202bf54366abd06dfa90e14025599300d77
[ "Apache-2.0" ]
null
null
null
irida_sistr_results/tests/unit/test_irida_sistr_workflow.py
phac-nml/irida-sistr-results
c3994202bf54366abd06dfa90e14025599300d77
[ "Apache-2.0" ]
10
2018-04-18T20:56:12.000Z
2020-07-24T20:11:01.000Z
irida_sistr_results/tests/unit/test_irida_sistr_workflow.py
phac-nml/irida-sistr-results
c3994202bf54366abd06dfa90e14025599300d77
[ "Apache-2.0" ]
null
null
null
import unittest

from irida_sistr_results.irida_sistr_workflow import IridaSistrWorkflow


class IridaSistrWorkflowTest(unittest.TestCase):

    def test_workflow_ids_or_versions_to_ids_version(self):
        ids = IridaSistrWorkflow.workflow_ids_or_versions_to_ids(['0.1'])
        self.assertEqual(['e559af58-a560-4bbd-997e-808bfbe026e2'], ids, "Invalid ids")

    def test_workflow_ids_or_versions_to_ids_id(self):
        ids = IridaSistrWorkflow.workflow_ids_or_versions_to_ids(['e559af58-a560-4bbd-997e-808bfbe026e2'])
        self.assertEqual(['e559af58-a560-4bbd-997e-808bfbe026e2'], ids, "Invalid ids")

    def test_workflow_ids_or_versions_to_ids_id_and_version(self):
        ids = IridaSistrWorkflow.workflow_ids_or_versions_to_ids(
            ['e559af58-a560-4bbd-997e-808bfbe026e2', '0.2'])
        self.assertEqual(['e559af58-a560-4bbd-997e-808bfbe026e2',
                          'e8f9cc61-3264-48c6-81d9-02d9e84bccc7'], ids, "Invalid ids")

    def test_workflow_ids_or_versions_to_ids_invalid_version(self):
        self.assertRaises(KeyError, IridaSistrWorkflow.workflow_ids_or_versions_to_ids, ['0.1x'])

    def test_workflow_ids_or_versions_to_ids_invalid_id(self):
        self.assertRaises(KeyError, IridaSistrWorkflow.workflow_ids_or_versions_to_ids,
                          ['Xe8f9cc61-3264-48c6-81d9-02d9e84bccc7'])
44.566667
113
0.755423
169
1,337
5.573965
0.236686
0.116773
0.138004
0.22293
0.785563
0.785563
0.785563
0.735669
0.698514
0.64862
0
0.123252
0.144353
1,337
29
114
46.103448
0.700175
0
0
0.111111
0
0
0.221391
0.18923
0
0
0
0
0.277778
1
0.277778
false
0
0.111111
0
0.444444
0
0
0
0
null
0
0
1
0
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
6
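The class under test is not included in this record; here is a hypothetical sketch of the lookup behaviour the five tests describe, using only the version-to-UUID pairs they assert:

VERSION_TO_ID = {
    '0.1': 'e559af58-a560-4bbd-997e-808bfbe026e2',
    '0.2': 'e8f9cc61-3264-48c6-81d9-02d9e84bccc7',
}

def workflow_ids_or_versions_to_ids(ids_or_versions):
    # Versions map to workflow UUIDs, known UUIDs pass through unchanged,
    # and anything else raises KeyError, as the tests require.
    known_ids = set(VERSION_TO_ID.values())
    out = []
    for item in ids_or_versions:
        if item in known_ids:
            out.append(item)
        elif item in VERSION_TO_ID:
            out.append(VERSION_TO_ID[item])
        else:
            raise KeyError(item)
    return out

assert workflow_ids_or_versions_to_ids(['0.1']) == ['e559af58-a560-4bbd-997e-808bfbe026e2']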
bfc02c7c97d68984e9d81934fced1877bd632364
76
py
Python
windypie/__init__.py
codeforamerica/windypie
73568f2cf12a8c0427628da91e7ad4c554843046
[ "BSD-2-Clause" ]
1
2019-09-16T07:52:01.000Z
2019-09-16T07:52:01.000Z
windypie/__init__.py
leeinwoo/windypie
73568f2cf12a8c0427628da91e7ad4c554843046
[ "BSD-2-Clause" ]
null
null
null
windypie/__init__.py
leeinwoo/windypie
73568f2cf12a8c0427628da91e7ad4c554843046
[ "BSD-2-Clause" ]
3
2016-10-28T14:21:51.000Z
2021-04-17T10:38:46.000Z
from . import windypie
from .windypie import WindyPie, SocrataPythonAdapter
25.333333
52
0.842105
8
76
8
0.5
0.4375
0
0
0
0
0
0
0
0
0
0
0.118421
76
2
53
38
0.955224
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6