hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a2ec6377b6cc9303e46211e079b606a72c0cb73b
| 216
|
py
|
Python
|
app/services/category_services/list.py
|
brunocamposal/fast-food-simulator
|
6dc7f33cdebd222998fc88df9264853c741c64ca
|
[
"MIT"
] | 2
|
2021-01-11T23:47:17.000Z
|
2021-01-13T13:16:50.000Z
|
app/services/category_services/list.py
|
brunocamposal/kitchin-kanri
|
6dc7f33cdebd222998fc88df9264853c741c64ca
|
[
"MIT"
] | 7
|
2021-01-13T13:16:46.000Z
|
2021-01-21T16:07:28.000Z
|
app/services/category_services/list.py
|
brunocamposal/kitchin-kanri
|
6dc7f33cdebd222998fc88df9264853c741c64ca
|
[
"MIT"
] | null | null | null |
from app.models import Category
from app.serializer.category_schema import categories_schema
from http import HTTPStatus
def category_list():
return categories_schema.jsonify(Category.query.all()), HTTPStatus.OK
| 36
| 73
| 0.833333
| 29
| 216
| 6.068966
| 0.586207
| 0.079545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.097222
| 216
| 6
| 73
| 36
| 0.902564
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| true
| 0
| 0.6
| 0.2
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
a2ef2f44d915accf4336e360cd9905b8232fadb6
| 156
|
py
|
Python
|
scrapy-redis/tests/test_package_import.py
|
GongkunJiang/MySpider
|
8c088f696679b13568843af521279f9f25f40314
|
[
"MIT"
] | 3,305
|
2017-07-01T09:19:10.000Z
|
2022-03-31T10:22:21.000Z
|
scrapy-redis/tests/test_package_import.py
|
GongkunJiang/MySpider
|
8c088f696679b13568843af521279f9f25f40314
|
[
"MIT"
] | 129
|
2017-07-03T23:19:23.000Z
|
2022-03-29T18:01:29.000Z
|
scrapy-redis/tests/test_package_import.py
|
GongkunJiang/MySpider
|
8c088f696679b13568843af521279f9f25f40314
|
[
"MIT"
] | 995
|
2017-07-02T04:09:27.000Z
|
2022-03-30T10:46:25.000Z
|
import scrapy_redis
def test_package_metadata():
assert scrapy_redis.__author__
assert scrapy_redis.__email__
assert scrapy_redis.__version__
| 19.5
| 35
| 0.807692
| 19
| 156
| 5.684211
| 0.578947
| 0.407407
| 0.472222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.153846
| 156
| 7
| 36
| 22.285714
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.6
| 1
| 0.2
| true
| 0
| 0.2
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a2fdad07403aa04563e846da42c76d367d5963ea
| 64
|
py
|
Python
|
torchlab/encoder/__init__.py
|
MarvinTeichmann/TorchLab
|
d837dfddf893559a259f31a1980986033665cac3
|
[
"MIT"
] | 3
|
2019-08-29T00:23:28.000Z
|
2020-12-07T11:13:54.000Z
|
torchlab/encoder/__init__.py
|
MarvinTeichmann/TorchLab
|
d837dfddf893559a259f31a1980986033665cac3
|
[
"MIT"
] | null | null | null |
torchlab/encoder/__init__.py
|
MarvinTeichmann/TorchLab
|
d837dfddf893559a259f31a1980986033665cac3
|
[
"MIT"
] | null | null | null |
from . import resnet
from . import vgg
# from . import densenet
| 16
| 24
| 0.734375
| 9
| 64
| 5.222222
| 0.555556
| 0.638298
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.203125
| 64
| 3
| 25
| 21.333333
| 0.921569
| 0.34375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0c2e4fb7c87dadb36ba2c4f4ee4d66133d257195
| 141
|
py
|
Python
|
tpc_webapp/blog/admin.py
|
sahilpabale/ThePunchCoders-Website
|
b80226e053b194882ce520e5afe8b123f0a57630
|
[
"MIT"
] | null | null | null |
tpc_webapp/blog/admin.py
|
sahilpabale/ThePunchCoders-Website
|
b80226e053b194882ce520e5afe8b123f0a57630
|
[
"MIT"
] | 3
|
2021-03-30T13:31:05.000Z
|
2021-09-22T19:00:31.000Z
|
tpc_webapp/blog/admin.py
|
sahilpabale/ThePunchCoders-Website
|
b80226e053b194882ce520e5afe8b123f0a57630
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Post, BlogComment
# Register your models here.
admin.site.register((Post, BlogComment))
| 28.2
| 40
| 0.801418
| 19
| 141
| 5.947368
| 0.631579
| 0.265487
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.113475
| 141
| 5
| 40
| 28.2
| 0.904
| 0.184397
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a78ce19174a232532590443629fa08a0bf25a2cf
| 2,644
|
py
|
Python
|
tests/test_time_control.py
|
erdc/AdhModel
|
2c5d49dd4cca484a6c46ded6e1f6dec25db4722c
|
[
"BSD-3-Clause"
] | 3
|
2019-06-26T13:41:46.000Z
|
2019-10-16T02:11:29.000Z
|
tests/test_time_control.py
|
erdc/AdhModel
|
2c5d49dd4cca484a6c46ded6e1f6dec25db4722c
|
[
"BSD-3-Clause"
] | 5
|
2019-06-26T14:29:03.000Z
|
2019-07-15T19:25:59.000Z
|
tests/test_time_control.py
|
erdc/AdhModel
|
2c5d49dd4cca484a6c46ded6e1f6dec25db4722c
|
[
"BSD-3-Clause"
] | 2
|
2019-07-26T14:31:14.000Z
|
2019-09-03T18:06:39.000Z
|
import unittest
from adhmodel.simulation.time_control import TimeControl
class TestIo(unittest.TestCase):
def test_dependency_time_step_option(self):
tc = TimeControl()
# test to ensure the string objects haven't changed
base_list = ['Steady state solution (TC STD)', 'Time step series (SERIES DT)',
'Auto Time Step Find (TC ATF)']
curr_list = list(tc.param.time_step_option.objects)
self.assertListEqual(base_list, curr_list, 'param.ObjectSelector objects have changed.')
# test dependencies on TC STD
tc.time_step_option = 'Steady state solution (TC STD)'
self.assertLess(tc.param.max_time_step_size_time_series.precedence, 0)
self.assertGreater(tc.param.steady_state_min_time_step_size.precedence, 0)
self.assertGreater(tc.param.steady_state_max_time_step_size.precedence, 0)
self.assertLess(tc.param.auto_time_step_find_min_time_step_size.precedence, 0)
self.assertLess(tc.param.auto_time_step_find_max_time_step_size_series.precedence, 0)
# test dependencies on SERIES DT
tc.time_step_option = 'Time step series (SERIES DT)'
self.assertGreater(tc.param.max_time_step_size_time_series.precedence, 0)
self.assertLess(tc.param.steady_state_min_time_step_size.precedence, 0)
self.assertLess(tc.param.steady_state_max_time_step_size.precedence, 0)
self.assertLess(tc.param.auto_time_step_find_min_time_step_size.precedence, 0)
self.assertLess(tc.param.auto_time_step_find_max_time_step_size_series.precedence, 0)
# test dependencies on TC ATF
tc.time_step_option = 'Auto Time Step Find (TC ATF)'
self.assertLess(tc.param.max_time_step_size_time_series.precedence, 0)
self.assertLess(tc.param.steady_state_min_time_step_size.precedence, 0)
self.assertLess(tc.param.steady_state_max_time_step_size.precedence, 0)
self.assertGreater(tc.param.auto_time_step_find_min_time_step_size.precedence, 0)
self.assertGreater(tc.param.auto_time_step_find_max_time_step_size_series.precedence, 0)
# test dependecies on TC STD
tc.time_step_option = 'Steady state solution (TC STD)'
self.assertLess(tc.param.max_time_step_size_time_series.precedence, 0)
self.assertGreater(tc.param.steady_state_min_time_step_size.precedence, 0)
self.assertGreater(tc.param.steady_state_max_time_step_size.precedence, 0)
self.assertLess(tc.param.auto_time_step_find_min_time_step_size.precedence, 0)
self.assertLess(tc.param.auto_time_step_find_max_time_step_size_series.precedence, 0)
| 62.952381
| 96
| 0.754917
| 386
| 2,644
| 4.810881
| 0.134715
| 0.163705
| 0.129241
| 0.147011
| 0.790522
| 0.753904
| 0.731287
| 0.731287
| 0.731287
| 0.731287
| 0
| 0.00905
| 0.164145
| 2,644
| 41
| 97
| 64.487805
| 0.831222
| 0.061649
| 0
| 0.575758
| 0
| 0
| 0.098586
| 0
| 0
| 0
| 0
| 0
| 0.636364
| 1
| 0.030303
| false
| 0
| 0.060606
| 0
| 0.121212
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a79d1e4b8e52ab49fc9e107752ec59a42ab9eec0
| 22,321
|
py
|
Python
|
activatable_model/tests/tests.py
|
ambitioninc/django-activatable-model
|
e1967e44d97a03b1a6f1723aa3241bc56ab23eb7
|
[
"MIT"
] | 16
|
2015-02-15T18:41:17.000Z
|
2021-04-13T15:53:45.000Z
|
activatable_model/tests/tests.py
|
ambitioninc/django-activatable-model
|
e1967e44d97a03b1a6f1723aa3241bc56ab23eb7
|
[
"MIT"
] | 5
|
2015-03-30T17:40:10.000Z
|
2021-12-18T12:55:30.000Z
|
activatable_model/tests/tests.py
|
ambitioninc/django-activatable-model
|
e1967e44d97a03b1a6f1723aa3241bc56ab23eb7
|
[
"MIT"
] | 9
|
2015-03-30T16:21:20.000Z
|
2018-10-08T14:38:33.000Z
|
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ValidationError
from django.db import models
from django.test import TestCase, TransactionTestCase
from django_dynamic_fixture import G
from mock import patch, MagicMock, call
from activatable_model.models import BaseActivatableModel
from activatable_model.signals import model_activations_changed, model_activations_updated
from activatable_model.validation import get_activatable_models, validate_activatable_models
from activatable_model.tests.models import (
ActivatableModel,
ActivatableModelWRel,
Rel,
ActivatableModelWNonDefaultField,
ActivatableModelWRelAndCascade,
)
class BaseMockActivationsSignalHanderTest(TestCase):
"""
Connects a mock to the model_activations_changed signal so that it can be easily tested.
"""
def setUp(self):
super(BaseMockActivationsSignalHanderTest, self).setUp()
self.mock_model_activations_changed_handler = MagicMock()
model_activations_changed.connect(self.mock_model_activations_changed_handler)
self.mock_model_activations_updated_handler = MagicMock()
model_activations_updated.connect(self.mock_model_activations_updated_handler)
def tearDown(self):
super(BaseMockActivationsSignalHanderTest, self).tearDown()
model_activations_changed.disconnect(self.mock_model_activations_changed_handler)
class CascadeTest(TransactionTestCase):
"""
Tests that cascade deletes cant happen on an activatable test model.
"""
def test_no_cascade(self):
rel = G(Rel)
G(ActivatableModelWRel, rel_field=rel)
with self.assertRaises(models.ProtectedError):
rel.delete()
def test_allowed_cascade(self):
rel = G(Rel)
rel_id = rel.id
G(ActivatableModelWRelAndCascade, rel_field=rel)
rel.delete()
self.assertEqual(ActivatableModelWRelAndCascade.objects.filter(id=rel_id).count(), 0)
class ManagerQuerySetTest(BaseMockActivationsSignalHanderTest):
"""
Tests custom functionality in the manager and queryset for activatable models. Tests it
on models that use the default is_active field and models that define their own
custom activatable field.
"""
def test_update_no_is_active(self):
G(ActivatableModel, is_active=False)
G(ActivatableModel, is_active=False)
ActivatableModel.objects.update(char_field='hi')
self.assertEquals(ActivatableModel.objects.filter(char_field='hi', is_active=False).count(), 2)
self.assertEquals(self.mock_model_activations_changed_handler.call_count, 2)
def test_update_no_is_active_custom(self):
G(ActivatableModelWNonDefaultField, active=False)
G(ActivatableModelWNonDefaultField, active=False)
ActivatableModelWNonDefaultField.objects.update(char_field='hi')
self.assertEquals(ActivatableModelWNonDefaultField.objects.filter(char_field='hi', active=False).count(), 2)
self.assertEquals(self.mock_model_activations_changed_handler.call_count, 2)
def test_update_w_is_active(self):
m1 = G(ActivatableModel, is_active=False)
m2 = G(ActivatableModel, is_active=False)
ActivatableModel.objects.filter(is_active=False).update(char_field='hi', is_active=True)
self.assertEquals(ActivatableModel.objects.filter(char_field='hi', is_active=True).count(), 2)
self.assertEquals(self.mock_model_activations_changed_handler.call_count, 3)
call_args = self.mock_model_activations_changed_handler.call_args
self.assertEquals(call_args[1]['is_active'], True)
self.assertEquals(set(call_args[1]['instance_ids']), set([m1.id, m2.id]))
self.assertEquals(call_args[1]['sender'], ActivatableModel)
def test_update_w_is_active_custom(self):
m1 = G(ActivatableModelWNonDefaultField, active=False)
m2 = G(ActivatableModelWNonDefaultField, active=False)
ActivatableModelWNonDefaultField.objects.update(char_field='hi', active=True)
self.assertEquals(ActivatableModelWNonDefaultField.objects.filter(char_field='hi', active=True).count(), 2)
self.assertEquals(self.mock_model_activations_changed_handler.call_count, 3)
call_args = self.mock_model_activations_changed_handler.call_args
self.assertEquals(call_args[1]['is_active'], True)
self.assertEquals(set(call_args[1]['instance_ids']), set([m1.id, m2.id]))
self.assertEquals(call_args[1]['sender'], ActivatableModelWNonDefaultField)
def test_activate(self):
models = [
G(ActivatableModel, is_active=False),
G(ActivatableModel, is_active=True),
]
ActivatableModel.objects.activate()
self.assertEquals(ActivatableModel.objects.filter(is_active=True).count(), 2)
static_kwargs = {
'sender': ActivatableModel,
'signal': model_activations_changed,
}
self.mock_model_activations_changed_handler.assert_has_calls([
call(instance_ids=[models[0].id], is_active=False, **static_kwargs),
call(instance_ids=[models[1].id], is_active=True, **static_kwargs),
call(instance_ids=[models[0].id], is_active=True, **static_kwargs),
])
static_kwargs['signal'] = model_activations_updated
self.mock_model_activations_updated_handler.assert_has_calls([
call(instance_ids=[models[0].id], is_active=False, **static_kwargs),
call(instance_ids=[models[1].id], is_active=True, **static_kwargs),
call(instance_ids=[models[0].id, models[1].id], is_active=True, **static_kwargs),
])
def test_activate_custom(self):
models = [
G(ActivatableModelWNonDefaultField, active=False),
G(ActivatableModelWNonDefaultField, active=True),
]
ActivatableModelWNonDefaultField.objects.activate()
self.assertEquals(ActivatableModelWNonDefaultField.objects.filter(active=True).count(), 2)
static_kwargs = {
'sender': ActivatableModelWNonDefaultField,
'signal': model_activations_changed,
}
self.mock_model_activations_changed_handler.assert_has_calls([
call(instance_ids=[models[0].id], is_active=False, **static_kwargs),
call(instance_ids=[models[1].id], is_active=True, **static_kwargs),
call(instance_ids=[models[0].id], is_active=True, **static_kwargs),
])
static_kwargs['signal'] = model_activations_updated
self.mock_model_activations_updated_handler.assert_has_calls([
call(instance_ids=[models[0].id], is_active=False, **static_kwargs),
call(instance_ids=[models[1].id], is_active=True, **static_kwargs),
call(instance_ids=[models[0].id, models[1].id], is_active=True, **static_kwargs),
])
def test_deactivate(self):
models = [
G(ActivatableModel, is_active=False),
G(ActivatableModel, is_active=True),
]
ActivatableModel.objects.deactivate()
self.assertEquals(ActivatableModel.objects.filter(is_active=False).count(), 2)
static_kwargs = {
'sender': ActivatableModel,
'signal': model_activations_changed,
}
self.mock_model_activations_changed_handler.assert_has_calls([
call(instance_ids=[models[0].id], is_active=False, **static_kwargs),
call(instance_ids=[models[1].id], is_active=True, **static_kwargs),
call(instance_ids=[models[1].id], is_active=False, **static_kwargs),
])
static_kwargs['signal'] = model_activations_updated
self.mock_model_activations_updated_handler.assert_has_calls([
call(instance_ids=[models[0].id], is_active=False, **static_kwargs),
call(instance_ids=[models[1].id], is_active=True, **static_kwargs),
call(instance_ids=[models[0].id, models[1].id], is_active=False, **static_kwargs),
])
def test_deactivate_custom(self):
models = [
G(ActivatableModelWNonDefaultField, active=False),
G(ActivatableModelWNonDefaultField, active=True),
]
ActivatableModelWNonDefaultField.objects.deactivate()
self.assertEquals(ActivatableModelWNonDefaultField.objects.filter(active=False).count(), 2)
static_kwargs = {
'sender': ActivatableModelWNonDefaultField,
'signal': model_activations_changed,
}
self.mock_model_activations_changed_handler.assert_has_calls([
call(instance_ids=[models[0].id], is_active=False, **static_kwargs),
call(instance_ids=[models[1].id], is_active=True, **static_kwargs),
call(instance_ids=[models[1].id], is_active=False, **static_kwargs),
])
static_kwargs['signal'] = model_activations_updated
self.mock_model_activations_updated_handler.assert_has_calls([
call(instance_ids=[models[0].id], is_active=False, **static_kwargs),
call(instance_ids=[models[1].id], is_active=True, **static_kwargs),
call(instance_ids=[models[0].id, models[1].id], is_active=False, **static_kwargs),
])
def test_delete_no_force(self):
G(ActivatableModel, is_active=False)
G(ActivatableModel, is_active=True)
ActivatableModel.objects.all().delete()
self.assertEquals(ActivatableModel.objects.filter(is_active=False).count(), 2)
self.assertEquals(self.mock_model_activations_changed_handler.call_count, 3)
def test_delete_no_force_custom(self):
G(ActivatableModelWNonDefaultField, active=False)
G(ActivatableModelWNonDefaultField, active=True)
ActivatableModelWNonDefaultField.objects.all().delete()
self.assertEquals(ActivatableModelWNonDefaultField.objects.filter(active=False).count(), 2)
self.assertEquals(self.mock_model_activations_changed_handler.call_count, 3)
def test_delete_w_force(self):
G(ActivatableModel, is_active=False)
G(ActivatableModel, is_active=True)
ActivatableModel.objects.all().delete(force=True)
self.assertFalse(ActivatableModel.objects.exists())
self.assertEquals(self.mock_model_activations_changed_handler.call_count, 2)
def test_delete_w_force_custom(self):
G(ActivatableModelWNonDefaultField, active=False)
G(ActivatableModelWNonDefaultField, active=True)
ActivatableModelWNonDefaultField.objects.all().delete(force=True)
self.assertFalse(ActivatableModelWNonDefaultField.objects.exists())
self.assertEquals(self.mock_model_activations_changed_handler.call_count, 2)
class SaveTest(BaseMockActivationsSignalHanderTest):
"""
Tests the custom save function in the BaseActivatableModel.
"""
def test_create(self):
m = G(ActivatableModel, is_active=False)
call_args = self.mock_model_activations_changed_handler.call_args
self.assertEquals(call_args[1]['is_active'], False)
self.assertEquals(call_args[1]['instance_ids'], [m.id])
self.assertEquals(call_args[1]['sender'], ActivatableModel)
updated_call_args = self.mock_model_activations_updated_handler.call_args
self.assertEquals(updated_call_args[1]['is_active'], False)
self.assertEquals(updated_call_args[1]['instance_ids'], [m.id])
self.assertEquals(updated_call_args[1]['sender'], ActivatableModel)
def test_save_not_changed(self):
m = G(ActivatableModel, is_active=False)
m.is_active = False
m.save()
self.assertEquals(self.mock_model_activations_changed_handler.call_count, 1)
self.assertEquals(self.mock_model_activations_updated_handler.call_count, 2)
def test_save_changed(self):
m = G(ActivatableModel, is_active=False)
m.is_active = True
m.save()
# changed
self.assertEquals(self.mock_model_activations_changed_handler.call_count, 2)
call_args = self.mock_model_activations_changed_handler.call_args
self.assertEquals(call_args[1]['is_active'], True)
self.assertEquals(call_args[1]['instance_ids'], [m.id])
self.assertEquals(call_args[1]['sender'], ActivatableModel)
# updated
self.assertEquals(self.mock_model_activations_updated_handler.call_count, 2)
updated_call_args = self.mock_model_activations_updated_handler.call_args
self.assertEquals(updated_call_args[1]['is_active'], True)
self.assertEquals(updated_call_args[1]['instance_ids'], [m.id])
self.assertEquals(updated_call_args[1]['sender'], ActivatableModel)
def test_save_changed_custom(self):
m = G(ActivatableModelWNonDefaultField, active=False)
m.active = True
m.save()
# changed
self.assertEquals(self.mock_model_activations_changed_handler.call_count, 2)
call_args = self.mock_model_activations_changed_handler.call_args
self.assertEquals(call_args[1]['is_active'], True)
self.assertEquals(call_args[1]['instance_ids'], [m.id])
self.assertEquals(call_args[1]['sender'], ActivatableModelWNonDefaultField)
# updated
self.assertEquals(self.mock_model_activations_updated_handler.call_count, 2)
updated_call_args = self.mock_model_activations_updated_handler.call_args
self.assertEquals(updated_call_args[1]['is_active'], True)
self.assertEquals(updated_call_args[1]['instance_ids'], [m.id])
self.assertEquals(updated_call_args[1]['sender'], ActivatableModelWNonDefaultField)
class SingleDeleteTest(BaseMockActivationsSignalHanderTest):
"""
Tests calling delete on a single model that inherits BaseActivatableModel.
"""
def test_delete_no_force_no_active_changed(self):
m = G(ActivatableModel, is_active=False)
m.delete()
m = ActivatableModel.objects.get(id=m.id)
self.assertFalse(m.is_active)
self.assertEquals(self.mock_model_activations_changed_handler.call_count, 1)
self.assertEquals(self.mock_model_activations_updated_handler.call_count, 2)
def test_delete_no_force_active_changed(self):
m = G(ActivatableModel, is_active=True)
m.delete()
m = ActivatableModel.objects.get(id=m.id)
self.assertFalse(m.is_active)
self.assertEquals(self.mock_model_activations_changed_handler.call_count, 2)
self.assertEquals(self.mock_model_activations_updated_handler.call_count, 2)
def test_delete_force(self):
m = G(ActivatableModel, is_active=False)
m.delete(force=True)
self.assertFalse(ActivatableModel.objects.exists())
class ValidateDbTest(TestCase):
"""
Tests that activatable models are validated properly upon pre_syncdb signal.
"""
def test_get_activatable_models(self):
activatable_models = get_activatable_models()
self.assertEquals(
set(
[
ActivatableModel,
ActivatableModelWRel,
ActivatableModelWRelAndCascade,
ActivatableModelWNonDefaultField
]
),
set(activatable_models)
)
def test_all_valid_models(self):
"""
All models should validate fine.
"""
validate_activatable_models()
@patch('activatable_model.validation.get_activatable_models')
def test_activatable_field_is_not_boolean(self, mock_get_activatable_models):
"""
SET_NULL is a valid option for foreign keys in activatable models.
"""
# Make this an object and not an actual django model. This prevents it from always
# being included when syncing the db. This is true for all other test models in this file.
class NonBooleanModel(BaseActivatableModel):
class Meta:
abstract = True
is_active = models.CharField()
ctype = models.ForeignKey(ContentType, null=True, on_delete=models.SET_NULL)
mock_get_activatable_models.return_value = [NonBooleanModel]
with self.assertRaises(ValidationError):
validate_activatable_models()
@patch('activatable_model.validation.get_activatable_models')
def test_activatable_field_is_not_defined(self, mock_get_activatable_models):
"""
SET_NULL is a valid option for foreign keys in activatable models.
"""
# Make this an object and not an actual django model. This prevents it from always
# being included when syncing the db. This is true for all other test models in this file.
class NoValidFieldModel(BaseActivatableModel):
class Meta:
abstract = True
ACTIVATABLE_FIELD_NAME = 'active'
is_active = models.BooleanField()
ctype = models.ForeignKey(ContentType, null=True, on_delete=models.SET_NULL)
mock_get_activatable_models.return_value = [NoValidFieldModel]
with self.assertRaises(ValidationError):
validate_activatable_models()
@patch('activatable_model.validation.get_activatable_models')
def test_foreign_key_is_null(self, mock_get_activatable_models):
"""
SET_NULL is a valid option for foreign keys in activatable models.
"""
# Make this an object and not an actual django model. This prevents it from always
# being included when syncing the db. This is true for all other test models in this file.
class CascadableModel(BaseActivatableModel):
class Meta:
abstract = True
is_active = models.BooleanField(default=False)
ctype = models.ForeignKey(ContentType, null=True, on_delete=models.SET_NULL)
mock_get_activatable_models.return_value = [CascadableModel]
validate_activatable_models()
self.assertEquals(mock_get_activatable_models.call_count, 1)
@patch('activatable_model.validation.get_activatable_models')
def test_foreign_key_protect(self, mock_get_activatable_models):
"""
PROTECT is a valid option for foreign keys in activatable models.
"""
# Make this an object and not an actual django model. This prevents it from always
# being included when syncing the db. This is true for all other test models in this file.
class CascadableModel(BaseActivatableModel):
class Meta:
abstract = True
is_active = models.BooleanField(default=False)
ctype = models.ForeignKey(ContentType, null=True, on_delete=models.PROTECT)
mock_get_activatable_models.return_value = [CascadableModel]
validate_activatable_models()
self.assertEquals(mock_get_activatable_models.call_count, 1)
@patch('activatable_model.validation.get_activatable_models')
def test_foreign_key_cascade(self, mock_get_activatable_models):
"""
The default cascade behavior is invalid for activatable models.
"""
class CascadableModel(BaseActivatableModel):
class Meta:
abstract = True
is_active = models.BooleanField(default=False)
ctype = models.ForeignKey(ContentType, on_delete=models.CASCADE)
mock_get_activatable_models.return_value = [CascadableModel]
with self.assertRaises(ValidationError):
validate_activatable_models()
@patch('activatable_model.validation.get_activatable_models')
def test_one_to_one_is_null(self, mock_get_activatable_models):
"""
SET_NULL is a valid option for foreign keys in activatable models.
"""
# Make this an object and not an actual django model. This prevents it from always
# being included when syncing the db. This is true for all other test models in this file.
class CascadableModel(BaseActivatableModel):
class Meta:
abstract = True
is_active = models.BooleanField(default=False)
ctype = models.OneToOneField(ContentType, null=True, on_delete=models.SET_NULL)
mock_get_activatable_models.return_value = [CascadableModel]
validate_activatable_models()
self.assertEquals(mock_get_activatable_models.call_count, 1)
@patch('activatable_model.validation.get_activatable_models')
def test_one_to_one_protect(self, mock_get_activatable_models):
"""
PROTECT is a valid option for foreign keys in activatable models.
"""
# Make this an object and not an actual django model. This prevents it from always
# being included when syncing the db. This is true for all other test models in this file.
class CascadableModel(BaseActivatableModel):
class Meta:
abstract = True
is_active = models.BooleanField(default=False)
ctype = models.OneToOneField(ContentType, null=True, on_delete=models.PROTECT)
mock_get_activatable_models.return_value = [CascadableModel]
validate_activatable_models()
self.assertEquals(mock_get_activatable_models.call_count, 1)
@patch('activatable_model.validation.get_activatable_models')
def test_one_to_one_cascade(self, mock_get_activatable_models):
    """
    The default cascade behavior is invalid for activatable models.
    """
    # CASCADE would hard-delete related rows instead of leaving them to be
    # deactivated, so the validator must reject it with a ValidationError.
    class CascadableModel(BaseActivatableModel):
        class Meta:
            abstract = True
        is_active = models.BooleanField(default=False)
        ctype = models.OneToOneField(ContentType, on_delete=models.CASCADE)
    mock_get_activatable_models.return_value = [CascadableModel]
    with self.assertRaises(ValidationError):
        validate_activatable_models()
class ModelUpdatedSignalTest(BaseMockActivationsSignalHanderTest):
    """
    Tests that the activations-updated signal handler fires on save even when
    no activatable field changed.
    """
    def test_no_activatable_field_updated(self):
        # Save a change to a non-activatable field only.
        m = G(ActivatableModel, is_active=False)
        m_from_db = ActivatableModel.objects.get(id=m.id)
        m_from_db.char_field = 'foo'
        m_from_db.save()
        # assertEqual: assertEquals is a deprecated alias (removed in Python 3.12).
        self.assertEqual(self.mock_model_activations_updated_handler.call_count, 1)
| 46.405405
| 116
| 0.703374
| 2,522
| 22,321
| 5.954401
| 0.071372
| 0.039955
| 0.034627
| 0.063928
| 0.827396
| 0.812346
| 0.786975
| 0.77066
| 0.749151
| 0.721516
| 0
| 0.005306
| 0.206263
| 22,321
| 480
| 117
| 46.502083
| 0.842298
| 0.098876
| 0
| 0.626781
| 0
| 0
| 0.036455
| 0.020629
| 0
| 0
| 0
| 0
| 0.219373
| 1
| 0.096866
| false
| 0
| 0.02849
| 0
| 0.190883
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
38f224c6d42f60314d43862c7f76924112fe0231
| 48
|
py
|
Python
|
enthought/pyface/image_cache.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | 3
|
2016-12-09T06:05:18.000Z
|
2018-03-01T13:00:29.000Z
|
enthought/pyface/image_cache.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | 1
|
2020-12-02T00:51:32.000Z
|
2020-12-02T08:48:55.000Z
|
enthought/pyface/image_cache.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | null | null | null |
# proxy module
from pyface.image_cache import *
| 16
| 32
| 0.791667
| 7
| 48
| 5.285714
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.145833
| 48
| 2
| 33
| 24
| 0.902439
| 0.25
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ac08a98a9428604b077c0ef95dba37df5e4324d8
| 232
|
py
|
Python
|
repartee/views.py
|
multiple1902/repartee
|
bfbd1b8433c086ee3ae877f08156949515c3c977
|
[
"Apache-2.0",
"MIT"
] | 1
|
2017-05-15T10:39:29.000Z
|
2017-05-15T10:39:29.000Z
|
repartee/views.py
|
multiple1902/repartee
|
bfbd1b8433c086ee3ae877f08156949515c3c977
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
repartee/views.py
|
multiple1902/repartee
|
bfbd1b8433c086ee3ae877f08156949515c3c977
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
from django.shortcuts import render_to_response
from django.template import loader,Context, RequestContext
def homepage(request):
    # Render the site index with request context.
    # NOTE(review): render_to_response and its context_instance argument were
    # removed in Django 1.10 -- migrate to django.shortcuts.render on upgrade.
    return render_to_response("index.html", {
    }, context_instance = RequestContext(request))
| 33.142857
| 58
| 0.784483
| 27
| 232
| 6.555556
| 0.666667
| 0.112994
| 0.180791
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.133621
| 232
| 6
| 59
| 38.666667
| 0.880597
| 0
| 0
| 0
| 0
| 0
| 0.043103
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.4
| 0.2
| 0.8
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
ac1e3a17249ed7fabec26e3144c7b5c9e12e2296
| 26
|
py
|
Python
|
PythonStuff1.py
|
OSHI7/Learning1
|
fa8014066e226465bb7989fbbb82a35412c4f634
|
[
"MIT"
] | null | null | null |
PythonStuff1.py
|
OSHI7/Learning1
|
fa8014066e226465bb7989fbbb82a35412c4f634
|
[
"MIT"
] | 3
|
2020-03-24T18:02:39.000Z
|
2020-10-06T21:32:23.000Z
|
PythonStuff1.py
|
OSHI7/Learning1
|
fa8014066e226465bb7989fbbb82a35412c4f634
|
[
"MIT"
] | 1
|
2017-07-31T13:15:54.000Z
|
2017-07-31T13:15:54.000Z
|
import os
# Smoke-test output confirming the script runs.
print('hello')
| 6.5
| 14
| 0.692308
| 4
| 26
| 4.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.153846
| 26
| 3
| 15
| 8.666667
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0.192308
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 6
|
3bc1e75a9aa4aaf79ecf9267d703153fb4b64770
| 147
|
py
|
Python
|
Coding-Challenges/maxProfitWithKTransactions/max_profit_with_k_transactions.py
|
FergusDevelopmentLLC/Coders-Workshop
|
3513bd5f79eaa85b4d2a648c5f343a224842325d
|
[
"MIT"
] | 33
|
2019-12-02T23:29:47.000Z
|
2022-03-24T02:40:36.000Z
|
Coding-Challenges/maxProfitWithKTransactions/max_profit_with_k_transactions.py
|
FergusDevelopmentLLC/Coders-Workshop
|
3513bd5f79eaa85b4d2a648c5f343a224842325d
|
[
"MIT"
] | 39
|
2020-01-15T19:28:12.000Z
|
2021-11-26T05:13:29.000Z
|
Coding-Challenges/maxProfitWithKTransactions/max_profit_with_k_transactions.py
|
FergusDevelopmentLLC/Coders-Workshop
|
3513bd5f79eaa85b4d2a648c5f343a224842325d
|
[
"MIT"
] | 49
|
2019-12-02T23:29:53.000Z
|
2022-03-03T01:11:37.000Z
|
#!/usr/bin/env python3
def max_profit_with_k_transactions(prices, k):
    """Return the maximum profit from at most k buy/sell transactions.

    prices is a sequence of prices (one per day); each transaction is a buy
    followed by a later sell, and transactions may not overlap. Returns 0 for
    empty prices or k <= 0. O(k * n) time, O(n) space.

    The original body was an unimplemented stub (``pass`` returning None);
    this is the standard dynamic-programming solution.
    """
    if not prices or k <= 0:
        return 0
    n = len(prices)
    # prev[d] = best profit through day d using up to (t - 1) transactions.
    prev = [0] * n
    for _ in range(k):
        curr = [0] * n
        # Best value of prev[j] - prices[j] over earlier days j (buy point).
        best_buy = float('-inf')
        for d in range(1, n):
            best_buy = max(best_buy, prev[d - 1] - prices[d - 1])
            curr[d] = max(curr[d - 1], prices[d] + best_buy)
        prev = curr
    return prev[-1]
print(max_profit_with_k_transactions([5, 11, 3, 50, 60, 90], 2))
| 18.375
| 64
| 0.721088
| 26
| 147
| 3.769231
| 0.769231
| 0.183673
| 0.265306
| 0.285714
| 0.530612
| 0
| 0
| 0
| 0
| 0
| 0
| 0.094488
| 0.136054
| 147
| 7
| 65
| 21
| 0.677165
| 0.142857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0.333333
| 0
| 0
| 0.333333
| 0.333333
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
3bce665f205789716aa1b80cbfff0b1599ce98f7
| 1,723
|
py
|
Python
|
black.py
|
mrytty/gradu-public
|
537337ab3dc49be9f1f4283706b0f4dcbc8cb059
|
[
"MIT"
] | null | null | null |
black.py
|
mrytty/gradu-public
|
537337ab3dc49be9f1f4283706b0f4dcbc8cb059
|
[
"MIT"
] | null | null | null |
black.py
|
mrytty/gradu-public
|
537337ab3dc49be9f1f4283706b0f4dcbc8cb059
|
[
"MIT"
] | null | null | null |
import numpy as np
from scipy.stats import norm
from dcf import dcf
def blacklet(K, F, vol, omega=1):
    """Undiscounted Black option value for strike K and forward F.

    omega=1 prices a call-like payoff, omega=-1 a put-like payoff.
    """
    half_var = 0.5 * vol ** 2
    d1 = (np.log(F / K) + half_var) / vol
    d2 = d1 - vol
    forward_leg = F * norm.cdf(omega * d1)
    strike_leg = K * norm.cdf(omega * d2)
    return omega * (forward_leg - strike_leg)
def caplet_black(bond, forward, S, T, K, sigma, method='Act360'):
    """Price a caplet over [S, T] with Black's formula.

    bond: discount factor to the payment date; forward: forward rate over
    [S, T] (the Black underlying); K: strike; sigma: Black volatility;
    method: day-count convention passed to dcf.
    """
    dcf_factor = dcf(S, T, method=method)
    vol = sigma * np.sqrt(S)
    # Bug fix: blacklet's signature is (K, F, vol, omega). The original call
    # blacklet(K, S, forward, vol, omega=1) passed four positionals plus the
    # omega keyword and raised TypeError ("multiple values for 'omega'").
    return bond * dcf_factor * blacklet(K, forward, vol, omega=1)
def cap_black(bonds, forwards, times, K, sigma, method='Act360'):
    # Price an interest-rate cap as a sum of Black caplets.
    if len(times) == 2:
        # Single accrual period: price one caplet directly.
        return caplet_black(bonds, forwards, times[0], times[1], K, sigma, method=method)
    else:
        sum = 0
        for i in range(len(times) - 1):
            # NOTE(review): pop() consumes bonds/forwards from the END while i
            # runs forward -- verify the intended period/discount pairing.
            bond = bonds.pop()
            forward = forwards.pop()
            # NOTE(review): S and T are assigned the same value and index the
            # popped element 'bond' rather than 'times'; this looks like it
            # should be S, T = times[i], times[i + 1] -- confirm with callers.
            S, T = bond[i], bond[i]
            sum += caplet_black(bond, forward, S, T, K, sigma, method=method)
        return sum
def floorlet_black(bond, forward, S, T, K, sigma, method='Act360'):
    """Price a floorlet over [S, T] with Black's formula (put payoff).

    Mirrors caplet_black but with omega=-1 for the put-like payoff.
    """
    dcf_factor = dcf(S, T, method=method)
    vol = sigma * np.sqrt(S)
    # Bug fix: blacklet's signature is (K, F, vol, omega). The original call
    # blacklet(K, S, forward, vol, omega=-1) passed four positionals plus the
    # omega keyword and raised TypeError ("multiple values for 'omega'").
    return bond * dcf_factor * blacklet(K, forward, vol, omega=-1)
def floor_black(bonds, forwards, times, K, sigma, method='Act360'):
    """Price an interest-rate floor as a sum of Black floorlets.

    Mirrors cap_black but delegates to floorlet_black (put payoff).
    """
    if len(times) == 2:
        # Single accrual period: price one floorlet directly.
        return floorlet_black(bonds, forwards, times[0], times[1], K, sigma, method=method)
    else:
        # 'total' instead of the original 'sum', which shadowed the builtin.
        total = 0
        for i in range(len(times) - 1):
            bond = bonds.pop()
            forward = forwards.pop()
            # NOTE(review): S and T are assigned the same value and index the
            # popped element 'bond' rather than 'times'; likely intended
            # S, T = times[i], times[i + 1] -- confirm (same issue in cap_black).
            S, T = bond[i], bond[i]
            # Bug fix: the original called 'flooret_black', an undefined name
            # (NameError); the helper defined above is floorlet_black.
            total += floorlet_black(bond, forward, S, T, K, sigma, method=method)
        return total
| 22.376623
| 92
| 0.547882
| 247
| 1,723
| 3.761134
| 0.210526
| 0.017223
| 0.103337
| 0.073197
| 0.800861
| 0.800861
| 0.800861
| 0.764263
| 0.764263
| 0.751346
| 0
| 0.029686
| 0.315728
| 1,723
| 76
| 93
| 22.671053
| 0.75827
| 0
| 0
| 0.526316
| 0
| 0
| 0.014661
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.131579
| false
| 0
| 0.078947
| 0
| 0.394737
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
3be24e315c01210b57e504b94f4df287c7bc40bd
| 140
|
py
|
Python
|
src/pyfonycore/bootstrap/config/raw/allowed_environments_resolver.py
|
pyfony/core
|
32cb2e959590307fb845ccafec90b8264fdad4ab
|
[
"MIT"
] | null | null | null |
src/pyfonycore/bootstrap/config/raw/allowed_environments_resolver.py
|
pyfony/core
|
32cb2e959590307fb845ccafec90b8264fdad4ab
|
[
"MIT"
] | null | null | null |
src/pyfonycore/bootstrap/config/raw/allowed_environments_resolver.py
|
pyfony/core
|
32cb2e959590307fb845ccafec90b8264fdad4ab
|
[
"MIT"
] | null | null | null |
def resolve(raw_config):
    """Return the configured allowed environments, defaulting to dev/test/prod."""
    return raw_config.get("allowed_environments", ["dev", "test", "prod"])
| 46.666667
| 114
| 0.75
| 19
| 140
| 5.263158
| 0.684211
| 0.27
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114286
| 140
| 2
| 115
| 70
| 0.806452
| 0
| 0
| 0
| 0
| 0
| 0.364286
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
ce077b174ce3b952791acabe4ad68c568a6a939e
| 116
|
py
|
Python
|
src/__init__.py
|
Briles/gruvbox
|
d127fc8887ea006ead49e97eed4d89955fbb5e16
|
[
"MIT"
] | 251
|
2016-03-04T04:32:10.000Z
|
2022-03-22T09:52:02.000Z
|
src/__init__.py
|
Briles/gruvbox
|
d127fc8887ea006ead49e97eed4d89955fbb5e16
|
[
"MIT"
] | 50
|
2016-03-09T07:41:55.000Z
|
2021-01-20T11:09:56.000Z
|
src/__init__.py
|
Briles/gruvbox
|
d127fc8887ea006ead49e97eed4d89955fbb5e16
|
[
"MIT"
] | 23
|
2016-05-21T19:57:27.000Z
|
2022-02-01T15:44:00.000Z
|
#!/usr/bin/env python
# coding: utf-8
from .documentation import *
from .support import *
from .gruvbox import *
| 12.888889
| 28
| 0.706897
| 16
| 116
| 5.125
| 0.75
| 0.243902
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010417
| 0.172414
| 116
| 8
| 29
| 14.5
| 0.84375
| 0.293103
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
025350c0c7033d82c74777bd3a2c1b8afcf6343c
| 128
|
py
|
Python
|
vika/types/__init__.py
|
Borye/vika.py
|
7b4ac29d308e00e2bbfc37dbcaa3f6c7a4a2236f
|
[
"MIT"
] | 39
|
2020-10-27T13:17:37.000Z
|
2022-03-17T11:04:39.000Z
|
vika/types/__init__.py
|
Borye/vika.py
|
7b4ac29d308e00e2bbfc37dbcaa3f6c7a4a2236f
|
[
"MIT"
] | 9
|
2020-10-27T14:44:48.000Z
|
2022-01-19T04:46:58.000Z
|
vika/types/__init__.py
|
Borye/vika.py
|
7b4ac29d308e00e2bbfc37dbcaa3f6c7a4a2236f
|
[
"MIT"
] | 8
|
2020-10-27T15:12:34.000Z
|
2022-01-19T14:23:15.000Z
|
from .field import *
from .record import *
from .view import *
from .node import *
from .space import *
from .response import *
| 18.285714
| 23
| 0.71875
| 18
| 128
| 5.111111
| 0.444444
| 0.543478
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1875
| 128
| 6
| 24
| 21.333333
| 0.884615
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0254447d57a0be3bf836d0336eada1d2f33217de
| 44
|
py
|
Python
|
libs/yowsup/yowsup/yowsup/layers/protocol_messages/__init__.py
|
akshitpradhan/TomHack
|
837226e7b38de1140c19bc2d478eeb9e379ed1fd
|
[
"MIT"
] | 22
|
2017-07-14T20:01:17.000Z
|
2022-03-08T14:22:39.000Z
|
libs/yowsup/yowsup/yowsup/layers/protocol_messages/__init__.py
|
akshitpradhan/TomHack
|
837226e7b38de1140c19bc2d478eeb9e379ed1fd
|
[
"MIT"
] | 6
|
2017-07-14T21:03:50.000Z
|
2021-06-10T19:08:32.000Z
|
libs/yowsup/yowsup/yowsup/layers/protocol_messages/__init__.py
|
akshitpradhan/TomHack
|
837226e7b38de1140c19bc2d478eeb9e379ed1fd
|
[
"MIT"
] | 13
|
2017-07-14T20:13:14.000Z
|
2020-11-12T08:06:05.000Z
|
from .layer import YowMessagesProtocolLayer
| 22
| 43
| 0.886364
| 4
| 44
| 9.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 44
| 1
| 44
| 44
| 0.975
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
02696a25dbbc101c97d3de57d8484432e689854f
| 77
|
py
|
Python
|
dfdt/__init__.py
|
zpleunis/dfdt
|
afd6d2a9b19c053ad5f7b6318f2e061cf7fb3964
|
[
"BSD-3-Clause"
] | 7
|
2020-10-01T13:36:23.000Z
|
2021-12-18T02:20:33.000Z
|
dfdt/__init__.py
|
zpleunis/dfdt
|
afd6d2a9b19c053ad5f7b6318f2e061cf7fb3964
|
[
"BSD-3-Clause"
] | null | null | null |
dfdt/__init__.py
|
zpleunis/dfdt
|
afd6d2a9b19c053ad5f7b6318f2e061cf7fb3964
|
[
"BSD-3-Clause"
] | null | null | null |
from .dfdt_utils import DynamicSpectrum
from .ac_mc_drift import ac_mc_drift
| 25.666667
| 39
| 0.87013
| 13
| 77
| 4.769231
| 0.615385
| 0.129032
| 0.290323
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.103896
| 77
| 2
| 40
| 38.5
| 0.898551
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
026e5393739ba0193181f08049127f04fc8f480d
| 108
|
py
|
Python
|
office365/teams/teamsTabConfiguration.py
|
wreiner/Office365-REST-Python-Client
|
476bbce4f5928a140b4f5d33475d0ac9b0783530
|
[
"MIT"
] | null | null | null |
office365/teams/teamsTabConfiguration.py
|
wreiner/Office365-REST-Python-Client
|
476bbce4f5928a140b4f5d33475d0ac9b0783530
|
[
"MIT"
] | null | null | null |
office365/teams/teamsTabConfiguration.py
|
wreiner/Office365-REST-Python-Client
|
476bbce4f5928a140b4f5d33475d0ac9b0783530
|
[
"MIT"
] | null | null | null |
from office365.runtime.client_value import ClientValue
class TeamsTabConfiguration(ClientValue):
    """Empty ClientValue subclass marking the Teams tab configuration payload
    type; it defines no fields of its own yet."""
    pass
| 18
| 54
| 0.833333
| 11
| 108
| 8.090909
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.031579
| 0.12037
| 108
| 5
| 55
| 21.6
| 0.905263
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
5a1803881b4e1656541c80773b9db3c8d08d9ba0
| 76
|
py
|
Python
|
utils/emote.py
|
Rishiraj0100/world-chat
|
46f0c255348e06787339c464c31577f90daea253
|
[
"MIT"
] | 14
|
2021-02-16T16:01:41.000Z
|
2022-01-30T06:28:22.000Z
|
utils/emote.py
|
Rishiraj0100/world-chat
|
46f0c255348e06787339c464c31577f90daea253
|
[
"MIT"
] | null | null | null |
utils/emote.py
|
Rishiraj0100/world-chat
|
46f0c255348e06787339c464c31577f90daea253
|
[
"MIT"
] | 6
|
2021-02-16T16:01:56.000Z
|
2021-07-16T11:24:50.000Z
|
# Discord custom-emoji mention strings in <:name:id> form.
check = "<:check:773959361953267742>"
xmark = "<:xmark:773959363379462184>"
| 25.333333
| 37
| 0.736842
| 6
| 76
| 9.333333
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.514286
| 0.078947
| 76
| 2
| 38
| 38
| 0.285714
| 0
| 0
| 0
| 0
| 0
| 0.710526
| 0.710526
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
5a327e1467e7628b0f29ca76e88c9af75e3b23ec
| 162
|
py
|
Python
|
datasets/__init__.py
|
NoelShin/selfmask
|
396e0a3636b29591f505b6711be45eabe292919a
|
[
"MIT"
] | 11
|
2022-03-24T02:45:33.000Z
|
2022-03-30T02:53:33.000Z
|
datasets/__init__.py
|
NoelShin/selfmask
|
396e0a3636b29591f505b6711be45eabe292919a
|
[
"MIT"
] | 2
|
2022-03-25T11:08:34.000Z
|
2022-03-30T14:13:26.000Z
|
datasets/__init__.py
|
NoelShin/selfmask
|
396e0a3636b29591f505b6711be45eabe292919a
|
[
"MIT"
] | 1
|
2022-03-30T02:53:35.000Z
|
2022-03-30T02:53:35.000Z
|
# saliency object detection dataset
from datasets.dut_omron import DUTOMRONDataset
from datasets.duts import DUTSDataset
from datasets.ecssd import ECSSDDataset
| 27
| 46
| 0.864198
| 20
| 162
| 6.95
| 0.7
| 0.258993
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 162
| 5
| 47
| 32.4
| 0.965278
| 0.203704
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5a8393eb6f5fe3821855cd70263a0a338512f0ec
| 656
|
py
|
Python
|
tests/test_hacks.py
|
Laserlicht/deepl-translate
|
23332c3042065f376f3b602a281248e30c80dec8
|
[
"MIT"
] | 1
|
2022-02-18T10:12:51.000Z
|
2022-02-18T10:12:51.000Z
|
tests/test_hacks.py
|
Lain1984/deepl-translate
|
ca61c63ff23031291fbaf220de92018fb85a57f0
|
[
"MIT"
] | null | null | null |
tests/test_hacks.py
|
Lain1984/deepl-translate
|
ca61c63ff23031291fbaf220de92018fb85a57f0
|
[
"MIT"
] | 2
|
2020-12-09T19:00:20.000Z
|
2022-03-11T06:17:51.000Z
|
from deepl.hacks import calculate_valid_timestamp, generate_id
def test_calculate_valid_timestamp():
    """Check calculate_valid_timestamp for i_count 0..6 at base timestamp 10."""
    # (i_count, expected timestamp) pairs, in the same order as the original
    # individual assertions.
    expected = {0: 10, 1: 11, 2: 12, 3: 12, 4: 12, 5: 15, 6: 12}
    for i_count, want in expected.items():
        assert want == calculate_valid_timestamp(timestamp=10, i_count=i_count)
def test_generate_id():
    # Generated ids must fall strictly between one million and one hundred million.
    assert 100_000_000 > generate_id() > 1_000_000
| 41
| 67
| 0.775915
| 97
| 656
| 4.896907
| 0.278351
| 0.265263
| 0.435789
| 0.471579
| 0.656842
| 0.656842
| 0.656842
| 0.404211
| 0.404211
| 0
| 0
| 0.089474
| 0.131098
| 656
| 15
| 68
| 43.733333
| 0.74386
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.727273
| 1
| 0.181818
| true
| 0
| 0.090909
| 0
| 0.272727
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ce50f807d741b49c254ef465207f171cabe8e2b2
| 6,131
|
py
|
Python
|
src/genie/libs/parser/iosxe/tests/ShowApTagSummary/cli/equal/golden_output_expected.py
|
cphannan/genieparser
|
0b32e1ea633c532d67d89476aa5500f569cfbc6e
|
[
"Apache-2.0"
] | 1
|
2020-05-26T13:06:10.000Z
|
2020-05-26T13:06:10.000Z
|
src/genie/libs/parser/iosxe/tests/ShowApTagSummary/cli/equal/golden_output_expected.py
|
dalwar23/genieparser
|
a9df45d3ee23f107bfb55915068e90782f92fc99
|
[
"Apache-2.0"
] | null | null | null |
src/genie/libs/parser/iosxe/tests/ShowApTagSummary/cli/equal/golden_output_expected.py
|
dalwar23/genieparser
|
a9df45d3ee23f107bfb55915068e90782f92fc99
|
[
"Apache-2.0"
] | 2
|
2021-02-12T21:42:30.000Z
|
2021-02-12T21:47:51.000Z
|
# Expected parsed structure for the "show ap tag summary" golden output:
# one entry per AP name with its MAC and tag assignments, plus the AP count.
expected_output = {
    "ap_name": {
        "b25a-13-cap10": {
            "ap_mac": "3c41.0fee.5094",
            "site_tag_name": "default-site-tag-fabric",
            "policy_tag_name": "PT_Fabri_B25_B25-1_fe778",
            "rf_tag_name": "Standard",
            "misconfigured": "No",
            "tag_source": "Static",
        },
        "b25b-12-cap01": {
            "ap_mac": "3c41.0fee.5884",
            "site_tag_name": "default-site-tag-fabric",
            "policy_tag_name": "PT_Fabri_B25_B25-1_fe778",
            "rf_tag_name": "Standard",
            "misconfigured": "No",
            "tag_source": "Static",
        },
        "b25b-11-cap01": {
            "ap_mac": "3c41.0fee.5d90",
            "site_tag_name": "default-site-tag-fabric",
            "policy_tag_name": "PT_Fabri_B25_B25-1_fe778",
            "rf_tag_name": "Standard",
            "misconfigured": "No",
            "tag_source": "Static",
        },
        "b25a-12-cap07": {
            "ap_mac": "3c41.0fee.5de8",
            "site_tag_name": "default-site-tag-fabric",
            "policy_tag_name": "PT_Fabri_B25_B25-1_fe778",
            "rf_tag_name": "Standard",
            "misconfigured": "No",
            "tag_source": "Static",
        },
        "b25a-11-cap05": {
            "ap_mac": "3c41.0fee.5df0",
            "site_tag_name": "default-site-tag-fabric",
            "policy_tag_name": "PT_Fabri_B25_B25-1_fe778",
            "rf_tag_name": "Standard",
            "misconfigured": "No",
            "tag_source": "Static",
        },
        "b25a-11-cap04": {
            "ap_mac": "3c41.0fee.5e5c",
            "site_tag_name": "default-site-tag-fabric",
            "policy_tag_name": "PT_Fabri_B25_B25-1_fe778",
            "rf_tag_name": "Standard",
            "misconfigured": "No",
            "tag_source": "Static",
        },
        "b25a-12-cap08": {
            "ap_mac": "3c41.0fee.5e74",
            "site_tag_name": "default-site-tag-fabric",
            "policy_tag_name": "PT_Fabri_B25_B25-1_fe778",
            "rf_tag_name": "Standard",
            "misconfigured": "No",
            "tag_source": "Static",
        },
        "b25a-11-cap01": {
            "ap_mac": "3c41.0fee.5eac",
            "site_tag_name": "default-site-tag-fabric",
            "policy_tag_name": "PT_Fabri_B25_B25-1_fe778",
            "rf_tag_name": "Standard",
            "misconfigured": "No",
            "tag_source": "Static",
        },
        "b25a-11-cap08": {
            "ap_mac": "3c41.0fee.5ef8",
            "site_tag_name": "default-site-tag-fabric",
            "policy_tag_name": "PT_Fabri_B25_B25-1_fe778",
            "rf_tag_name": "Standard",
            "misconfigured": "No",
            "tag_source": "Static",
        },
        "b25a-11-cap02": {
            "ap_mac": "3c41.0fee.5f94",
            "site_tag_name": "default-site-tag-fabric",
            "policy_tag_name": "PT_Fabri_B25_B25-1_fe778",
            "rf_tag_name": "Standard",
            "misconfigured": "No",
            "tag_source": "Static",
        },
        "b25a-11-cap07": {
            "ap_mac": "3c41.0fee.5fbc",
            "site_tag_name": "default-site-tag-fabric",
            "policy_tag_name": "PT_Fabri_B25_B25-1_fe778",
            "rf_tag_name": "Standard",
            "misconfigured": "No",
            "tag_source": "Static",
        },
        "b25a-12-cap02": {
            "ap_mac": "2c57.4518.16ac",
            "site_tag_name": "default-site-tag-fabric",
            "policy_tag_name": "PT_Fabri_B25_B25-1_fe778",
            "rf_tag_name": "Standard",
            "misconfigured": "No",
            "tag_source": "Static",
        },
        "b25b-11-cap06": {
            "ap_mac": "2c57.4518.2df0",
            "site_tag_name": "default-site-tag-fabric",
            "policy_tag_name": "PT_Fabri_B25_B25-1_fe778",
            "rf_tag_name": "Standard",
            "misconfigured": "No",
            "tag_source": "Static",
        },
        "b25b-11-cap08": {
            "ap_mac": "2c57.4518.41b0",
            "site_tag_name": "default-site-tag-fabric",
            "policy_tag_name": "PT_Fabri_B25_B25-1_fe778",
            "rf_tag_name": "Standard",
            "misconfigured": "No",
            "tag_source": "Static",
        },
        "b25b-11-cap07": {
            "ap_mac": "2c57.4518.432c",
            "site_tag_name": "default-site-tag-fabric",
            "policy_tag_name": "PT_Fabri_B25_B25-1_fe778",
            "rf_tag_name": "Standard",
            "misconfigured": "No",
            "tag_source": "Static",
        },
        "b25a-12-cap11": {
            "ap_mac": "2c57.4518.4330",
            "site_tag_name": "default-site-tag-fabric",
            "policy_tag_name": "PT_Fabri_B25_B25-1_fe778",
            "rf_tag_name": "Standard",
            "misconfigured": "No",
            "tag_source": "Static",
        },
        "b25b-12-cap02": {
            "ap_mac": "3c41.0fee.4394",
            "site_tag_name": "default-site-tag-fabric",
            "policy_tag_name": "PT_Fabri_B25_B25-1_fe778",
            "rf_tag_name": "Standard",
            "misconfigured": "No",
            "tag_source": "Static",
        },
        "b25a-13-cap09": {
            "ap_mac": "2c57.4518.564c",
            "site_tag_name": "default-site-tag-fabric",
            "policy_tag_name": "PT_Fabri_B25_B25-1_fe778",
            "rf_tag_name": "Standard",
            "misconfigured": "No",
            "tag_source": "Static",
        },
        "b25b-12-cap03": {
            "ap_mac": "2c57.4518.5b40",
            "site_tag_name": "default-site-tag-fabric",
            "policy_tag_name": "PT_Fabri_B25_B25-1_fe778",
            "rf_tag_name": "Standard",
            "misconfigured": "No",
            "tag_source": "Static",
        },
        "b25a-12-cap10": {
            "ap_mac": "2c57.4518.5b48",
            "site_tag_name": "default-site-tag-fabric",
            "policy_tag_name": "PT_Fabri_B25_B25-1_fe778",
            "rf_tag_name": "Standard",
            "misconfigured": "No",
            "tag_source": "Static",
        },
    },
    "number_of_aps": 20,
}
| 36.933735
| 58
| 0.495351
| 668
| 6,131
| 4.181138
| 0.097305
| 0.150376
| 0.078768
| 0.128894
| 0.885786
| 0.840673
| 0.827784
| 0.827784
| 0.827784
| 0.827784
| 0
| 0.108642
| 0.339423
| 6,131
| 165
| 59
| 37.157576
| 0.580988
| 0
| 0
| 0.606061
| 0
| 0
| 0.518676
| 0.153319
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ce5e16471e58422e7e3ce0350500eafe12061782
| 351
|
py
|
Python
|
utils/opt.py
|
hello-code2021/IDLMPIA
|
f8e303016bfc5fe7bb9978f972ad64e7b0adfe8e
|
[
"MIT"
] | null | null | null |
utils/opt.py
|
hello-code2021/IDLMPIA
|
f8e303016bfc5fe7bb9978f972ad64e7b0adfe8e
|
[
"MIT"
] | null | null | null |
utils/opt.py
|
hello-code2021/IDLMPIA
|
f8e303016bfc5fe7bb9978f972ad64e7b0adfe8e
|
[
"MIT"
] | null | null | null |
# Training/evaluation configuration for the patch classifier.
# NOTE(review): these are hard-coded absolute paths for one machine --
# consider making them configurable (argparse/env) when revisiting.
epoch = 100  # number of training epochs
train_result = "/home/yetaoyu/zc/Classification/patch_train_results"
train_dataset_dir = "/home/yetaoyu/zc/Classification/patch_data"
test_data_dir = "/home/yetaoyu/zc/Classification/patch_data"
test_result_dir = "/home/yetaoyu/zc/Classification/patch_test_results"
model_weight_path = "/home/yetaoyu/zc/Classification/patch_train_results"
| 58.5
| 73
| 0.834758
| 49
| 351
| 5.632653
| 0.346939
| 0.199275
| 0.235507
| 0.48913
| 0.757246
| 0.757246
| 0.630435
| 0.311594
| 0
| 0
| 0
| 0.008982
| 0.048433
| 351
| 6
| 73
| 58.5
| 0.817365
| 0
| 0
| 0
| 0
| 0
| 0.670455
| 0.670455
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ce9078ecffa5494ec28794ac6f868981192729ad
| 73
|
py
|
Python
|
examples/__init__.py
|
cdonovick/peak-examples
|
9d0799f1afafc801619a5cb15acd69603c49bb17
|
[
"MIT"
] | null | null | null |
examples/__init__.py
|
cdonovick/peak-examples
|
9d0799f1afafc801619a5cb15acd69603c49bb17
|
[
"MIT"
] | null | null | null |
examples/__init__.py
|
cdonovick/peak-examples
|
9d0799f1afafc801619a5cb15acd69603c49bb17
|
[
"MIT"
] | null | null | null |
from . import condition_flags
from . import fp
from . import reg_overlap
| 18.25
| 29
| 0.794521
| 11
| 73
| 5.090909
| 0.636364
| 0.535714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.164384
| 73
| 3
| 30
| 24.333333
| 0.918033
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ce9e41a98e3d71fabd2577d7712145f9919c8e0a
| 5,298
|
py
|
Python
|
financial_analysis/tests/test_set_freq.py
|
Kautenja/financial-analysis
|
96870edc4b8762bb0ed297b937b263a69221c23e
|
[
"MIT"
] | 1
|
2022-02-26T01:27:23.000Z
|
2022-02-26T01:27:23.000Z
|
financial_analysis/tests/test_set_freq.py
|
Kautenja/financial-analysis
|
96870edc4b8762bb0ed297b937b263a69221c23e
|
[
"MIT"
] | null | null | null |
financial_analysis/tests/test_set_freq.py
|
Kautenja/financial-analysis
|
96870edc4b8762bb0ed297b937b263a69221c23e
|
[
"MIT"
] | 3
|
2020-09-10T21:11:32.000Z
|
2021-12-06T09:38:18.000Z
|
"""Test cases for the set_freq module."""
from unittest import TestCase
import numpy as np
import pandas as pd
from ..set_freq import set_freq
#
# MARK: lossless conversions
#
class ShouldLosslessConvertUsingGroupby(TestCase):
    """set_freq with groupby=True on already second-aligned data is lossless."""
    def test(self):
        index = pd.to_datetime([1000, 2000, 3000], unit='ms')
        price = pd.Series([100., 101., 102.], index=index)
        dividend = pd.Series([5., 6., 7.], index=index)
        price, dividend = set_freq(price, dividend, '1s', groupby=True)
        # create the expected index and price
        expected_index = pd.to_datetime([1, 2, 3], unit='s')
        # lossless case: values carry over unchanged onto the 1s index
        expected_price = pd.Series([100., 101., 102.], index=expected_index)
        expected_dividend = pd.Series([5., 6., 7.], index=expected_index)
        # make assertions through the pandas testing module
        pd.testing.assert_series_equal(price, expected_price)
        pd.testing.assert_series_equal(dividend, expected_dividend)
class ShouldLosslessConvertUsingAsfreq(TestCase):
    """set_freq with groupby=False/method=None (asfreq path) is lossless."""
    def test(self):
        index = pd.to_datetime([1000, 2000, 3000], unit='ms')
        price = pd.Series([100., 101., 102.], index=index)
        dividend = pd.Series([5., 6., 7.], index=index)
        price, dividend = set_freq(price, dividend, '1s', groupby=False, method=None)
        # create the expected index and price
        expected_index = pd.to_datetime([1, 2, 3], unit='s')
        # lossless case: values carry over unchanged onto the 1s index
        expected_price = pd.Series([100., 101., 102.], index=expected_index)
        expected_dividend = pd.Series([5., 6., 7.], index=expected_index)
        # make assertions through the pandas testing module
        pd.testing.assert_series_equal(price, expected_price)
        pd.testing.assert_series_equal(dividend, expected_dividend)
#
# MARK: Up-sampling (inserting missing values)
#
class ShouldUpsampleTimeScaleUsingFfillTrue(TestCase):
    """Up-sampling ms data to 100us with the default ffill forward-fills gaps."""
    def test(self):
        index = pd.to_datetime([1, 2], unit='ms')
        price = pd.Series([100., 101.], index=index)
        dividend = pd.Series([5., 6.], index=index)
        price, dividend = set_freq(price, dividend, '100U')
        # create the expected index and price
        expected_index = pd.to_datetime(list(range(1000, 2001, 100)), unit='us')
        # expected values will be forward filled to meet timescale
        expected_price = pd.Series(10 * [100.] + [101.], index=expected_index)
        expected_dividend = pd.Series(10 * [5.] + [6.], index=expected_index)
        # make assertions through the pandas testing module
        pd.testing.assert_series_equal(price, expected_price)
        pd.testing.assert_series_equal(dividend, expected_dividend)
class ShouldNotUpsampleTimeScaleUsingFfillFalse(TestCase):
    """Up-sampling with ffill=False leaves NaN in the inserted slots."""
    def test(self):
        index = pd.to_datetime([1, 2], unit='ms')
        price = pd.Series([100., 101.], index=index)
        dividend = pd.Series([5., 6.], index=index)
        price, dividend = set_freq(price, dividend, '100U', ffill=False)
        # create the expected index and price
        expected_index = pd.to_datetime(list(range(1000, 2001, 100)), unit='us')
        # expected values will have NaN where there were none
        expected_price = pd.Series([100.] + 9 * [np.nan] + [101.], index=expected_index)
        expected_dividend = pd.Series([5.] + 9 * [np.nan] + [6.], index=expected_index)
        # make assertions through the pandas testing module
        pd.testing.assert_series_equal(price, expected_price)
        pd.testing.assert_series_equal(dividend, expected_dividend)
#
# MARK: Down-sampling (aggregating groups of data)
#
class ShouldDownsampleUsingGroupbyAndMeanValue(TestCase):
    """Down-sampling with method='mean' aggregates each group by its mean."""
    def test(self):
        index = pd.to_datetime([1001, 1002], unit='ms')
        price = pd.Series([100., 101.], index=index)
        dividend = pd.Series([5., 6.], index=index)
        price, dividend = set_freq(price, dividend, freq='1s', method='mean')
        # create the expected index and price
        expected_index = pd.to_datetime([1], unit='s')
        # expected values are the per-second group means
        expected_price = pd.Series([100.5], index=expected_index)
        expected_dividend = pd.Series([5.5], index=expected_index)
        # make assertions through the pandas testing module
        pd.testing.assert_series_equal(price, expected_price)
        pd.testing.assert_series_equal(dividend, expected_dividend)
# class ShouldDownsampleUsingAsfreq(TestCase):
# def test(self):
# index = pd.to_datetime([1001, 1002], unit='ms')
# price = pd.Series([100., 101.], index=index)
# dividend = pd.Series([5., 6.], index=index)
# price, dividend = set_freq(price, dividend, freq='1s', groupby=False, method='bfill')
# # create the expected index and price
# expected_index = pd.to_datetime([1], unit='s')
# # expected values will have NaN where there were none
# expected_price = pd.Series([100.], index=expected_index)
# expected_dividend = pd.Series([5.], index=expected_index)
# # make assertions through the pandas testing module
# pd.testing.assert_series_equal(price, expected_price)
# pd.testing.assert_series_equal(dividend, expected_dividend)
| 46.884956
| 95
| 0.665345
| 679
| 5,298
| 5.063328
| 0.142857
| 0.055846
| 0.031414
| 0.059337
| 0.837696
| 0.837696
| 0.837696
| 0.837696
| 0.786504
| 0.786504
| 0
| 0.04689
| 0.211023
| 5,298
| 112
| 96
| 47.303571
| 0.775598
| 0.312005
| 0
| 0.627119
| 0
| 0
| 0.009725
| 0
| 0
| 0
| 0
| 0
| 0.169492
| 1
| 0.084746
| false
| 0
| 0.067797
| 0
| 0.237288
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0cac7eafb604f1065119b389c7dc00e48e4926ac
| 4,941
|
py
|
Python
|
setVolunteers.py
|
zoctobere/Siren
|
325bcbf78d721e20eecad953dfbf54cacddc0889
|
[
"MIT"
] | null | null | null |
setVolunteers.py
|
zoctobere/Siren
|
325bcbf78d721e20eecad953dfbf54cacddc0889
|
[
"MIT"
] | null | null | null |
setVolunteers.py
|
zoctobere/Siren
|
325bcbf78d721e20eecad953dfbf54cacddc0889
|
[
"MIT"
] | null | null | null |
from discord.ext import commands
from builtins import bot
import db
import config
@bot.command()
async def setcommentary(ctx, arg1, arg2, arg3):
    """Assign both commentators to a restream.

    arg1: restream ID (case-insensitive; normalised to upper case here).
    arg2/arg3: database usernames of the two commentators (case sensitive).

    Only processed in the configured admin channel; the invoking message is
    deleted after handling in every branch.
    """
    # Ignore the command entirely outside the admin channel.
    if ctx.channel.id != config.adminChannel:
        return
    arg1 = arg1.upper()
    if not db.doesRestreamExist(arg1):
        await ctx.send('```No restream found with Restream ID: ' + arg1 + '```')
        await ctx.message.delete()
        return
    if not db.isRestreamOpen(arg1):
        # Closed restreams may still be edited by super users or by whoever
        # originally assigned the restream.
        if ctx.author.id not in config.superUsers:
            if ctx.author.name != db.getRestreamField(arg1, 'assignedBy'):
                await ctx.send('```Restream ' + arg1 + ' is not open. Please check the Restream ID and try again.```')
                await ctx.message.delete()
                return
    if not db.doesUserExist(arg2) or not db.doesUserExist(arg3):
        # Report every unknown username so the caller can fix both at once.
        if not db.doesUserExist(arg2):
            await ctx.send('```' + arg2 + ' is not in the database. Please check the spelling and try again. If this is a new restream team member, have Zoe reseed the db. Note: usernames are case sensitive.```')
        if not db.doesUserExist(arg3):
            await ctx.send('```' + arg3 + ' is not in the database. Please check the spelling and try again. If this is a new restream team member, have Zoe reseed the db. Note: usernames are case sensitive.```')
        await ctx.message.delete()
        return
    db.setRestreamField(arg1, 'commentary1', arg2)
    db.setRestreamField(arg1, 'commentary2', arg3)
    await ctx.send('```Commentary for Restream ' + arg1 + ' set to: ' + arg2 + ', ' + arg3 + ' by ' + ctx.author.name + '.```')
    await ctx.message.delete()
@setcommentary.error
async def clear_error(ctx, error):
    """Explain .setcommentary usage when required arguments are missing."""
    if ctx.channel.id != config.adminChannel:
        return
    if not isinstance(error, commands.MissingRequiredArgument):
        return
    await ctx.send('```Please specify the Restream ID and BOTH commentators separated by a space. Usage: .setcommentary <restreamID> <commentary1> <commentary2>```')
    await ctx.message.delete()
@bot.command()
async def settracker(ctx, arg1, arg2):
    """Assign the tracker for a restream.

    arg1: restream ID (normalised to upper case); arg2: tracker's database
    username (case sensitive).  Only processed in the configured admin
    channel; the invoking message is deleted after handling.
    """
    # Ignore the command entirely outside the admin channel.
    if ctx.channel.id != config.adminChannel:
        return
    arg1 = arg1.upper()
    if not db.doesRestreamExist(arg1):
        await ctx.send('```No restream found with Restream ID: ' + arg1 + '```')
        await ctx.message.delete()
        return
    if not db.isRestreamOpen(arg1):
        # Closed restreams may only be edited by super users.
        if ctx.author.id not in config.superUsers:
            await ctx.send('```Restream ' + arg1 + ' is not open. Please check the Restream ID and try again.```')
            await ctx.message.delete()
            return
    if not db.doesUserExist(arg2):
        await ctx.send('```' + arg2 + ' is not in the database. Please check the spelling and try again. If this is a new restream team member, have Zoe reseed the db. Note: usernames are case sensitive.```')
        await ctx.message.delete()
        return
    db.setRestreamField(arg1, 'tracker', arg2)
    await ctx.send('```Tracker for Restream ' + arg1 + ' set to ' + arg2 + ' by ' + ctx.author.name + '.```')
    await ctx.message.delete()
@settracker.error
async def clear_error(ctx, error):
    """Explain .settracker usage when required arguments are missing."""
    if ctx.channel.id != config.adminChannel:
        return
    if not isinstance(error, commands.MissingRequiredArgument):
        return
    await ctx.send('```Please specify the Restream ID and tracker. Usage: .settracker <restreamID> <tracker>```')
    await ctx.message.delete()
@bot.command()
async def setrestreamer(ctx, arg1, arg2):
    """Assign the restreamer for a restream.

    arg1: restream ID (normalised to upper case); arg2: restreamer's database
    username (case sensitive).  Only processed in the configured admin
    channel; the invoking message is deleted after handling.
    """
    # Ignore the command entirely outside the admin channel.
    if ctx.channel.id != config.adminChannel:
        return
    arg1 = arg1.upper()
    if not db.doesRestreamExist(arg1):
        await ctx.send('```No restream found with Restream ID: ' + arg1 + '```')
        await ctx.message.delete()
        return
    if not db.isRestreamOpen(arg1):
        # Closed restreams may only be edited by super users.
        if ctx.author.id not in config.superUsers:
            await ctx.send('```Restream ' + arg1 + ' is not open. Please check the Restream ID and try again.```')
            await ctx.message.delete()
            return
    if not db.doesUserExist(arg2):
        await ctx.send('```' + arg2 + ' is not in the database. Please check the spelling and try again. If this is a new restream team member, have Zoe reseed the db. Note: usernames are case sensitive.```')
        await ctx.message.delete()
        return
    db.setRestreamField(arg1, 'restreamer', arg2)
    await ctx.send('```Restreamer for Restream ' + arg1 + ' set to ' + arg2 + ' by ' + ctx.author.name + '.```')
    await ctx.message.delete()
@setrestreamer.error
async def clear_error(ctx, error):
    """Explain .setrestreamer usage when required arguments are missing."""
    if ctx.channel.id != config.adminChannel:
        return
    if not isinstance(error, commands.MissingRequiredArgument):
        return
    await ctx.send('```Please specify the Restream ID and restreamer. Usage: .setrestreamer <restreamID> <restreamer>```')
    await ctx.message.delete()
| 39.528
| 213
| 0.626391
| 614
| 4,941
| 5.035831
| 0.143322
| 0.080207
| 0.062096
| 0.101876
| 0.799483
| 0.799483
| 0.791721
| 0.791721
| 0.754851
| 0.754851
| 0
| 0.014921
| 0.253997
| 4,941
| 124
| 214
| 39.846774
| 0.823928
| 0
| 0
| 0.741935
| 0
| 0.053763
| 0.318455
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.043011
| 0
| 0.204301
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0cc895e5b57267bc734bd131915232e7c0cc8de7
| 5,014
|
py
|
Python
|
int/tests/test_invitations.py
|
ryjones/aries-acapy-plugin-toolbox
|
70108e2264a31831dab0fe9aafc70b3c310808c5
|
[
"Apache-2.0"
] | 1
|
2019-11-08T21:59:28.000Z
|
2019-11-08T21:59:28.000Z
|
int/tests/test_invitations.py
|
ryjones/aries-acapy-plugin-toolbox
|
70108e2264a31831dab0fe9aafc70b3c310808c5
|
[
"Apache-2.0"
] | 1
|
2019-11-15T21:30:18.000Z
|
2019-11-15T21:30:18.000Z
|
int/tests/test_invitations.py
|
ryjones/aries-acapy-plugin-toolbox
|
70108e2264a31831dab0fe9aafc70b3c310808c5
|
[
"Apache-2.0"
] | 4
|
2019-11-08T21:59:31.000Z
|
2019-11-18T21:21:22.000Z
|
"""Invitations tests"""
import pytest
from acapy_client import Client
from acapy_client.api.connection import delete_connection, get_connections
@pytest.fixture(autouse=True)
async def clear_invitation_state(backchannel: Client, connection_id: str):
    """Delete every invitation-state connection after each test in this module.

    Runs automatically (autouse).  The ``connection_id`` parameter is never
    read here; presumably it is requested only to force that fixture's
    setup/teardown to bracket this one — TODO confirm against the fixture
    definitions.
    """
    yield
    # Teardown: sweep all connections still stuck in the "invitation" state.
    connections = await get_connections.asyncio(client=backchannel)
    for connection in connections.results:
        if connection.state == "invitation":
            await delete_connection.asyncio(
                client=backchannel, conn_id=connection.connection_id
            )
@pytest.mark.asyncio
async def test_create_invitation(connection):
    """A create message should be answered with an invitation message."""
    create_msg = {
        "@type": "https://github.com/hyperledger/aries-toolbox/tree/master/docs/admin-invitations/0.1/create",
        "alias": "Invitation I sent to Alice",
        "label": "Bob",
        "group": "admin",
        "auto_accept": True,
        "multi_use": True,
    }
    reply = await connection.send_and_await_reply_async(create_msg, return_route="all")
    expected_type = "https://github.com/hyperledger/aries-toolbox/tree/master/docs/admin-invitations/0.1/invitation"
    assert reply["@type"] == expected_type
@pytest.mark.asyncio
async def test_oob_create_invitation(connection):
    """An oob-create message should be answered with an invitation message."""
    oob_create_msg = {
        "@type": "https://github.com/hyperledger/aries-toolbox/tree/master/docs/admin-invitations/0.1/oob-create",
        "alias": "Invitation I sent to Alice",
        "label": "Bob",
        "group": "admin",
        "auto_accept": True,
        "multi_use": True,
    }
    reply = await connection.send_and_await_reply_async(oob_create_msg, return_route="all")
    expected_type = "https://github.com/hyperledger/aries-toolbox/tree/master/docs/admin-invitations/0.1/invitation"
    assert reply["@type"] == expected_type
@pytest.mark.asyncio
async def test_get_list(connection):
    """A get-list message should be answered with a list message."""
    get_list_msg = {
        "@type": "https://github.com/hyperledger/aries-toolbox/tree/master/docs/admin-invitations/0.1/get-list"
    }
    reply = await connection.send_and_await_reply_async(get_list_msg, return_route="all")
    expected_type = "https://github.com/hyperledger/aries-toolbox/tree/master/docs/admin-invitations/0.1/list"
    assert reply["@type"] == expected_type
@pytest.mark.asyncio
async def test_num_results(connection):
    """Each create message should add one item to the get-list results."""
    # Number of invitations to create before listing.
    added_num = 2
    for _ in range(added_num):
        await connection.send_and_await_reply_async(
            {
                "@type": "https://github.com/hyperledger/aries-toolbox/tree/master/docs/admin-invitations/0.1/create",
                "alias": "Message I sent to Alice",
                "label": "Bob",
                "group": "admin",
                "auto_accept": True,
                "multi_use": True,
            },
            return_route="all",
        )
    reply = await connection.send_and_await_reply_async(
        {
            "@type": "https://github.com/hyperledger/aries-toolbox/tree/master/docs/admin-invitations/0.1/get-list"
        },
        return_route="all",
    )
    assert len(reply["results"]) == added_num
    # Connection-protocol invitations are created by the plain "create" message.
    assert (
        reply["results"][0]["invitation_type"]
        == "https://didcomm.org/connections/1.0/invitation"
    )
@pytest.mark.asyncio
async def test_oob_num_results(connection):
    """Each oob-create message should add one item to the get-list results."""
    # Number of invitations to create before listing.
    added_num = 2
    for _ in range(added_num):
        await connection.send_and_await_reply_async(
            {
                "@type": "https://github.com/hyperledger/aries-toolbox/tree/master/docs/admin-invitations/0.1/oob-create",
                "alias": "Message I sent to Alice",
                "label": "Bob",
                "group": "admin",
                "auto_accept": True,
                "multi_use": True,
            },
            return_route="all",
        )
    reply = await connection.send_and_await_reply_async(
        {
            "@type": "https://github.com/hyperledger/aries-toolbox/tree/master/docs/admin-invitations/0.1/get-list"
        },
        return_route="all",
    )
    assert len(reply["results"]) == added_num
    # Out-of-band invitations are created by the "oob-create" message.
    assert (
        reply["results"][0]["invitation_type"]
        == "https://didcomm.org/out-of-band/1.0/invitation"
    )
@pytest.mark.asyncio
async def test_empty_list(connection):
    """With no prior create messages, get-list should return zero results."""
    get_list_msg = {
        "@type": "https://github.com/hyperledger/aries-toolbox/tree/master/docs/admin-invitations/0.1/get-list"
    }
    reply = await connection.send_and_await_reply_async(get_list_msg, return_route="all")
    assert reply["results"] == []
| 34.342466
| 122
| 0.613482
| 586
| 5,014
| 5.110922
| 0.165529
| 0.039065
| 0.055092
| 0.06611
| 0.812354
| 0.812354
| 0.792988
| 0.792988
| 0.790985
| 0.763606
| 0
| 0.008552
| 0.25369
| 5,014
| 145
| 123
| 34.57931
| 0.791823
| 0.021141
| 0
| 0.578512
| 0
| 0.090909
| 0.340397
| 0
| 0
| 0
| 0
| 0
| 0.066116
| 1
| 0
| false
| 0
| 0.024793
| 0
| 0.024793
| 0.016529
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0cfb925753b1745fc256897f88dc0873a2553657
| 41
|
py
|
Python
|
mfr/extensions/docx/__init__.py
|
yacchin1205/RDM-modular-file-renderer
|
5bd18175a681d21e7be7fe0238132335a1cd8ded
|
[
"Apache-2.0"
] | 36
|
2015-08-31T20:24:22.000Z
|
2021-12-17T17:02:44.000Z
|
mfr/extensions/docx/__init__.py
|
yacchin1205/RDM-modular-file-renderer
|
5bd18175a681d21e7be7fe0238132335a1cd8ded
|
[
"Apache-2.0"
] | 190
|
2015-01-02T06:22:01.000Z
|
2022-01-19T11:27:03.000Z
|
mfr/extensions/docx/__init__.py
|
yacchin1205/RDM-modular-file-renderer
|
5bd18175a681d21e7be7fe0238132335a1cd8ded
|
[
"Apache-2.0"
] | 47
|
2015-01-27T15:45:22.000Z
|
2021-01-27T22:43:03.000Z
|
from .render import DocxRenderer # noqa
| 20.5
| 40
| 0.780488
| 5
| 41
| 6.4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.170732
| 41
| 1
| 41
| 41
| 0.941176
| 0.097561
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0b4fcac310c4fc6d17afd6f739b8fd7c34474951
| 158
|
py
|
Python
|
BingRewards/src/config.py
|
Stefano-Solo/bing-rewards
|
601eecbe9e11ba0928d3acc33d2e55a31576dbf7
|
[
"MIT"
] | null | null | null |
BingRewards/src/config.py
|
Stefano-Solo/bing-rewards
|
601eecbe9e11ba0928d3acc33d2e55a31576dbf7
|
[
"MIT"
] | null | null | null |
BingRewards/src/config.py
|
Stefano-Solo/bing-rewards
|
601eecbe9e11ba0928d3acc33d2e55a31576dbf7
|
[
"MIT"
] | null | null | null |
# Placeholder credential settings — fill in real values before running.
# The telegram_* entries keep their sentinel placeholders when unused
# (presumably consumed by a notification hook — confirm against callers).
credentials = dict(
    email='',
    password='',
    telegram_api_token='__telegram_api_token__',
    telegram_userid='__telegram_userid__',
)
| 22.571429
| 51
| 0.64557
| 14
| 158
| 6.285714
| 0.571429
| 0.25
| 0.363636
| 0.545455
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.240506
| 158
| 6
| 52
| 26.333333
| 0.733333
| 0
| 0
| 0
| 0
| 0
| 0.269737
| 0.144737
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.166667
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
0b88b303dcafb0c8f13c83989b6cba9e6d899fb7
| 72
|
py
|
Python
|
close_numerical_matches/__init__.py
|
shmulvad/close_numerical_matches
|
03bcc013eda3f79f417ded2c8e4d96af32a15401
|
[
"MIT"
] | 1
|
2021-07-11T13:35:21.000Z
|
2021-07-11T13:35:21.000Z
|
close_numerical_matches/__init__.py
|
shmulvad/close_numerical_matches
|
03bcc013eda3f79f417ded2c8e4d96af32a15401
|
[
"MIT"
] | null | null | null |
close_numerical_matches/__init__.py
|
shmulvad/close_numerical_matches
|
03bcc013eda3f79f417ded2c8e4d96af32a15401
|
[
"MIT"
] | null | null | null |
from .version import __version__
from .find_matches import find_matches
| 24
| 38
| 0.861111
| 10
| 72
| 5.6
| 0.5
| 0.392857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 72
| 2
| 39
| 36
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0baa46c524e9807ef5dc1884c70c1f269e812b14
| 167
|
py
|
Python
|
conftest.py
|
mpolidori/harvest-travis
|
000c85b09294812d27fa5faea6ea10a60ae989c7
|
[
"PostgreSQL"
] | 2
|
2017-10-02T22:25:43.000Z
|
2017-12-31T14:54:17.000Z
|
conftest.py
|
NCAR/ckanext-harvest
|
51d03fa527376eb3e73a90fd9771b82f89d97398
|
[
"PostgreSQL"
] | null | null | null |
conftest.py
|
NCAR/ckanext-harvest
|
51d03fa527376eb3e73a90fd9771b82f89d97398
|
[
"PostgreSQL"
] | null | null | null |
# -*- coding: utf-8 -*-
# Register the pytest fixture plugins this test suite depends on:
# CKAN's core test setup and fixtures, plus this extension's own fixtures.
pytest_plugins = [
    u'ckan.tests.pytest_ckan.ckan_setup',
    u'ckan.tests.pytest_ckan.fixtures',
    u'ckanext.harvest.tests.fixtures',
]
| 20.875
| 41
| 0.670659
| 23
| 167
| 4.695652
| 0.521739
| 0.092593
| 0.185185
| 0.296296
| 0.37037
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007042
| 0.149701
| 167
| 7
| 42
| 23.857143
| 0.753521
| 0.125749
| 0
| 0
| 0
| 0
| 0.652778
| 0.652778
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e7e8441db738b85aba179108724297db1901eff5
| 4,323
|
py
|
Python
|
chat-plugin/chat/test/test_predicate.py
|
lyoung83/chat
|
b21a3255db6c825a22a4ef02642fb9c3cb72d9c3
|
[
"Apache-2.0"
] | 17
|
2016-04-09T09:54:43.000Z
|
2021-06-29T04:59:54.000Z
|
chat-plugin/chat/test/test_predicate.py
|
lyoung83/chat
|
b21a3255db6c825a22a4ef02642fb9c3cb72d9c3
|
[
"Apache-2.0"
] | 210
|
2016-01-27T09:57:29.000Z
|
2020-10-29T17:19:44.000Z
|
chat-plugin/chat/test/test_predicate.py
|
lyoung83/chat
|
b21a3255db6c825a22a4ef02642fb9c3cb72d9c3
|
[
"Apache-2.0"
] | 21
|
2016-06-22T11:48:56.000Z
|
2019-01-07T17:08:45.000Z
|
import unittest
from ..query import Predicate
class TestPredicate(unittest.TestCase):
    """Check that Predicate instances serialise to the expected nested
    ["op", {"$type": "keypath", ...}, value] list form via to_dict()."""

    def test_simple_in(self):
        """A single __in clause serialises to an "in" node."""
        p = Predicate(_id__in=["a", "b", "c"])
        self.assertListEqual(
            ["in", {"$type": "keypath", "$val": "_id"},
                ["a", "b", "c"]]
            , p.to_dict())

    def test_simple_not(self):
        """~Predicate wraps the serialised clause in a "not" node."""
        p = ~Predicate(_id__in=["a", "b", "c"])
        self.assertListEqual(['not',
            ["in", {"$type": "keypath", "$val": "_id"},
                ["a", "b", "c"]]]
            , p.to_dict())

    def test_simple_and(self):
        """Two kwargs combine with "and"; the & operator gives the same tree."""
        p = Predicate(_id__eq="a", deleted__eq=False)
        expected = ["and", ["eq", {"$type": "keypath", "$val": "_id"}, "a"], ["eq", {"$type": "keypath", "$val": "deleted"}, False] ]
        self.assertListEqual(
            expected,
            p.to_dict()
        )
        p = Predicate(_id__eq="a")
        p = p & Predicate(deleted__eq=False)
        self.assertListEqual(expected, p.to_dict())

    def test_simple_and_three_statements(self):
        """Three kwargs combine under one "and" node (note serialisation order)."""
        p = Predicate(time__lte="2010-07-10", time__gte="2009-01-01", deleted__ne=False)
        expected = ["and",
                    ["ne", {"$type": "keypath", "$val": "deleted"}, False],
                    ["gte", {"$type": "keypath", "$val": "time"}, "2009-01-01"],
                    ["lte", {"$type": "keypath", "$val": "time"}, "2010-07-10"]]
        self.assertListEqual(expected, p.to_dict())

    def test_simple_or(self):
        """op=Predicate.OR combines kwargs under an "or" node."""
        p = Predicate(_id__eq="simple", gender__eq="M", op=Predicate.OR)
        expected = ["or", ["eq", {"$type": "keypath", "$val": "_id"}, "simple"], ["eq", {"$type": "keypath", "$val": "gender"}, "M"]]
        self.assertListEqual(expected, p.to_dict())

    def test_simple_or_three_statement(self):
        """Three clauses (including an "in") combine under one "or" node."""
        p = Predicate(_id__eq="chima", gender__eq="M", type__in=["cat", "dog"] , op=Predicate.OR)
        expected = ["or", ["eq", {"$type": "keypath", "$val": "_id"}, "chima"],
                    ["eq", {"$type": "keypath", "$val": "gender"}, "M"],
                    ["in", {"$type": "keypath", "$val": "type"}, ["cat", "dog"]]]
        self.assertListEqual(expected, p.to_dict())

    def test_compound_statement_1(self):
        """Chained & of "and" predicates flattens into one "and" node."""
        p = Predicate(_id__eq="chima", gender__eq="M", type__eq="dog")
        p2 = Predicate(_id__eq="fatseng", gender__eq="F", type__eq="cat")
        p3 = Predicate(_id__eq="milktea", gender__eq="NA", type__eq="frog")
        p4 = p & p2 & p3
        expected = ["and", ["eq", {"$type": "keypath", "$val": "_id"}, "chima"],
                    ["eq", {"$type": "keypath", "$val": "gender"}, "M"],
                    ["eq", {"$type": "keypath", "$val": "type"}, "dog"],
                    ["eq", {"$type": "keypath", "$val": "_id"}, "fatseng"],
                    ["eq", {"$type": "keypath", "$val": "gender"}, "F"],
                    ["eq", {"$type": "keypath", "$val": "type"}, "cat"],
                    ["eq", {"$type": "keypath", "$val": "_id"}, "milktea"],
                    ["eq", {"$type": "keypath", "$val": "gender"}, "NA"],
                    ["eq", {"$type": "keypath", "$val": "type"}, "frog"]]
        self.assertListEqual(expected, p4.to_dict())

    def test_compound_statement_2(self):
        """| keeps each "and" group nested, and ~ wraps its group in "not"."""
        p = Predicate(_id__eq="chima", gender__eq="M", type__eq="dog")
        p2 = Predicate(_id__eq="fatseng", gender__eq="F", type__eq="cat")
        p3 = ~Predicate(_id__eq="milktea", gender__eq="NA", type__eq="frog")
        p4 = p | p2 | p3
        expected = ["or", ["and",["eq", {"$type": "keypath", "$val": "_id"}, "chima"],
                    ["eq", {"$type": "keypath", "$val": "gender"}, "M"],
                    ["eq", {"$type": "keypath", "$val": "type"}, "dog"]],
                    ["and",["eq", {"$type": "keypath", "$val": "_id"}, "fatseng"],
                    ["eq", {"$type": "keypath", "$val": "gender"}, "F"],
                    ["eq", {"$type": "keypath", "$val": "type"}, "cat"]],
                    ["not", ["and", ["eq", {"$type": "keypath", "$val": "_id"}, "milktea"],
                    ["eq", {"$type": "keypath", "$val": "gender"}, "NA"],
                    ["eq", {"$type": "keypath", "$val": "type"}, "frog"]]]]
        self.assertListEqual(expected, p4.to_dict())
| 51.464286
| 133
| 0.459866
| 462
| 4,323
| 4.047619
| 0.12987
| 0.176471
| 0.224599
| 0.205348
| 0.824599
| 0.759893
| 0.72246
| 0.680214
| 0.658289
| 0.633155
| 0
| 0.015008
| 0.291002
| 4,323
| 83
| 134
| 52.084337
| 0.595106
| 0
| 0
| 0.319444
| 0
| 0
| 0.214715
| 0
| 0
| 0
| 0
| 0
| 0.125
| 1
| 0.111111
| false
| 0
| 0.027778
| 0
| 0.152778
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f00da208bbb7f65efc1010ca225d6220ace0e69d
| 30
|
py
|
Python
|
PyMturkGspread/__init__.py
|
haldunanil/PyMturkGspread
|
2f617930461f89323af96298948ee576f0ccea8c
|
[
"MIT"
] | null | null | null |
PyMturkGspread/__init__.py
|
haldunanil/PyMturkGspread
|
2f617930461f89323af96298948ee576f0ccea8c
|
[
"MIT"
] | null | null | null |
PyMturkGspread/__init__.py
|
haldunanil/PyMturkGspread
|
2f617930461f89323af96298948ee576f0ccea8c
|
[
"MIT"
] | null | null | null |
from .mturk import GoogleForms
| 30
| 30
| 0.866667
| 4
| 30
| 6.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 30
| 1
| 30
| 30
| 0.962963
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f05c30bc59993e3d8a80e3a05c3f976468a99996
| 3,987
|
py
|
Python
|
tests/test_product_variations.py
|
lotrekagency/pywoo
|
84034c053a4873246394203ae819190e8402c057
|
[
"MIT"
] | 5
|
2019-09-11T15:39:57.000Z
|
2022-01-21T14:23:51.000Z
|
tests/test_product_variations.py
|
lotrekagency/pywoo
|
84034c053a4873246394203ae819190e8402c057
|
[
"MIT"
] | 2
|
2019-10-11T15:34:28.000Z
|
2019-10-15T15:38:23.000Z
|
tests/test_product_variations.py
|
lotrekagency/pywoo
|
84034c053a4873246394203ae819190e8402c057
|
[
"MIT"
] | 1
|
2021-06-17T17:27:02.000Z
|
2021-06-17T17:27:02.000Z
|
import unittest
from mock import patch
from pywoo.pywoo import Api
from pywoo.models.product_variations import ProductVariation
from tests.tools import mock_request
class TestProductVariation(unittest.TestCase):
    """Exercise ProductVariation CRUD through the Api facade, the classmethod
    helpers, and the instance helpers, with the HTTP layer mocked out."""

    @patch('pywoo.pywoo.requests.api.request', side_effect=mock_request)
    def test_api_post(self, mock_req):
        client = Api('', 'fake_consumer_key', 'fake_consumer_secret')
        created = client.create_product_variation(56)
        assert type(created) == ProductVariation

    @patch('pywoo.pywoo.requests.api.request', side_effect=mock_request)
    def test_api_get(self, mock_req):
        client = Api('', 'fake_consumer_key', 'fake_consumer_secret')
        listed = client.get_product_variations(56)
        assert all(type(item) == ProductVariation for item in listed)
        single = client.get_product_variations(56, 57)
        assert type(single) == ProductVariation
        assert single.id == 57

    @patch('pywoo.pywoo.requests.api.request', side_effect=mock_request)
    def test_api_put(self, mock_req):
        client = Api('', 'fake_consumer_key', 'fake_consumer_secret')
        updated = client.update_product_variation(56, 57)
        assert type(updated) == ProductVariation
        assert updated.id == 57

    @patch('pywoo.pywoo.requests.api.request', side_effect=mock_request)
    def test_api_delete(self, mock_req):
        client = Api('', 'fake_consumer_key', 'fake_consumer_secret')
        removed = client.delete_product_variation(56, 57)
        assert type(removed) == ProductVariation
        assert removed.id == 57

    @patch('pywoo.pywoo.requests.api.request', side_effect=mock_request)
    def test_classmethod_post(self, mock_req):
        client = Api('', 'fake_consumer_key', 'fake_consumer_secret')
        created = ProductVariation.create_product_variation(client, 56)
        assert type(created) == ProductVariation

    @patch('pywoo.pywoo.requests.api.request', side_effect=mock_request)
    def test_classmethod_get(self, mock_req):
        client = Api('', 'fake_consumer_key', 'fake_consumer_secret')
        listed = ProductVariation.get_product_variations(client, 56)
        assert all(type(item) == ProductVariation for item in listed)
        single = ProductVariation.get_product_variations(client, 56, 57)
        assert type(single) == ProductVariation
        assert single.id == 57

    @patch('pywoo.pywoo.requests.api.request', side_effect=mock_request)
    def test_classmethod_put(self, mock_req):
        client = Api('', 'fake_consumer_key', 'fake_consumer_secret')
        updated = ProductVariation.edit_product_variation(client, 56, 57)
        assert type(updated) == ProductVariation
        assert updated.id == 57

    @patch('pywoo.pywoo.requests.api.request', side_effect=mock_request)
    def test_classmethod_delete(self, mock_req):
        client = Api('', 'fake_consumer_key', 'fake_consumer_secret')
        removed = ProductVariation.delete_product_variation(client, 56, 57)
        assert type(removed) == ProductVariation
        assert removed.id == 57

    @patch('pywoo.pywoo.requests.api.request', side_effect=mock_request)
    def test_object_update(self, mock_req):
        client = Api('', 'fake_consumer_key', 'fake_consumer_secret')
        variation = ProductVariation.get_product_variations(client, 56, 57)
        assert type(variation) == ProductVariation
        assert variation.id == 57
        variation = variation.update()
        assert type(variation) == ProductVariation
        assert variation.id == 57

    @patch('pywoo.pywoo.requests.api.request', side_effect=mock_request)
    def test_object_delete(self, mock_req):
        client = Api('', 'fake_consumer_key', 'fake_consumer_secret')
        variation = client.get_product_variations(56, 57)
        assert type(variation) == ProductVariation
        assert variation.id == 57
        variation = variation.delete()
        assert type(variation) == ProductVariation
        assert variation.id == 57

    @patch('pywoo.pywoo.requests.api.request', side_effect=mock_request)
    def test_object_refresh(self, mock_req):
        client = Api('', 'fake_consumer_key', 'fake_consumer_secret')
        variation = client.get_product_variations(56, 57)
        assert type(variation) == ProductVariation
        assert variation.id == 57
        variation.refresh()
        assert type(variation) == ProductVariation
        assert variation.id == 57
| 38.708738
| 72
| 0.686481
| 515
| 3,987
| 5.08932
| 0.097087
| 0.100725
| 0.069439
| 0.154903
| 0.892026
| 0.892026
| 0.892026
| 0.892026
| 0.877146
| 0.877146
| 0
| 0.021164
| 0.194131
| 3,987
| 102
| 73
| 39.088235
| 0.794585
| 0
| 0
| 0.605634
| 0
| 0
| 0.190369
| 0.088287
| 0
| 0
| 0
| 0
| 0.225352
| 1
| 0.15493
| false
| 0
| 0.070423
| 0
| 0.239437
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b2cd1abafb4efcf565e288216890e652357967bd
| 80
|
py
|
Python
|
rve/cmd/__init__.py
|
eupedrosa/ros-venv
|
7d203288ec5ab54467b1e54406f94b876ab54ece
|
[
"BSD-3-Clause"
] | null | null | null |
rve/cmd/__init__.py
|
eupedrosa/ros-venv
|
7d203288ec5ab54467b1e54406f94b876ab54ece
|
[
"BSD-3-Clause"
] | null | null | null |
rve/cmd/__init__.py
|
eupedrosa/ros-venv
|
7d203288ec5ab54467b1e54406f94b876ab54ece
|
[
"BSD-3-Clause"
] | null | null | null |
from . import init
from . import run
from . import remove
from . import status
| 13.333333
| 20
| 0.7375
| 12
| 80
| 4.916667
| 0.5
| 0.677966
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2125
| 80
| 5
| 21
| 16
| 0.936508
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b2f7f5ba03af869001e57d67fd929a5d364aaa0e
| 9,555
|
py
|
Python
|
.ipynb_checkpoints/utils_continuous-checkpoint.py
|
hsharsh/pinn-torch
|
fa563b324c286ec4425529f5ea1db03e68bec2f3
|
[
"MIT"
] | 1
|
2022-01-25T04:27:33.000Z
|
2022-01-25T04:27:33.000Z
|
utils_continuous.py
|
hsharsh/pinn-torch
|
fa563b324c286ec4425529f5ea1db03e68bec2f3
|
[
"MIT"
] | null | null | null |
utils_continuous.py
|
hsharsh/pinn-torch
|
fa563b324c286ec4425529f5ea1db03e68bec2f3
|
[
"MIT"
] | null | null | null |
# Data processing imports
import scipy.io as io
import numpy as np
from pyDOE import lhs
# Plotting imports
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scipy.interpolate import griddata
import matplotlib.gridspec as gridspec
def load_dataset(file):
    """Load a .mat dataset and return its 'x', 't' arrays plus the
    transposed 'usol' field."""
    mat = io.loadmat(file)
    return mat['x'], mat['t'], mat['usol'].T
# Inference
def preprocess_data_continuous_inference(file, Nu = 100, Nf = 10000):
    """Build training and test sets for the continuous-time inference problem.

    Samples Nu random initial/boundary points and Nf collocation points
    (Latin hypercube, scaled to the domain) and returns them together with
    the raw grids, domain bounds, and the full flattened test set.
    """
    x, t, u_exact = load_dataset(file)
    X, T = np.meshgrid(x, t)

    # Flatten the full space-time grid into (N, 2) inputs with matching targets.
    test_X = np.hstack([X.flatten()[:, None], T.flatten()[:, None]])
    test_u = u_exact.flatten()[:, None]

    # Initial condition (first time row) and the two spatial boundaries.
    initial_X = np.hstack([X[:1, :].T, T[:1, :].T])
    initial_u = u_exact[:1, :].T
    left_X = np.hstack([X[:, :1], T[:, :1]])
    left_u = u_exact[:, :1]
    right_X = np.hstack([X[:, -1:], T[:, -1:]])
    right_u = u_exact[:, -1:]
    train_X_u = np.vstack([initial_X, left_X, right_X])
    train_u = np.vstack([initial_u, left_u, right_u])

    # Domain bounds used to scale the hypercube samples into the domain.
    lb = test_X.min(0)
    ub = test_X.max(0)
    collocation_points = lb + (ub - lb) * lhs(2, Nf)  # (Nf x 2) samples
    train_X_f = np.vstack([collocation_points, train_X_u])

    # Restrict the initial/boundary conditions to only Nu random points.
    sample = np.random.choice(train_X_u.shape[0], size = Nu)
    train_X_u = train_X_u[sample]
    train_u = train_u[sample]
    return x, t, u_exact, X, T, lb, ub, train_X_u, train_u, train_X_f, test_X, test_u
def _plot_u_time_slice(ax, x, u_exact, u_pred, row, title):
    """Draw one u(t,x) cross-section: exact (solid blue) vs prediction
    (dashed red) at time index ``row``, with shared axis cosmetics."""
    ax.plot(x, u_exact[row, :], 'b-', linewidth = 2, label = 'Exact')
    ax.plot(x, u_pred[row, :], 'r--', linewidth = 2, label = 'Prediction')
    ax.set_xlabel('$x$')
    ax.set_ylabel('$u(t,x)$')
    ax.set_title(title, fontsize = 10)
    ax.axis('square')
    ax.set_xlim([-1.1,1.1])
    ax.set_ylim([-1.1,1.1])

def plot_results_continuous_inference(x, t, X, T, u_exact, u_pred, train_X_u, train_X_f, train_u, test_X):
    """Plot inference results: a u(t,x) heat map with the training points,
    plus exact-vs-predicted cross-sections at five times.

    ``u_pred`` is given flattened over ``test_X`` and is re-gridded here.
    ``T`` and ``train_u`` are unused but kept for interface compatibility.
    """
    u_pred = griddata(test_X, u_pred.flatten(), (X, T), method='cubic')
    fig = plt.figure(figsize = (10, 9.5))
    ax = plt.gca()
    ax.axis('off')
    fig.patch.set_facecolor('white')

    ####### Row 0: u(t,x) heat map ##################
    gs0 = gridspec.GridSpec(1, 2)
    gs0.update(top=1-0.06, bottom=1-1/3, left=0.15, right=0.85, wspace=0)
    ax = plt.subplot(gs0[:, :])
    h = ax.imshow(u_pred.T, interpolation='nearest', cmap='rainbow',
                  extent=[t.min(), t.max(), x.min(), x.max()],
                  origin='lower', aspect='auto')
    divider = make_axes_locatable(ax)
    cax = divider.append_axes("right", size="5%", pad=0.05)
    fig.colorbar(h, cax=cax)
    # Overlay the sampled boundary/initial points (x) and collocation points (.).
    ax.plot(train_X_u[:,1], train_X_u[:,0], 'kx', markersize = 4, clip_on = False)
    ax.plot(train_X_f[:,1], train_X_f[:,0], 'k.', markersize = 1, clip_on = False)
    # White vertical lines mark the times shown as cross-sections below.
    line = np.linspace(x.min(), x.max(), 2)[:,None]
    for marker_row in (25, 50, 75):
        ax.plot(t[marker_row]*np.ones((2,1)), line, 'w-', linewidth = 1)
    ax.set_xlabel('$t$')
    ax.set_ylabel('$x$')
    ax.set_title('$u(t,x)$', fontsize = 10)

    ####### Row 1: u(t,x) slices ##################
    gs1 = gridspec.GridSpec(2, 3)
    gs1.update(top=1-12/30, bottom=0, left=0.1, right=0.9, wspace=0.5)
    slices = [((0, 0), 0, '$t = 0$'),
              ((0, 1), 24, '$t = 0.25$'),
              ((0, 2), 49, '$t = 0.50$'),
              ((1, 0), 74, '$t = 0.75$'),
              ((1, 1), 99, '$t = 1.00$')]
    for (r, c), row, title in slices:
        ax = plt.subplot(gs1[r, c])
        _plot_u_time_slice(ax, x, u_exact, u_pred, row, title)
    # One shared legend, anchored beside the last slice panel.
    ax.legend(loc='center', bbox_to_anchor=(2.5, 0.6), ncol=5, frameon=False)
    plt.show()
# Identification
def preprocess_data_continuous_identification(file, N = 2000, noise = 0.0):
    """Build training and test sets for the continuous-time identification
    problem.

    Draws N random space-time observations and optionally perturbs their
    targets with zero-mean Gaussian noise scaled by the targets' standard
    deviation.
    """
    x, t, u_exact = load_dataset(file)
    X, T = np.meshgrid(x, t)

    # Flatten the full space-time grid into (N, 2) inputs with matching targets.
    test_X = np.hstack([X.flatten()[:, None], T.flatten()[:, None]])
    test_u = u_exact.flatten()[:, None]

    # Domain bounds, returned for the caller's use.
    lb = test_X.min(0)
    ub = test_X.max(0)

    # Sample N random observation rows and add the scaled noise term.
    sample = np.random.choice(test_X.shape[0], size = N)
    train_X = test_X[sample]
    train_u = test_u[sample]
    train_u = train_u + noise * np.std(train_u) * np.random.randn(train_u.shape[0], train_u.shape[1])
    return x, t, u_exact, X, T, lb, ub, train_X, train_u, test_X, test_u
def plot_results_continuous_identification(x, t, X, T, u_exact, u_pred, train_X, train_u, test_X, lambda_1, lambda_2):
    """Plot identification results: a u(t,x) heatmap with the training points
    overlaid, five u(.,x) slices comparing exact vs. predicted values, and the
    identified PDE coefficients lambda_1, lambda_2.

    Note: the original version also built a LaTeX tabular string (s1..s3) that
    was never rendered and contained a '\\b' backspace escape bug; that dead
    code has been removed.
    """
    # Interpolate the (scattered) prediction back onto the regular (X, T) grid.
    u_pred = griddata(test_X, u_pred.flatten(), (X, T), method='cubic')
    fig = plt.figure(figsize=(10, 9.5))
    ax = plt.gca()
    ax.axis('off')
    fig.patch.set_facecolor('white')

    ####### Row 0: u(t,x) heatmap ##################
    gs0 = gridspec.GridSpec(1, 2)
    gs0.update(top=1-0.06, bottom=1-1/3, left=0.15, right=0.85, wspace=0)
    ax = plt.subplot(gs0[:, :])
    h = ax.imshow(u_pred.T, interpolation='nearest', cmap='rainbow',
                  extent=[t.min(), t.max(), x.min(), x.max()],
                  origin='lower', aspect='auto')
    divider = make_axes_locatable(ax)
    cax = divider.append_axes("right", size="5%", pad=0.05)
    fig.colorbar(h, cax=cax)
    # Training samples, plus white lines marking the time slices shown below.
    ax.plot(train_X[:, 1], train_X[:, 0], 'k.', markersize=2, clip_on=False)
    line = np.linspace(x.min(), x.max(), 2)[:, None]
    for t_idx in (25, 50, 75):
        ax.plot(t[t_idx]*np.ones((2, 1)), line, 'w-', linewidth=1)
    ax.set_xlabel('$t$')
    ax.set_ylabel('$x$')
    ax.set_title('$u(t,x)$', fontsize=10)

    ####### Row 1: u(t,x) slices ##################
    gs1 = gridspec.GridSpec(2, 3)
    gs1.update(top=1-12/30, bottom=0, left=0.1, right=0.9, wspace=0.5)
    # (time index, grid row, grid col, panel title) for each slice panel.
    slices = [(0, 0, 0, '$t = 0$'),
              (24, 0, 1, '$t = 0.25$'),
              (49, 0, 2, '$t = 0.50$'),
              (74, 1, 0, '$t = 0.75$'),
              (99, 1, 1, '$t = 1.00$')]
    for t_idx, row, col, title in slices:
        ax = plt.subplot(gs1[row, col])
        ax.plot(x, u_exact[t_idx, :], 'b-', linewidth=2, label='Exact')
        ax.plot(x, u_pred[t_idx, :], 'r--', linewidth=2, label='Prediction')
        ax.set_xlabel('$x$')
        ax.set_ylabel('$u(t,x)$')
        ax.set_title(title, fontsize=10)
        ax.axis('square')
        ax.set_xlim([-1.1, 1.1])
        ax.set_ylim([-1.1, 1.1])
    # Shared legend anchored off the last slice panel.
    ax.legend(loc='center', bbox_to_anchor=(1.8, 0.3), ncol=5, frameon=False)

    # Identified coefficients ('\\lambda' avoids the invalid '\l' escape warning;
    # the rendered text is byte-identical to the original).
    ax = plt.subplot(gs1[1, 2])
    ax.axis('off')
    ax.text(-0.3, 0.5,
            'Correct PDE: $u_t + u u_x - 0.0031831 u_{xx} = 0$ \n\t\t\t'
            f'$\\lambda_1$: {lambda_1:.5f}, $\\lambda_2$: {lambda_2:.5f}')
    plt.show()
| 35.786517
| 141
| 0.569963
| 1,660
| 9,555
| 3.139759
| 0.126506
| 0.024559
| 0.023024
| 0.030698
| 0.763239
| 0.754221
| 0.731389
| 0.723715
| 0.710668
| 0.710668
| 0
| 0.059324
| 0.200837
| 9,555
| 266
| 142
| 35.921053
| 0.623232
| 0.072737
| 0
| 0.774359
| 0
| 0.010256
| 0.105366
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.025641
| false
| 0
| 0.035897
| 0
| 0.076923
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
652a270a18b7168a39e6adf7657a9a7c192f2373
| 1,767
|
py
|
Python
|
stubs/3.2/math.py
|
TimSimpsonR/mypy
|
5e6fd6335e0662b0477e1d678269f33e6f4194ba
|
[
"PSF-2.0"
] | 1
|
2019-06-16T07:05:32.000Z
|
2019-06-16T07:05:32.000Z
|
stubs/3.2/math.py
|
TimSimpsonR/mypy
|
5e6fd6335e0662b0477e1d678269f33e6f4194ba
|
[
"PSF-2.0"
] | null | null | null |
stubs/3.2/math.py
|
TimSimpsonR/mypy
|
5e6fd6335e0662b0477e1d678269f33e6f4194ba
|
[
"PSF-2.0"
] | null | null | null |
# Stubs for math
# Ron Murawski <ron@horizonchess.com>
# based on: http://docs.python.org/3.2/library/math.html
from typing import overload, Tuple, Iterable
# ----- variables and constants -----
# Placeholder values; at runtime these are math.e (~2.71828) and math.pi (~3.14159).
e = 0.0
pi = 0.0
# ----- functions -----
# Rounding, sign and decomposition helpers.
def ceil(x: float) -> int: pass
def copysign(x: float, y: float) -> float: pass
def fabs(x: float) -> float: pass
def factorial(x: int) -> int: pass
def floor(x: float) -> int: pass
def fmod(x: float, y: float) -> float: pass
def frexp(x: float) -> Tuple[float, int]: pass
def fsum(iterable: Iterable[float]) -> float: pass
def isfinite(x: float) -> bool: pass
def isinf(x: float) -> bool: pass
def isnan(x: float) -> bool: pass
def ldexp(x: float, i: int) -> float: pass
def modf(x: float) -> Tuple[float, float]: pass
# math.trunc returns an integral value, not a float.
def trunc(x: float) -> int: pass
# Exponentials and logarithms.
def exp(x: float) -> float: pass
def expm1(x: float) -> float: pass
def log(x: float, base: float = e) -> float: pass
def log1p(x: float) -> float: pass
def log10(x: float) -> float: pass
def pow(x: float, y: float) -> float: pass
def sqrt(x: float) -> float: pass
# Trigonometry.
def acos(x: float) -> float: pass
def asin(x: float) -> float: pass
def atan(x: float) -> float: pass
def atan2(y: float, x: float) -> float: pass
def cos(x: float) -> float: pass
def hypot(x: float, y: float) -> float: pass
def sin(x: float) -> float: pass
def tan(x: float) -> float: pass
def degrees(x: float) -> float: pass
def radians(x: float) -> float: pass
# Hyperbolic functions.
def acosh(x: float) -> float: pass
def asinh(x: float) -> float: pass
def atanh(x: float) -> float: pass
def cosh(x: float) -> float: pass
def sinh(x: float) -> float: pass
def tanh(x: float) -> float: pass
# Special functions; these accept floats like the rest of the module
# (the original 'object' annotation was overly broad).
def erf(x: float) -> float: pass
def erfc(x: float) -> float: pass
def gamma(x: float) -> float: pass
def lgamma(x: float) -> float: pass
| 32.722222
| 56
| 0.651952
| 300
| 1,767
| 3.84
| 0.27
| 0.243056
| 0.34375
| 0.398438
| 0.548611
| 0.083333
| 0.083333
| 0
| 0
| 0
| 0
| 0.007509
| 0.170911
| 1,767
| 53
| 57
| 33.339623
| 0.77884
| 0.092247
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.931818
| false
| 0.931818
| 0.022727
| 0
| 0.954545
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 6
|
e8f5a6f57f0912d48a5cf5d5564ecf3cd6ce96fc
| 22,135
|
py
|
Python
|
fonts/vector/romanc.py
|
szczys/st7789_mpy
|
bc854ec453d7644ce1773f7ed4d41504f37d376b
|
[
"MIT"
] | 153
|
2020-02-02T11:03:14.000Z
|
2022-03-30T05:47:07.000Z
|
fonts/vector/romanc.py
|
skylin008/st7789_mpy
|
f304991fc5558be653df5f0de928494b85cbc60d
|
[
"MIT"
] | 58
|
2020-04-11T23:23:02.000Z
|
2022-03-26T20:45:23.000Z
|
fonts/vector/romanc.py
|
skylin008/st7789_mpy
|
f304991fc5558be653df5f0de928494b85cbc60d
|
[
"MIT"
] | 50
|
2020-02-02T11:05:23.000Z
|
2022-03-22T15:24:42.000Z
|
# Glyph cell dimensions for this stroke (vector) font.
WIDTH = 32
HEIGHT = 32
# Inclusive codepoint range covered by the font: 0x20 (space) .. 0x7f.
FIRST = 0x20
LAST = 0x7f
_font =\
b'\x00\x4a\x5a\x0e\x4d\x57\x52\x46\x51\x48\x52\x54\x53\x48\x52'\
b'\x46\x20\x52\x52\x48\x52\x4e\x20\x52\x52\x59\x51\x5a\x52\x5b'\
b'\x53\x5a\x52\x59\x15\x49\x5b\x4e\x46\x4d\x47\x4d\x4d\x20\x52'\
b'\x4e\x47\x4d\x4d\x20\x52\x4e\x46\x4f\x47\x4d\x4d\x20\x52\x57'\
b'\x46\x56\x47\x56\x4d\x20\x52\x57\x47\x56\x4d\x20\x52\x57\x46'\
b'\x58\x47\x56\x4d\x0b\x48\x5d\x53\x42\x4c\x62\x20\x52\x59\x42'\
b'\x52\x62\x20\x52\x4c\x4f\x5a\x4f\x20\x52\x4b\x55\x59\x55\x29'\
b'\x48\x5c\x50\x42\x50\x5f\x20\x52\x54\x42\x54\x5f\x20\x52\x58'\
b'\x49\x57\x4a\x58\x4b\x59\x4a\x59\x49\x57\x47\x54\x46\x50\x46'\
b'\x4d\x47\x4b\x49\x4b\x4b\x4c\x4d\x4d\x4e\x4f\x4f\x55\x51\x57'\
b'\x52\x59\x54\x20\x52\x4b\x4b\x4d\x4d\x4f\x4e\x55\x50\x57\x51'\
b'\x58\x52\x59\x54\x59\x58\x57\x5a\x54\x5b\x50\x5b\x4d\x5a\x4b'\
b'\x58\x4b\x57\x4c\x56\x4d\x57\x4c\x58\x1f\x46\x5e\x5b\x46\x49'\
b'\x5b\x20\x52\x4e\x46\x50\x48\x50\x4a\x4f\x4c\x4d\x4d\x4b\x4d'\
b'\x49\x4b\x49\x49\x4a\x47\x4c\x46\x4e\x46\x50\x47\x53\x48\x56'\
b'\x48\x59\x47\x5b\x46\x20\x52\x57\x54\x55\x55\x54\x57\x54\x59'\
b'\x56\x5b\x58\x5b\x5a\x5a\x5b\x58\x5b\x56\x59\x54\x57\x54\x30'\
b'\x46\x5f\x5b\x4e\x5a\x4f\x5b\x50\x5c\x4f\x5c\x4e\x5b\x4d\x5a'\
b'\x4d\x59\x4e\x58\x50\x56\x55\x54\x58\x52\x5a\x50\x5b\x4d\x5b'\
b'\x4a\x5a\x49\x58\x49\x55\x4a\x53\x50\x4f\x52\x4d\x53\x4b\x53'\
b'\x49\x52\x47\x50\x46\x4e\x47\x4d\x49\x4d\x4b\x4e\x4e\x50\x51'\
b'\x55\x58\x57\x5a\x5a\x5b\x5b\x5b\x5c\x5a\x5c\x59\x20\x52\x4d'\
b'\x5b\x4b\x5a\x4a\x58\x4a\x55\x4b\x53\x4d\x51\x20\x52\x4d\x4b'\
b'\x4e\x4d\x56\x58\x58\x5a\x5a\x5b\x05\x4e\x56\x52\x46\x51\x4d'\
b'\x20\x52\x53\x46\x51\x4d\x13\x4b\x59\x56\x42\x54\x44\x52\x47'\
b'\x50\x4b\x4f\x50\x4f\x54\x50\x59\x52\x5d\x54\x60\x56\x62\x20'\
b'\x52\x54\x44\x52\x48\x51\x4b\x50\x50\x50\x54\x51\x59\x52\x5c'\
b'\x54\x60\x13\x4b\x59\x4e\x42\x50\x44\x52\x47\x54\x4b\x55\x50'\
b'\x55\x54\x54\x59\x52\x5d\x50\x60\x4e\x62\x20\x52\x50\x44\x52'\
b'\x48\x53\x4b\x54\x50\x54\x54\x53\x59\x52\x5c\x50\x60\x08\x4a'\
b'\x5a\x52\x4c\x52\x58\x20\x52\x4d\x4f\x57\x55\x20\x52\x57\x4f'\
b'\x4d\x55\x05\x45\x5f\x52\x49\x52\x5b\x20\x52\x49\x52\x5b\x52'\
b'\x07\x4e\x56\x53\x57\x52\x58\x51\x57\x52\x56\x53\x57\x53\x59'\
b'\x51\x5b\x02\x45\x5f\x49\x52\x5b\x52\x05\x4e\x56\x52\x56\x51'\
b'\x57\x52\x58\x53\x57\x52\x56\x02\x47\x5d\x5b\x42\x49\x62\x27'\
b'\x48\x5c\x51\x46\x4e\x47\x4c\x4a\x4b\x4f\x4b\x52\x4c\x57\x4e'\
b'\x5a\x51\x5b\x53\x5b\x56\x5a\x58\x57\x59\x52\x59\x4f\x58\x4a'\
b'\x56\x47\x53\x46\x51\x46\x20\x52\x51\x46\x4f\x47\x4e\x48\x4d'\
b'\x4a\x4c\x4f\x4c\x52\x4d\x57\x4e\x59\x4f\x5a\x51\x5b\x20\x52'\
b'\x53\x5b\x55\x5a\x56\x59\x57\x57\x58\x52\x58\x4f\x57\x4a\x56'\
b'\x48\x55\x47\x53\x46\x0a\x48\x5c\x4e\x4a\x50\x49\x53\x46\x53'\
b'\x5b\x20\x52\x52\x47\x52\x5b\x20\x52\x4e\x5b\x57\x5b\x2c\x48'\
b'\x5c\x4c\x4a\x4d\x4b\x4c\x4c\x4b\x4b\x4b\x4a\x4c\x48\x4d\x47'\
b'\x50\x46\x54\x46\x57\x47\x58\x48\x59\x4a\x59\x4c\x58\x4e\x55'\
b'\x50\x50\x52\x4e\x53\x4c\x55\x4b\x58\x4b\x5b\x20\x52\x54\x46'\
b'\x56\x47\x57\x48\x58\x4a\x58\x4c\x57\x4e\x54\x50\x50\x52\x20'\
b'\x52\x4b\x59\x4c\x58\x4e\x58\x53\x5a\x56\x5a\x58\x59\x59\x58'\
b'\x20\x52\x4e\x58\x53\x5b\x57\x5b\x58\x5a\x59\x58\x59\x56\x2e'\
b'\x48\x5c\x4c\x4a\x4d\x4b\x4c\x4c\x4b\x4b\x4b\x4a\x4c\x48\x4d'\
b'\x47\x50\x46\x54\x46\x57\x47\x58\x49\x58\x4c\x57\x4e\x54\x4f'\
b'\x51\x4f\x20\x52\x54\x46\x56\x47\x57\x49\x57\x4c\x56\x4e\x54'\
b'\x4f\x20\x52\x54\x4f\x56\x50\x58\x52\x59\x54\x59\x57\x58\x59'\
b'\x57\x5a\x54\x5b\x50\x5b\x4d\x5a\x4c\x59\x4b\x57\x4b\x56\x4c'\
b'\x55\x4d\x56\x4c\x57\x20\x52\x57\x51\x58\x54\x58\x57\x57\x59'\
b'\x56\x5a\x54\x5b\x0c\x48\x5c\x54\x48\x54\x5b\x20\x52\x55\x46'\
b'\x55\x5b\x20\x52\x55\x46\x4a\x55\x5a\x55\x20\x52\x51\x5b\x58'\
b'\x5b\x26\x48\x5c\x4d\x46\x4b\x50\x20\x52\x4b\x50\x4d\x4e\x50'\
b'\x4d\x53\x4d\x56\x4e\x58\x50\x59\x53\x59\x55\x58\x58\x56\x5a'\
b'\x53\x5b\x50\x5b\x4d\x5a\x4c\x59\x4b\x57\x4b\x56\x4c\x55\x4d'\
b'\x56\x4c\x57\x20\x52\x53\x4d\x55\x4e\x57\x50\x58\x53\x58\x55'\
b'\x57\x58\x55\x5a\x53\x5b\x20\x52\x4d\x46\x57\x46\x20\x52\x4d'\
b'\x47\x52\x47\x57\x46\x2f\x48\x5c\x57\x49\x56\x4a\x57\x4b\x58'\
b'\x4a\x58\x49\x57\x47\x55\x46\x52\x46\x4f\x47\x4d\x49\x4c\x4b'\
b'\x4b\x4f\x4b\x55\x4c\x58\x4e\x5a\x51\x5b\x53\x5b\x56\x5a\x58'\
b'\x58\x59\x55\x59\x54\x58\x51\x56\x4f\x53\x4e\x52\x4e\x4f\x4f'\
b'\x4d\x51\x4c\x54\x20\x52\x52\x46\x50\x47\x4e\x49\x4d\x4b\x4c'\
b'\x4f\x4c\x55\x4d\x58\x4f\x5a\x51\x5b\x20\x52\x53\x5b\x55\x5a'\
b'\x57\x58\x58\x55\x58\x54\x57\x51\x55\x4f\x53\x4e\x1e\x48\x5c'\
b'\x4b\x46\x4b\x4c\x20\x52\x4b\x4a\x4c\x48\x4e\x46\x50\x46\x55'\
b'\x49\x57\x49\x58\x48\x59\x46\x20\x52\x4c\x48\x4e\x47\x50\x47'\
b'\x55\x49\x20\x52\x59\x46\x59\x49\x58\x4c\x54\x51\x53\x53\x52'\
b'\x56\x52\x5b\x20\x52\x58\x4c\x53\x51\x52\x53\x51\x56\x51\x5b'\
b'\x3e\x48\x5c\x50\x46\x4d\x47\x4c\x49\x4c\x4c\x4d\x4e\x50\x4f'\
b'\x54\x4f\x57\x4e\x58\x4c\x58\x49\x57\x47\x54\x46\x50\x46\x20'\
b'\x52\x50\x46\x4e\x47\x4d\x49\x4d\x4c\x4e\x4e\x50\x4f\x20\x52'\
b'\x54\x4f\x56\x4e\x57\x4c\x57\x49\x56\x47\x54\x46\x20\x52\x50'\
b'\x4f\x4d\x50\x4c\x51\x4b\x53\x4b\x57\x4c\x59\x4d\x5a\x50\x5b'\
b'\x54\x5b\x57\x5a\x58\x59\x59\x57\x59\x53\x58\x51\x57\x50\x54'\
b'\x4f\x20\x52\x50\x4f\x4e\x50\x4d\x51\x4c\x53\x4c\x57\x4d\x59'\
b'\x4e\x5a\x50\x5b\x20\x52\x54\x5b\x56\x5a\x57\x59\x58\x57\x58'\
b'\x53\x57\x51\x56\x50\x54\x4f\x2f\x48\x5c\x58\x4d\x57\x50\x55'\
b'\x52\x52\x53\x51\x53\x4e\x52\x4c\x50\x4b\x4d\x4b\x4c\x4c\x49'\
b'\x4e\x47\x51\x46\x53\x46\x56\x47\x58\x49\x59\x4c\x59\x52\x58'\
b'\x56\x57\x58\x55\x5a\x52\x5b\x4f\x5b\x4d\x5a\x4c\x58\x4c\x57'\
b'\x4d\x56\x4e\x57\x4d\x58\x20\x52\x51\x53\x4f\x52\x4d\x50\x4c'\
b'\x4d\x4c\x4c\x4d\x49\x4f\x47\x51\x46\x20\x52\x53\x46\x55\x47'\
b'\x57\x49\x58\x4c\x58\x52\x57\x56\x56\x58\x54\x5a\x52\x5b\x0b'\
b'\x4e\x56\x52\x4f\x51\x50\x52\x51\x53\x50\x52\x4f\x20\x52\x52'\
b'\x56\x51\x57\x52\x58\x53\x57\x52\x56\x0d\x4e\x56\x52\x4f\x51'\
b'\x50\x52\x51\x53\x50\x52\x4f\x20\x52\x53\x57\x52\x58\x51\x57'\
b'\x52\x56\x53\x57\x53\x59\x51\x5b\x03\x46\x5e\x5a\x49\x4a\x52'\
b'\x5a\x5b\x05\x45\x5f\x49\x4f\x5b\x4f\x20\x52\x49\x55\x5b\x55'\
b'\x03\x46\x5e\x4a\x49\x5a\x52\x4a\x5b\x1f\x49\x5b\x4d\x4a\x4e'\
b'\x4b\x4d\x4c\x4c\x4b\x4c\x4a\x4d\x48\x4e\x47\x50\x46\x53\x46'\
b'\x56\x47\x57\x48\x58\x4a\x58\x4c\x57\x4e\x56\x4f\x52\x51\x52'\
b'\x54\x20\x52\x53\x46\x55\x47\x56\x48\x57\x4a\x57\x4c\x56\x4e'\
b'\x54\x50\x20\x52\x52\x59\x51\x5a\x52\x5b\x53\x5a\x52\x59\x37'\
b'\x45\x60\x57\x4e\x56\x4c\x54\x4b\x51\x4b\x4f\x4c\x4e\x4d\x4d'\
b'\x50\x4d\x53\x4e\x55\x50\x56\x53\x56\x55\x55\x56\x53\x20\x52'\
b'\x51\x4b\x4f\x4d\x4e\x50\x4e\x53\x4f\x55\x50\x56\x20\x52\x57'\
b'\x4b\x56\x53\x56\x55\x58\x56\x5a\x56\x5c\x54\x5d\x51\x5d\x4f'\
b'\x5c\x4c\x5b\x4a\x59\x48\x57\x47\x54\x46\x51\x46\x4e\x47\x4c'\
b'\x48\x4a\x4a\x49\x4c\x48\x4f\x48\x52\x49\x55\x4a\x57\x4c\x59'\
b'\x4e\x5a\x51\x5b\x54\x5b\x57\x5a\x59\x59\x5a\x58\x20\x52\x58'\
b'\x4b\x57\x53\x57\x55\x58\x56\x11\x48\x5c\x52\x46\x4b\x5b\x20'\
b'\x52\x52\x46\x59\x5b\x20\x52\x52\x49\x58\x5b\x20\x52\x4d\x55'\
b'\x56\x55\x20\x52\x49\x5b\x4f\x5b\x20\x52\x55\x5b\x5b\x5b\x2c'\
b'\x47\x5d\x4c\x46\x4c\x5b\x20\x52\x4d\x46\x4d\x5b\x20\x52\x49'\
b'\x46\x55\x46\x58\x47\x59\x48\x5a\x4a\x5a\x4c\x59\x4e\x58\x4f'\
b'\x55\x50\x20\x52\x55\x46\x57\x47\x58\x48\x59\x4a\x59\x4c\x58'\
b'\x4e\x57\x4f\x55\x50\x20\x52\x4d\x50\x55\x50\x58\x51\x59\x52'\
b'\x5a\x54\x5a\x57\x59\x59\x58\x5a\x55\x5b\x49\x5b\x20\x52\x55'\
b'\x50\x57\x51\x58\x52\x59\x54\x59\x57\x58\x59\x57\x5a\x55\x5b'\
b'\x1f\x47\x5c\x58\x49\x59\x4c\x59\x46\x58\x49\x56\x47\x53\x46'\
b'\x51\x46\x4e\x47\x4c\x49\x4b\x4b\x4a\x4e\x4a\x53\x4b\x56\x4c'\
b'\x58\x4e\x5a\x51\x5b\x53\x5b\x56\x5a\x58\x58\x59\x56\x20\x52'\
b'\x51\x46\x4f\x47\x4d\x49\x4c\x4b\x4b\x4e\x4b\x53\x4c\x56\x4d'\
b'\x58\x4f\x5a\x51\x5b\x1d\x47\x5d\x4c\x46\x4c\x5b\x20\x52\x4d'\
b'\x46\x4d\x5b\x20\x52\x49\x46\x53\x46\x56\x47\x58\x49\x59\x4b'\
b'\x5a\x4e\x5a\x53\x59\x56\x58\x58\x56\x5a\x53\x5b\x49\x5b\x20'\
b'\x52\x53\x46\x55\x47\x57\x49\x58\x4b\x59\x4e\x59\x53\x58\x56'\
b'\x57\x58\x55\x5a\x53\x5b\x15\x47\x5c\x4c\x46\x4c\x5b\x20\x52'\
b'\x4d\x46\x4d\x5b\x20\x52\x53\x4c\x53\x54\x20\x52\x49\x46\x59'\
b'\x46\x59\x4c\x58\x46\x20\x52\x4d\x50\x53\x50\x20\x52\x49\x5b'\
b'\x59\x5b\x59\x55\x58\x5b\x13\x47\x5b\x4c\x46\x4c\x5b\x20\x52'\
b'\x4d\x46\x4d\x5b\x20\x52\x53\x4c\x53\x54\x20\x52\x49\x46\x59'\
b'\x46\x59\x4c\x58\x46\x20\x52\x4d\x50\x53\x50\x20\x52\x49\x5b'\
b'\x50\x5b\x27\x47\x5e\x58\x49\x59\x4c\x59\x46\x58\x49\x56\x47'\
b'\x53\x46\x51\x46\x4e\x47\x4c\x49\x4b\x4b\x4a\x4e\x4a\x53\x4b'\
b'\x56\x4c\x58\x4e\x5a\x51\x5b\x53\x5b\x56\x5a\x58\x58\x20\x52'\
b'\x51\x46\x4f\x47\x4d\x49\x4c\x4b\x4b\x4e\x4b\x53\x4c\x56\x4d'\
b'\x58\x4f\x5a\x51\x5b\x20\x52\x58\x53\x58\x5b\x20\x52\x59\x53'\
b'\x59\x5b\x20\x52\x55\x53\x5c\x53\x1a\x46\x5e\x4b\x46\x4b\x5b'\
b'\x20\x52\x4c\x46\x4c\x5b\x20\x52\x58\x46\x58\x5b\x20\x52\x59'\
b'\x46\x59\x5b\x20\x52\x48\x46\x4f\x46\x20\x52\x55\x46\x5c\x46'\
b'\x20\x52\x4c\x50\x58\x50\x20\x52\x48\x5b\x4f\x5b\x20\x52\x55'\
b'\x5b\x5c\x5b\x0b\x4d\x58\x52\x46\x52\x5b\x20\x52\x53\x46\x53'\
b'\x5b\x20\x52\x4f\x46\x56\x46\x20\x52\x4f\x5b\x56\x5b\x13\x4b'\
b'\x5a\x55\x46\x55\x57\x54\x5a\x52\x5b\x50\x5b\x4e\x5a\x4d\x58'\
b'\x4d\x56\x4e\x55\x4f\x56\x4e\x57\x20\x52\x54\x46\x54\x57\x53'\
b'\x5a\x52\x5b\x20\x52\x51\x46\x58\x46\x1a\x46\x5c\x4b\x46\x4b'\
b'\x5b\x20\x52\x4c\x46\x4c\x5b\x20\x52\x59\x46\x4c\x53\x20\x52'\
b'\x51\x4f\x59\x5b\x20\x52\x50\x4f\x58\x5b\x20\x52\x48\x46\x4f'\
b'\x46\x20\x52\x55\x46\x5b\x46\x20\x52\x48\x5b\x4f\x5b\x20\x52'\
b'\x55\x5b\x5b\x5b\x0d\x49\x5b\x4e\x46\x4e\x5b\x20\x52\x4f\x46'\
b'\x4f\x5b\x20\x52\x4b\x46\x52\x46\x20\x52\x4b\x5b\x5a\x5b\x5a'\
b'\x55\x59\x5b\x1d\x46\x5f\x4b\x46\x4b\x5b\x20\x52\x4c\x46\x52'\
b'\x58\x20\x52\x4b\x46\x52\x5b\x20\x52\x59\x46\x52\x5b\x20\x52'\
b'\x59\x46\x59\x5b\x20\x52\x5a\x46\x5a\x5b\x20\x52\x48\x46\x4c'\
b'\x46\x20\x52\x59\x46\x5d\x46\x20\x52\x48\x5b\x4e\x5b\x20\x52'\
b'\x56\x5b\x5d\x5b\x14\x47\x5e\x4c\x46\x4c\x5b\x20\x52\x4d\x46'\
b'\x59\x59\x20\x52\x4d\x48\x59\x5b\x20\x52\x59\x46\x59\x5b\x20'\
b'\x52\x49\x46\x4d\x46\x20\x52\x56\x46\x5c\x46\x20\x52\x49\x5b'\
b'\x4f\x5b\x2b\x47\x5d\x51\x46\x4e\x47\x4c\x49\x4b\x4b\x4a\x4f'\
b'\x4a\x52\x4b\x56\x4c\x58\x4e\x5a\x51\x5b\x53\x5b\x56\x5a\x58'\
b'\x58\x59\x56\x5a\x52\x5a\x4f\x59\x4b\x58\x49\x56\x47\x53\x46'\
b'\x51\x46\x20\x52\x51\x46\x4f\x47\x4d\x49\x4c\x4b\x4b\x4f\x4b'\
b'\x52\x4c\x56\x4d\x58\x4f\x5a\x51\x5b\x20\x52\x53\x5b\x55\x5a'\
b'\x57\x58\x58\x56\x59\x52\x59\x4f\x58\x4b\x57\x49\x55\x47\x53'\
b'\x46\x1c\x47\x5d\x4c\x46\x4c\x5b\x20\x52\x4d\x46\x4d\x5b\x20'\
b'\x52\x49\x46\x55\x46\x58\x47\x59\x48\x5a\x4a\x5a\x4d\x59\x4f'\
b'\x58\x50\x55\x51\x4d\x51\x20\x52\x55\x46\x57\x47\x58\x48\x59'\
b'\x4a\x59\x4d\x58\x4f\x57\x50\x55\x51\x20\x52\x49\x5b\x50\x5b'\
b'\x3f\x47\x5d\x51\x46\x4e\x47\x4c\x49\x4b\x4b\x4a\x4f\x4a\x52'\
b'\x4b\x56\x4c\x58\x4e\x5a\x51\x5b\x53\x5b\x56\x5a\x58\x58\x59'\
b'\x56\x5a\x52\x5a\x4f\x59\x4b\x58\x49\x56\x47\x53\x46\x51\x46'\
b'\x20\x52\x51\x46\x4f\x47\x4d\x49\x4c\x4b\x4b\x4f\x4b\x52\x4c'\
b'\x56\x4d\x58\x4f\x5a\x51\x5b\x20\x52\x53\x5b\x55\x5a\x57\x58'\
b'\x58\x56\x59\x52\x59\x4f\x58\x4b\x57\x49\x55\x47\x53\x46\x20'\
b'\x52\x4e\x59\x4e\x58\x4f\x56\x51\x55\x52\x55\x54\x56\x55\x58'\
b'\x56\x5f\x57\x60\x59\x60\x5a\x5e\x5a\x5d\x20\x52\x55\x58\x56'\
b'\x5c\x57\x5e\x58\x5f\x59\x5f\x5a\x5e\x2c\x47\x5d\x4c\x46\x4c'\
b'\x5b\x20\x52\x4d\x46\x4d\x5b\x20\x52\x49\x46\x55\x46\x58\x47'\
b'\x59\x48\x5a\x4a\x5a\x4c\x59\x4e\x58\x4f\x55\x50\x4d\x50\x20'\
b'\x52\x55\x46\x57\x47\x58\x48\x59\x4a\x59\x4c\x58\x4e\x57\x4f'\
b'\x55\x50\x20\x52\x49\x5b\x50\x5b\x20\x52\x52\x50\x54\x51\x55'\
b'\x52\x58\x59\x59\x5a\x5a\x5a\x5b\x59\x20\x52\x54\x51\x55\x53'\
b'\x57\x5a\x58\x5b\x5a\x5b\x5b\x59\x5b\x58\x21\x48\x5c\x58\x49'\
b'\x59\x46\x59\x4c\x58\x49\x56\x47\x53\x46\x50\x46\x4d\x47\x4b'\
b'\x49\x4b\x4b\x4c\x4d\x4d\x4e\x4f\x4f\x55\x51\x57\x52\x59\x54'\
b'\x20\x52\x4b\x4b\x4d\x4d\x4f\x4e\x55\x50\x57\x51\x58\x52\x59'\
b'\x54\x59\x58\x57\x5a\x54\x5b\x51\x5b\x4e\x5a\x4c\x58\x4b\x55'\
b'\x4b\x5b\x4c\x58\x0f\x49\x5c\x52\x46\x52\x5b\x20\x52\x53\x46'\
b'\x53\x5b\x20\x52\x4c\x46\x4b\x4c\x4b\x46\x5a\x46\x5a\x4c\x59'\
b'\x46\x20\x52\x4f\x5b\x56\x5b\x16\x46\x5e\x4b\x46\x4b\x55\x4c'\
b'\x58\x4e\x5a\x51\x5b\x53\x5b\x56\x5a\x58\x58\x59\x55\x59\x46'\
b'\x20\x52\x4c\x46\x4c\x55\x4d\x58\x4f\x5a\x51\x5b\x20\x52\x48'\
b'\x46\x4f\x46\x20\x52\x56\x46\x5c\x46\x0e\x48\x5c\x4b\x46\x52'\
b'\x5b\x20\x52\x4c\x46\x52\x58\x20\x52\x59\x46\x52\x5b\x20\x52'\
b'\x49\x46\x4f\x46\x20\x52\x55\x46\x5b\x46\x17\x46\x5e\x4a\x46'\
b'\x4e\x5b\x20\x52\x4b\x46\x4e\x56\x20\x52\x52\x46\x4e\x5b\x20'\
b'\x52\x52\x46\x56\x5b\x20\x52\x53\x46\x56\x56\x20\x52\x5a\x46'\
b'\x56\x5b\x20\x52\x47\x46\x4e\x46\x20\x52\x57\x46\x5d\x46\x14'\
b'\x48\x5c\x4b\x46\x58\x5b\x20\x52\x4c\x46\x59\x5b\x20\x52\x59'\
b'\x46\x4b\x5b\x20\x52\x49\x46\x4f\x46\x20\x52\x55\x46\x5b\x46'\
b'\x20\x52\x49\x5b\x4f\x5b\x20\x52\x55\x5b\x5b\x5b\x13\x48\x5d'\
b'\x4b\x46\x52\x51\x52\x5b\x20\x52\x4c\x46\x53\x51\x53\x5b\x20'\
b'\x52\x5a\x46\x53\x51\x20\x52\x49\x46\x4f\x46\x20\x52\x56\x46'\
b'\x5c\x46\x20\x52\x4f\x5b\x56\x5b\x0f\x48\x5c\x58\x46\x4b\x5b'\
b'\x20\x52\x59\x46\x4c\x5b\x20\x52\x4c\x46\x4b\x4c\x4b\x46\x59'\
b'\x46\x20\x52\x4b\x5b\x59\x5b\x59\x55\x58\x5b\x0b\x4b\x59\x4f'\
b'\x42\x4f\x62\x20\x52\x50\x42\x50\x62\x20\x52\x4f\x42\x56\x42'\
b'\x20\x52\x4f\x62\x56\x62\x02\x4b\x59\x4b\x46\x59\x5e\x0b\x4b'\
b'\x59\x54\x42\x54\x62\x20\x52\x55\x42\x55\x62\x20\x52\x4e\x42'\
b'\x55\x42\x20\x52\x4e\x62\x55\x62\x07\x47\x5d\x4a\x54\x52\x4f'\
b'\x5a\x54\x20\x52\x4a\x54\x52\x50\x5a\x54\x02\x48\x5c\x48\x62'\
b'\x5c\x62\x06\x4c\x58\x50\x46\x55\x4c\x20\x52\x50\x46\x4f\x47'\
b'\x55\x4c\x26\x49\x5d\x4e\x4f\x4e\x50\x4d\x50\x4d\x4f\x4e\x4e'\
b'\x50\x4d\x54\x4d\x56\x4e\x57\x4f\x58\x51\x58\x58\x59\x5a\x5a'\
b'\x5b\x20\x52\x57\x4f\x57\x58\x58\x5a\x5a\x5b\x5b\x5b\x20\x52'\
b'\x57\x51\x56\x52\x50\x53\x4d\x54\x4c\x56\x4c\x58\x4d\x5a\x50'\
b'\x5b\x53\x5b\x55\x5a\x57\x58\x20\x52\x50\x53\x4e\x54\x4d\x56'\
b'\x4d\x58\x4e\x5a\x50\x5b\x20\x47\x5c\x4c\x46\x4c\x5b\x20\x52'\
b'\x4d\x46\x4d\x5b\x20\x52\x4d\x50\x4f\x4e\x51\x4d\x53\x4d\x56'\
b'\x4e\x58\x50\x59\x53\x59\x55\x58\x58\x56\x5a\x53\x5b\x51\x5b'\
b'\x4f\x5a\x4d\x58\x20\x52\x53\x4d\x55\x4e\x57\x50\x58\x53\x58'\
b'\x55\x57\x58\x55\x5a\x53\x5b\x20\x52\x49\x46\x4d\x46\x1b\x48'\
b'\x5b\x57\x50\x56\x51\x57\x52\x58\x51\x58\x50\x56\x4e\x54\x4d'\
b'\x51\x4d\x4e\x4e\x4c\x50\x4b\x53\x4b\x55\x4c\x58\x4e\x5a\x51'\
b'\x5b\x53\x5b\x56\x5a\x58\x58\x20\x52\x51\x4d\x4f\x4e\x4d\x50'\
b'\x4c\x53\x4c\x55\x4d\x58\x4f\x5a\x51\x5b\x23\x48\x5d\x57\x46'\
b'\x57\x5b\x20\x52\x58\x46\x58\x5b\x20\x52\x57\x50\x55\x4e\x53'\
b'\x4d\x51\x4d\x4e\x4e\x4c\x50\x4b\x53\x4b\x55\x4c\x58\x4e\x5a'\
b'\x51\x5b\x53\x5b\x55\x5a\x57\x58\x20\x52\x51\x4d\x4f\x4e\x4d'\
b'\x50\x4c\x53\x4c\x55\x4d\x58\x4f\x5a\x51\x5b\x20\x52\x54\x46'\
b'\x58\x46\x20\x52\x57\x5b\x5b\x5b\x1e\x48\x5b\x4c\x53\x58\x53'\
b'\x58\x51\x57\x4f\x56\x4e\x54\x4d\x51\x4d\x4e\x4e\x4c\x50\x4b'\
b'\x53\x4b\x55\x4c\x58\x4e\x5a\x51\x5b\x53\x5b\x56\x5a\x58\x58'\
b'\x20\x52\x57\x53\x57\x50\x56\x4e\x20\x52\x51\x4d\x4f\x4e\x4d'\
b'\x50\x4c\x53\x4c\x55\x4d\x58\x4f\x5a\x51\x5b\x15\x4b\x58\x55'\
b'\x47\x54\x48\x55\x49\x56\x48\x56\x47\x55\x46\x53\x46\x51\x47'\
b'\x50\x49\x50\x5b\x20\x52\x53\x46\x52\x47\x51\x49\x51\x5b\x20'\
b'\x52\x4d\x4d\x55\x4d\x20\x52\x4d\x5b\x54\x5b\x3b\x49\x5c\x51'\
b'\x4d\x4f\x4e\x4e\x4f\x4d\x51\x4d\x53\x4e\x55\x4f\x56\x51\x57'\
b'\x53\x57\x55\x56\x56\x55\x57\x53\x57\x51\x56\x4f\x55\x4e\x53'\
b'\x4d\x51\x4d\x20\x52\x4f\x4e\x4e\x50\x4e\x54\x4f\x56\x20\x52'\
b'\x55\x56\x56\x54\x56\x50\x55\x4e\x20\x52\x56\x4f\x57\x4e\x59'\
b'\x4d\x59\x4e\x57\x4e\x20\x52\x4e\x55\x4d\x56\x4c\x58\x4c\x59'\
b'\x4d\x5b\x50\x5c\x55\x5c\x58\x5d\x59\x5e\x20\x52\x4c\x59\x4d'\
b'\x5a\x50\x5b\x55\x5b\x58\x5c\x59\x5e\x59\x5f\x58\x61\x55\x62'\
b'\x4f\x62\x4c\x61\x4b\x5f\x4b\x5e\x4c\x5c\x4f\x5b\x1b\x47\x5d'\
b'\x4c\x46\x4c\x5b\x20\x52\x4d\x46\x4d\x5b\x20\x52\x4d\x50\x4f'\
b'\x4e\x52\x4d\x54\x4d\x57\x4e\x58\x50\x58\x5b\x20\x52\x54\x4d'\
b'\x56\x4e\x57\x50\x57\x5b\x20\x52\x49\x46\x4d\x46\x20\x52\x49'\
b'\x5b\x50\x5b\x20\x52\x54\x5b\x5b\x5b\x11\x4d\x58\x52\x46\x51'\
b'\x47\x52\x48\x53\x47\x52\x46\x20\x52\x52\x4d\x52\x5b\x20\x52'\
b'\x53\x4d\x53\x5b\x20\x52\x4f\x4d\x53\x4d\x20\x52\x4f\x5b\x56'\
b'\x5b\x18\x4d\x58\x53\x46\x52\x47\x53\x48\x54\x47\x53\x46\x20'\
b'\x52\x54\x4d\x54\x5f\x53\x61\x51\x62\x4f\x62\x4e\x61\x4e\x60'\
b'\x4f\x5f\x50\x60\x4f\x61\x20\x52\x53\x4d\x53\x5f\x52\x61\x51'\
b'\x62\x20\x52\x50\x4d\x54\x4d\x1a\x47\x5c\x4c\x46\x4c\x5b\x20'\
b'\x52\x4d\x46\x4d\x5b\x20\x52\x57\x4d\x4d\x57\x20\x52\x52\x53'\
b'\x58\x5b\x20\x52\x51\x53\x57\x5b\x20\x52\x49\x46\x4d\x46\x20'\
b'\x52\x54\x4d\x5a\x4d\x20\x52\x49\x5b\x50\x5b\x20\x52\x54\x5b'\
b'\x5a\x5b\x0b\x4d\x58\x52\x46\x52\x5b\x20\x52\x53\x46\x53\x5b'\
b'\x20\x52\x4f\x46\x53\x46\x20\x52\x4f\x5b\x56\x5b\x2b\x42\x63'\
b'\x47\x4d\x47\x5b\x20\x52\x48\x4d\x48\x5b\x20\x52\x48\x50\x4a'\
b'\x4e\x4d\x4d\x4f\x4d\x52\x4e\x53\x50\x53\x5b\x20\x52\x4f\x4d'\
b'\x51\x4e\x52\x50\x52\x5b\x20\x52\x53\x50\x55\x4e\x58\x4d\x5a'\
b'\x4d\x5d\x4e\x5e\x50\x5e\x5b\x20\x52\x5a\x4d\x5c\x4e\x5d\x50'\
b'\x5d\x5b\x20\x52\x44\x4d\x48\x4d\x20\x52\x44\x5b\x4b\x5b\x20'\
b'\x52\x4f\x5b\x56\x5b\x20\x52\x5a\x5b\x61\x5b\x1b\x47\x5d\x4c'\
b'\x4d\x4c\x5b\x20\x52\x4d\x4d\x4d\x5b\x20\x52\x4d\x50\x4f\x4e'\
b'\x52\x4d\x54\x4d\x57\x4e\x58\x50\x58\x5b\x20\x52\x54\x4d\x56'\
b'\x4e\x57\x50\x57\x5b\x20\x52\x49\x4d\x4d\x4d\x20\x52\x49\x5b'\
b'\x50\x5b\x20\x52\x54\x5b\x5b\x5b\x23\x48\x5c\x51\x4d\x4e\x4e'\
b'\x4c\x50\x4b\x53\x4b\x55\x4c\x58\x4e\x5a\x51\x5b\x53\x5b\x56'\
b'\x5a\x58\x58\x59\x55\x59\x53\x58\x50\x56\x4e\x53\x4d\x51\x4d'\
b'\x20\x52\x51\x4d\x4f\x4e\x4d\x50\x4c\x53\x4c\x55\x4d\x58\x4f'\
b'\x5a\x51\x5b\x20\x52\x53\x5b\x55\x5a\x57\x58\x58\x55\x58\x53'\
b'\x57\x50\x55\x4e\x53\x4d\x23\x47\x5c\x4c\x4d\x4c\x62\x20\x52'\
b'\x4d\x4d\x4d\x62\x20\x52\x4d\x50\x4f\x4e\x51\x4d\x53\x4d\x56'\
b'\x4e\x58\x50\x59\x53\x59\x55\x58\x58\x56\x5a\x53\x5b\x51\x5b'\
b'\x4f\x5a\x4d\x58\x20\x52\x53\x4d\x55\x4e\x57\x50\x58\x53\x58'\
b'\x55\x57\x58\x55\x5a\x53\x5b\x20\x52\x49\x4d\x4d\x4d\x20\x52'\
b'\x49\x62\x50\x62\x20\x48\x5c\x57\x4d\x57\x62\x20\x52\x58\x4d'\
b'\x58\x62\x20\x52\x57\x50\x55\x4e\x53\x4d\x51\x4d\x4e\x4e\x4c'\
b'\x50\x4b\x53\x4b\x55\x4c\x58\x4e\x5a\x51\x5b\x53\x5b\x55\x5a'\
b'\x57\x58\x20\x52\x51\x4d\x4f\x4e\x4d\x50\x4c\x53\x4c\x55\x4d'\
b'\x58\x4f\x5a\x51\x5b\x20\x52\x54\x62\x5b\x62\x16\x49\x5a\x4e'\
b'\x4d\x4e\x5b\x20\x52\x4f\x4d\x4f\x5b\x20\x52\x4f\x53\x50\x50'\
b'\x52\x4e\x54\x4d\x57\x4d\x58\x4e\x58\x4f\x57\x50\x56\x4f\x57'\
b'\x4e\x20\x52\x4b\x4d\x4f\x4d\x20\x52\x4b\x5b\x52\x5b\x1f\x4a'\
b'\x5b\x57\x4f\x58\x4d\x58\x51\x57\x4f\x56\x4e\x54\x4d\x50\x4d'\
b'\x4e\x4e\x4d\x4f\x4d\x51\x4e\x52\x50\x53\x55\x55\x57\x56\x58'\
b'\x57\x20\x52\x4d\x50\x4e\x51\x50\x52\x55\x54\x57\x55\x58\x56'\
b'\x58\x59\x57\x5a\x55\x5b\x51\x5b\x4f\x5a\x4e\x59\x4d\x57\x4d'\
b'\x5b\x4e\x59\x0f\x4b\x5a\x50\x46\x50\x57\x51\x5a\x53\x5b\x55'\
b'\x5b\x57\x5a\x58\x58\x20\x52\x51\x46\x51\x57\x52\x5a\x53\x5b'\
b'\x20\x52\x4d\x4d\x55\x4d\x1b\x47\x5d\x4c\x4d\x4c\x58\x4d\x5a'\
b'\x50\x5b\x52\x5b\x55\x5a\x57\x58\x20\x52\x4d\x4d\x4d\x58\x4e'\
b'\x5a\x50\x5b\x20\x52\x57\x4d\x57\x5b\x20\x52\x58\x4d\x58\x5b'\
b'\x20\x52\x49\x4d\x4d\x4d\x20\x52\x54\x4d\x58\x4d\x20\x52\x57'\
b'\x5b\x5b\x5b\x0e\x49\x5b\x4c\x4d\x52\x5b\x20\x52\x4d\x4d\x52'\
b'\x59\x20\x52\x58\x4d\x52\x5b\x20\x52\x4a\x4d\x50\x4d\x20\x52'\
b'\x54\x4d\x5a\x4d\x17\x46\x5e\x4a\x4d\x4e\x5b\x20\x52\x4b\x4d'\
b'\x4e\x58\x20\x52\x52\x4d\x4e\x5b\x20\x52\x52\x4d\x56\x5b\x20'\
b'\x52\x53\x4d\x56\x58\x20\x52\x5a\x4d\x56\x5b\x20\x52\x47\x4d'\
b'\x4e\x4d\x20\x52\x57\x4d\x5d\x4d\x14\x48\x5c\x4c\x4d\x57\x5b'\
b'\x20\x52\x4d\x4d\x58\x5b\x20\x52\x58\x4d\x4c\x5b\x20\x52\x4a'\
b'\x4d\x50\x4d\x20\x52\x54\x4d\x5a\x4d\x20\x52\x4a\x5b\x50\x5b'\
b'\x20\x52\x54\x5b\x5a\x5b\x15\x48\x5b\x4c\x4d\x52\x5b\x20\x52'\
b'\x4d\x4d\x52\x59\x20\x52\x58\x4d\x52\x5b\x50\x5f\x4e\x61\x4c'\
b'\x62\x4b\x62\x4a\x61\x4b\x60\x4c\x61\x20\x52\x4a\x4d\x50\x4d'\
b'\x20\x52\x54\x4d\x5a\x4d\x0f\x49\x5b\x57\x4d\x4c\x5b\x20\x52'\
b'\x58\x4d\x4d\x5b\x20\x52\x4d\x4d\x4c\x51\x4c\x4d\x58\x4d\x20'\
b'\x52\x4c\x5b\x58\x5b\x58\x57\x57\x5b\x27\x4b\x59\x54\x42\x52'\
b'\x43\x51\x44\x50\x46\x50\x48\x51\x4a\x52\x4b\x53\x4d\x53\x4f'\
b'\x51\x51\x20\x52\x52\x43\x51\x45\x51\x47\x52\x49\x53\x4a\x54'\
b'\x4c\x54\x4e\x53\x50\x4f\x52\x53\x54\x54\x56\x54\x58\x53\x5a'\
b'\x52\x5b\x51\x5d\x51\x5f\x52\x61\x20\x52\x51\x53\x53\x55\x53'\
b'\x57\x52\x59\x51\x5a\x50\x5c\x50\x5e\x51\x60\x52\x61\x54\x62'\
b'\x02\x4e\x56\x52\x42\x52\x62\x27\x4b\x59\x50\x42\x52\x43\x53'\
b'\x44\x54\x46\x54\x48\x53\x4a\x52\x4b\x51\x4d\x51\x4f\x53\x51'\
b'\x20\x52\x52\x43\x53\x45\x53\x47\x52\x49\x51\x4a\x50\x4c\x50'\
b'\x4e\x51\x50\x55\x52\x51\x54\x50\x56\x50\x58\x51\x5a\x52\x5b'\
b'\x53\x5d\x53\x5f\x52\x61\x20\x52\x53\x53\x51\x55\x51\x57\x52'\
b'\x59\x53\x5a\x54\x5c\x54\x5e\x53\x60\x52\x61\x50\x62\x17\x46'\
b'\x5e\x49\x55\x49\x53\x4a\x50\x4c\x4f\x4e\x4f\x50\x50\x54\x53'\
b'\x56\x54\x58\x54\x5a\x53\x5b\x51\x20\x52\x49\x53\x4a\x51\x4c'\
b'\x50\x4e\x50\x50\x51\x54\x54\x56\x55\x58\x55\x5a\x54\x5b\x51'\
b'\x5b\x4f\x22\x4a\x5a\x4a\x46\x4a\x5b\x4b\x5b\x4b\x46\x4c\x46'\
b'\x4c\x5b\x4d\x5b\x4d\x46\x4e\x46\x4e\x5b\x4f\x5b\x4f\x46\x50'\
b'\x46\x50\x5b\x51\x5b\x51\x46\x52\x46\x52\x5b\x53\x5b\x53\x46'\
b'\x54\x46\x54\x5b\x55\x5b\x55\x46\x56\x46\x56\x5b\x57\x5b\x57'\
b'\x46\x58\x46\x58\x5b\x59\x5b\x59\x46\x5a\x46\x5a\x5b'
_index =\
b'\x00\x00\x03\x00\x22\x00\x4f\x00\x68\x00\xbd\x00\xfe\x00\x61'\
b'\x01\x6e\x01\x97\x01\xc0\x01\xd3\x01\xe0\x01\xf1\x01\xf8\x01'\
b'\x05\x02\x0c\x02\x5d\x02\x74\x02\xcf\x02\x2e\x03\x49\x03\x98'\
b'\x03\xf9\x03\x38\x04\xb7\x04\x18\x05\x31\x05\x4e\x05\x57\x05'\
b'\x64\x05\x6d\x05\xae\x05\x1f\x06\x44\x06\x9f\x06\xe0\x06\x1d'\
b'\x07\x4a\x07\x73\x07\xc4\x07\xfb\x07\x14\x08\x3d\x08\x74\x08'\
b'\x91\x08\xce\x08\xf9\x08\x52\x09\x8d\x09\x0e\x0a\x69\x0a\xae'\
b'\x0a\xcf\x0a\xfe\x0a\x1d\x0b\x4e\x0b\x79\x0b\xa2\x0b\xc3\x0b'\
b'\xdc\x0b\xe3\x0b\xfc\x0b\x0d\x0c\x14\x0c\x23\x0c\x72\x0c\xb5'\
b'\x0c\xee\x0c\x37\x0d\x76\x0d\xa3\x0d\x1c\x0e\x55\x0e\x7a\x0e'\
b'\xad\x0e\xe4\x0e\xfd\x0e\x56\x0f\x8f\x0f\xd8\x0f\x21\x10\x64'\
b'\x10\x93\x10\xd4\x10\xf5\x10\x2e\x11\x4d\x11\x7e\x11\xa9\x11'\
b'\xd6\x11\xf7\x11\x48\x12\x4f\x12\xa0\x12\xd1\x12'
# Zero-copy views over the packed font tables; the underlying objects are
# bytes literals, so these views are read-only.
INDEX = memoryview(_index)
FONT = memoryview(_font)
| 63.062678
| 64
| 0.707567
| 5,435
| 22,135
| 2.880957
| 0.027047
| 0.107677
| 0.0684
| 0.012262
| 0.461042
| 0.380508
| 0.325393
| 0.276408
| 0.234066
| 0.201175
| 0
| 0.380139
| 0.016445
| 22,135
| 350
| 65
| 63.242857
| 0.339075
| 0
| 0
| 0.028818
| 0
| 0.976945
| 0.918003
| 0.918003
| 0
| 1
| 0.000361
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
3301c7fa1e602409a8c75b49cd17175439693d61
| 1,220
|
py
|
Python
|
api/app/customer/schemas.py
|
Sguerreroo/electricity-market-analysis
|
32576f65a056ec1c94f098273dabdd1c2090e9f6
|
[
"MIT"
] | null | null | null |
api/app/customer/schemas.py
|
Sguerreroo/electricity-market-analysis
|
32576f65a056ec1c94f098273dabdd1c2090e9f6
|
[
"MIT"
] | null | null | null |
api/app/customer/schemas.py
|
Sguerreroo/electricity-market-analysis
|
32576f65a056ec1c94f098273dabdd1c2090e9f6
|
[
"MIT"
] | null | null | null |
from marshmallow import Schema, fields, validate
class CustomerSchema(Schema):
    """Validation schema for customer registration (Spanish error messages)."""

    # Required free-text fields, capped at 255 characters.
    name = fields.Str(
        required=True,
        error_messages={"required": "Introduce tu nombre"},
        validate=[validate.Length(max=255, error="Campo demasiado largo")],
    )
    surname = fields.Str(
        required=True,
        error_messages={"required": "Introduce tus apellidos"},
        validate=[validate.Length(max=255, error="Campo demasiado largo")],
    )
    # Spanish NIF: exactly eight digits followed by a single letter.
    nif = fields.Str(
        required=True,
        error_messages={"required": "Introduce tu nif"},
        validate=[validate.Regexp(r"^\d{8}[a-zA-Z]$", error="Introduce un nif válido")],
    )
    # Optional; format validated only when present.
    email = fields.Email(error_messages={"invalid": "Introduce un email válido"})
class ProfileCustomerSchema(Schema):
    """Validation schema for customer profile edits (no NIF field: the NIF
    is fixed at registration and cannot be changed here)."""

    # Required free-text fields, capped at 255 characters.
    name = fields.Str(
        required=True,
        error_messages={"required": "Introduce tu nombre"},
        validate=[validate.Length(max=255, error="Campo demasiado largo")],
    )
    surname = fields.Str(
        required=True,
        error_messages={"required": "Introduce tus apellidos"},
        validate=[validate.Length(max=255, error="Campo demasiado largo")],
    )
    # Optional; format validated only when present.
    email = fields.Email(error_messages={"invalid": "Introduce un email válido"})
| 20.677966
| 78
| 0.662295
| 137
| 1,220
| 5.846715
| 0.284672
| 0.113608
| 0.106117
| 0.131086
| 0.817728
| 0.817728
| 0.817728
| 0.817728
| 0.817728
| 0.751561
| 0
| 0.013265
| 0.196721
| 1,220
| 59
| 79
| 20.677966
| 0.804082
| 0
| 0
| 0.581818
| 0
| 0
| 0.266994
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.018182
| 0
| 0.181818
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
330c84322598b728ad154dbe8a530d87c6116950
| 1,218
|
py
|
Python
|
qiling/qiling/cc/arm.py
|
mrTavas/owasp-fstm-auto
|
6e9ff36e46d885701c7419db3eca15f12063a7f3
|
[
"CC0-1.0"
] | 2
|
2021-05-05T12:03:01.000Z
|
2021-06-04T14:27:15.000Z
|
qiling/qiling/cc/arm.py
|
mrTavas/owasp-fstm-auto
|
6e9ff36e46d885701c7419db3eca15f12063a7f3
|
[
"CC0-1.0"
] | null | null | null |
qiling/qiling/cc/arm.py
|
mrTavas/owasp-fstm-auto
|
6e9ff36e46d885701c7419db3eca15f12063a7f3
|
[
"CC0-1.0"
] | 2
|
2021-05-05T12:03:09.000Z
|
2021-06-04T14:27:21.000Z
|
#!/usr/bin/env python3
#
# Cross Platform and Multi Architecture Advanced Binary Emulation Framework
from unicorn.arm_const import UC_ARM_REG_R0, UC_ARM_REG_R1, UC_ARM_REG_R2, UC_ARM_REG_R3
from unicorn.arm64_const import (
UC_ARM64_REG_X0, UC_ARM64_REG_X1, UC_ARM64_REG_X2, UC_ARM64_REG_X3,
UC_ARM64_REG_X4, UC_ARM64_REG_X5, UC_ARM64_REG_X6, UC_ARM64_REG_X7
)
from qiling import Qiling
from . import QlCommonBaseCC
class aarch64(QlCommonBaseCC):
    """AArch64 calling convention: up to 8 arguments in X0..X7, rest on the stack."""

    # Eight argument registers, then eight stack slots (None marks a stack slot).
    _argregs = tuple([
        UC_ARM64_REG_X0, UC_ARM64_REG_X1, UC_ARM64_REG_X2, UC_ARM64_REG_X3,
        UC_ARM64_REG_X4, UC_ARM64_REG_X5, UC_ARM64_REG_X6, UC_ARM64_REG_X7,
    ]) + (None,) * 8

    def __init__(self, ql: Qiling) -> None:
        # Return value goes in X0.
        super().__init__(ql, UC_ARM64_REG_X0)

    @staticmethod
    def getNumSlots(argbits: int) -> int:
        # Every argument occupies exactly one slot regardless of its width.
        return 1

    def unwind(self, nslots: int) -> int:
        # TODO: cleanup?
        return self.ql.arch.stack_pop()
class aarch32(QlCommonBaseCC):
    """AArch32 calling convention: up to 4 arguments in R0..R3, rest on the stack."""

    # Four argument registers, then twelve stack slots (None marks a stack slot).
    _argregs = tuple([
        UC_ARM_REG_R0, UC_ARM_REG_R1, UC_ARM_REG_R2, UC_ARM_REG_R3,
    ]) + (None,) * 12

    def __init__(self, ql: Qiling) -> None:
        # Return value goes in R0.
        super().__init__(ql, UC_ARM_REG_R0)

    @staticmethod
    def getNumSlots(argbits: int) -> int:
        # Every argument occupies exactly one slot regardless of its width.
        return 1

    def unwind(self, nslots: int) -> int:
        # TODO: cleanup?
        return self.ql.arch.stack_pop()
| 29.707317
| 163
| 0.763547
| 208
| 1,218
| 3.990385
| 0.288462
| 0.143373
| 0.204819
| 0.036145
| 0.66988
| 0.66988
| 0.66988
| 0.66988
| 0.66988
| 0.66988
| 0
| 0.068311
| 0.134647
| 1,218
| 40
| 164
| 30.45
| 0.719165
| 0.103448
| 0
| 0.48
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.025
| 0
| 1
| 0.24
| false
| 0
| 0.16
| 0.16
| 0.72
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
683b48a041071ed728c9b592e467586fca8e65f8
| 5,524
|
py
|
Python
|
gslab_misc/gencat/tests/test_zipFile.py
|
AakaashRao/gslab_python
|
864f708ec80f4381235506489b8c117e54e16450
|
[
"MIT"
] | 12
|
2017-03-03T20:48:50.000Z
|
2020-11-27T23:37:15.000Z
|
gslab_misc/gencat/tests/test_zipFile.py
|
AakaashRao/gslab_python
|
864f708ec80f4381235506489b8c117e54e16450
|
[
"MIT"
] | 132
|
2017-01-11T23:32:01.000Z
|
2022-03-31T17:00:06.000Z
|
gslab_misc/gencat/tests/test_zipFile.py
|
AakaashRao/gslab_python
|
864f708ec80f4381235506489b8c117e54e16450
|
[
"MIT"
] | 10
|
2017-07-22T02:35:29.000Z
|
2021-02-16T00:09:44.000Z
|
import unittest
import os
import shutil
import zipfile
import sys
# Ensure the script is run from its own directory
os.chdir(os.path.dirname(os.path.realpath(__file__)))
# Make the package root importable when the tests are run directly.
sys.path.append('../../')
from gencat import gencat
class MockCat(gencat):
    """gencat stub whose dictionary-building hooks are no-ops.

    Tests assign ``zip_dict`` and ``concat_dict`` directly instead.
    """

    def makeZipDict(self):
        pass

    def makeConcatDict(self):
        pass
class test_zipFiles(unittest.TestCase):
    """Integration tests for gencat.zipFiles.

    Each test builds zip_dict / concat_dict by hand, runs zipFiles(), and
    checks the archives produced under ./test_out.
    """

    def setUp(self):
        # Start from a clean slate. The old code wrapped os.makedirs in a
        # bare ``except:`` that silently swallowed *any* error; removing the
        # directory first (ignore_errors covers "does not exist") and then
        # recreating it has the same net effect without hiding failures.
        paths = ['./test_data', './test_temp', './test_out']
        for path in paths:
            shutil.rmtree(path, ignore_errors=True)
            os.makedirs(path)
        for FILE in ['./test_data/file1.txt', './test_data/file2.txt']:
            with open(FILE, 'wb') as f:
                f.write('''THIS IS A TEST FILE.\n''')

    def _extract_and_read(self, zipname, concatname):
        # Unpack ./test_out/<zipname>.zip and return the text of one
        # concatenated member file.
        with zipfile.ZipFile('./test_out/%s.zip' % zipname, 'r') as zf:
            zf.extractall('./test_out/')
        with open('./test_out/%s/%s.txt' % (zipname, concatname), 'rU') as f:
            return f.read()

    def test_oneFile(self):
        '''
        Test that concatenation functions for a single file.
        '''
        testcat = MockCat('./test_data', './test_temp', './test_out')
        testcat.zip_dict = {'zip1': ('concat1', )}
        testcat.concat_dict = {'concat1': ('./test_data/file1.txt', )}
        testcat.zipFiles()
        self.assertTrue(os.path.isfile('./test_out/zip1.zip'))
        self.assertTrue(zipfile.is_zipfile('./test_out/zip1.zip'))
        text = self._extract_and_read('zip1', 'concat1')
        self.assertEqual(text, '\nNEWFILE\nFILENAME: file1.txt\n\nTHIS IS A TEST FILE.\n')

    def test_twoFile(self):
        '''
        Test that two text files are concatenated into one without loss of content.
        '''
        testcat = MockCat('./test_data', './test_temp', './test_out')
        testcat.zip_dict = {'zip1': ('concat1', )}
        testcat.concat_dict = {'concat1': ('./test_data/file1.txt', './test_data/file2.txt')}
        testcat.zipFiles()
        self.assertTrue(os.path.isfile('./test_out/zip1.zip'))
        self.assertTrue(zipfile.is_zipfile('./test_out/zip1.zip'))
        text = self._extract_and_read('zip1', 'concat1')
        test_text = '\nNEWFILE\nFILENAME: file1.txt\n\nTHIS IS A TEST FILE.' + \
                    '\n\nNEWFILE\nFILENAME: file2.txt\n\nTHIS IS A TEST FILE.\n'
        self.assertEqual(text, test_text)

    def test_twoZips(self):
        '''
        Test that two files can be concatenated to different text files and stored in separate zip files.
        '''
        testcat = MockCat('./test_data', './test_temp', './test_out')
        testcat.zip_dict = {'zip1': ('concat1', ), 'zip2': ('concat2', )}
        testcat.concat_dict = {'concat1': ('./test_data/file1.txt', ),
                               'concat2': ('./test_data/file2.txt', )}
        testcat.zipFiles()
        for zipname in ('zip1', 'zip2'):
            self.assertTrue(os.path.isfile('./test_out/%s.zip' % zipname))
            self.assertTrue(zipfile.is_zipfile('./test_out/%s.zip' % zipname))
        text1 = self._extract_and_read('zip1', 'concat1')
        text2 = self._extract_and_read('zip2', 'concat2')
        self.assertEqual(text1, '\nNEWFILE\nFILENAME: file1.txt\n\nTHIS IS A TEST FILE.\n')
        self.assertEqual(text2, '\nNEWFILE\nFILENAME: file2.txt\n\nTHIS IS A TEST FILE.\n')

    def test_twoConcatsOneZip(self):
        '''
        Test that two files can be concatenated to different text files and stored in the same zip file.
        '''
        testcat = MockCat('./test_data', './test_temp', './test_out')
        testcat.zip_dict = {'zip1': ('concat1', 'concat2')}
        testcat.concat_dict = {'concat1': ('./test_data/file1.txt', ),
                               'concat2': ('./test_data/file2.txt', )}
        testcat.zipFiles()
        self.assertTrue(os.path.isfile('./test_out/zip1.zip'))
        self.assertTrue(zipfile.is_zipfile('./test_out/zip1.zip'))
        text1 = self._extract_and_read('zip1', 'concat1')
        text2 = self._extract_and_read('zip1', 'concat2')
        self.assertEqual(text1, '\nNEWFILE\nFILENAME: file1.txt\n\nTHIS IS A TEST FILE.\n')
        self.assertEqual(text2, '\nNEWFILE\nFILENAME: file2.txt\n\nTHIS IS A TEST FILE.\n')

    def tearDown(self):
        # Remove every scratch directory created by setUp / the tests.
        paths = ['./test_data', './test_temp', './test_out']
        for path in paths:
            shutil.rmtree(path, ignore_errors=True)
# Allow running this test module directly: ``python test_zipFile.py``.
if __name__ == '__main__':
    unittest.main()
| 36.826667
| 105
| 0.568248
| 680
| 5,524
| 4.472059
| 0.161765
| 0.07366
| 0.061493
| 0.055245
| 0.800723
| 0.789872
| 0.768826
| 0.75633
| 0.745807
| 0.745807
| 0
| 0.01692
| 0.272448
| 5,524
| 149
| 106
| 37.073826
| 0.739736
| 0.067161
| 0
| 0.637255
| 0
| 0
| 0.283455
| 0.07373
| 0
| 0
| 0
| 0
| 0.156863
| 1
| 0.078431
| false
| 0.019608
| 0.058824
| 0
| 0.156863
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
6841ded7f748c97b1c2bed89db300573835d7f7c
| 2,272
|
py
|
Python
|
gns3.py
|
packetscaper/CCIE-RS-Lab
|
40710a1d66a9f617488ffb3deedfa530989c2f98
|
[
"MIT"
] | 31
|
2019-07-31T14:42:02.000Z
|
2022-03-29T16:38:37.000Z
|
gns3.py
|
packetscaper/CCIE-RS-Lab
|
40710a1d66a9f617488ffb3deedfa530989c2f98
|
[
"MIT"
] | 1
|
2021-03-26T16:29:04.000Z
|
2021-07-18T14:15:09.000Z
|
gns3.py
|
packetscaper/CCIE-RS-Lab
|
40710a1d66a9f617488ffb3deedfa530989c2f98
|
[
"MIT"
] | 10
|
2019-07-28T08:02:07.000Z
|
2022-02-25T10:05:19.000Z
|
import yaml
from LabConnection import *
import threading
import requests,json,time,yaml
class Gns3:
    """Thin Python 2 client for the GNS3 v2 REST API.

    Reads the controller address from yamlfiles/console.yaml and the
    project/node layout from the local topology.gns3 file.
    """

    def __init__(self):
        # gns3_host_ip comes from the console YAML; 3080 is the GNS3
        # server's HTTP port.
        with open('yamlfiles/console.yaml') as f:
            o = yaml.safe_load(f)
        self.gns3_host = "http://"+o["gns3_host_ip"]+":3080/"

    def start(self,device):
        """Start the node named ``device`` via POST .../nodes/<node_id>/start."""
        url = self.gns3_host+"v2/projects/"
        # NOTE(review): headers is built but never passed to
        # requests.request below — confirm whether it was meant to be sent.
        headers = {"Accept":"application/json","Content-Type":"application/json"}
        with open('topology.gns3') as f:
            json_output = json.loads(f.read())
        project_id = json_output["project_id"]
        url = url + project_id
        # Find the node whose display name matches ``device``; node_id is
        # unbound (NameError) if no node matches.
        for node in json_output['topology']['nodes']:
            if node['name'] == device:
                node_id = node['node_id']
        print "starting ",device
        response = requests.request("POST",url+"/nodes/"+node_id+"/start")

    def stop(self,device):
        """Stop the node named ``device`` via POST .../nodes/<node_id>/stop."""
        url = self.gns3_host+ "v2/projects/"
        # NOTE(review): headers unused here as well — see start().
        headers = {"Accept":"application/json","Content-Type":"application/json"}
        with open('topology.gns3') as f:
            json_output = json.loads(f.read())
        project_id = json_output["project_id"]
        url = url + project_id
        for node in json_output['topology']['nodes']:
            if node['name'] == device:
                node_id = node['node_id']
        print "stopping ", device
        response = requests.request("POST",url+"/nodes/"+node_id+"/stop")

    def stop_all(self):
        """Stop every node in the project, one thread per node."""
        threads = []
        with open('topology.gns3') as f:
            json_output = json.loads(f.read())
        project_id = json_output["project_id"]
        for node in json_output['topology']['nodes']:
            threads.append(threading.Thread(target=self.stop,args=(node['name'],)))
        # Launch all stop requests concurrently, then wait for completion.
        for t in threads:
            t.start()
        for t in threads:
            t.join()

    def start_all(self):
        """Start every node in the project, one thread per node."""
        threads = []
        with open('topology.gns3') as f:
            json_output = json.loads(f.read())
        project_id = json_output["project_id"]
        for node in json_output['topology']['nodes']:
            threads.append(threading.Thread(target=self.start,args=(node['name'],)))
        for t in threads:
            t.start()
        for t in threads:
            t.join()

    def reset_lab(self):
        """Restart the whole lab: stop every node, then start them all again."""
        print "reseting lab"
        self.stop_all()
        self.start_all()
| 28.049383
| 82
| 0.588468
| 291
| 2,272
| 4.446735
| 0.226804
| 0.092736
| 0.049459
| 0.061824
| 0.782071
| 0.782071
| 0.782071
| 0.782071
| 0.782071
| 0.709428
| 0
| 0.008939
| 0.261444
| 2,272
| 80
| 83
| 28.4
| 0.762217
| 0
| 0
| 0.610169
| 0
| 0
| 0.179577
| 0.009683
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.067797
| null | null | 0.050847
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
686a0cb1da548004ea57c95432efe5601832a90e
| 27
|
py
|
Python
|
meiduo_mall/meiduo_mall/settings/__init__.py
|
linbo-boy/Meiduo-Shopping-Mall
|
f5a46e742f27d33a25dffc47c3fb34914c7c59b1
|
[
"MIT"
] | null | null | null |
meiduo_mall/meiduo_mall/settings/__init__.py
|
linbo-boy/Meiduo-Shopping-Mall
|
f5a46e742f27d33a25dffc47c3fb34914c7c59b1
|
[
"MIT"
] | null | null | null |
meiduo_mall/meiduo_mall/settings/__init__.py
|
linbo-boy/Meiduo-Shopping-Mall
|
f5a46e742f27d33a25dffc47c3fb34914c7c59b1
|
[
"MIT"
] | null | null | null |
# Directory holding the configuration files, split into development (dev) and production (prod)
| 13.5
| 26
| 0.851852
| 2
| 27
| 11.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.074074
| 27
| 1
| 27
| 27
| 0.92
| 0.888889
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d7ab6ab61d4663a28865678ca623c22ecd6b84f9
| 584
|
py
|
Python
|
tests/test.py
|
davidbistolas/py-simple-audio
|
ad8afd2086c8570ecfb29e6b404fc4a5849c1255
|
[
"MIT"
] | 121
|
2015-12-01T06:18:31.000Z
|
2022-03-14T11:46:10.000Z
|
tests/test.py
|
davidbistolas/py-simple-audio
|
ad8afd2086c8570ecfb29e6b404fc4a5849c1255
|
[
"MIT"
] | 51
|
2015-09-08T18:47:44.000Z
|
2022-01-07T14:34:44.000Z
|
tests/test.py
|
davidbistolas/py-simple-audio
|
ad8afd2086c8570ecfb29e6b404fc4a5849c1255
|
[
"MIT"
] | 28
|
2016-03-09T17:10:58.000Z
|
2022-03-08T23:24:35.000Z
|
import simpleaudio as sa
import unittest
class TestSimpleaudio(unittest.TestCase):
    """Argument validation for sa.play_buffer(buffer, channels, bytes_per_sample, rate)."""

    # Minimal 16-byte silent buffer shared by every case.
    _BUF = b'\0' * 16

    def test_num_channels(self):
        # Channel counts 0 and 3 are rejected with ValueError.
        for channels in (0, 3):
            self.assertRaises(ValueError, sa.play_buffer, self._BUF, channels, 2, 44100)

    def test_bytes_per_chan(self):
        # Sample widths 0 and 5 are rejected with ValueError.
        for width in (0, 5):
            self.assertRaises(ValueError, sa.play_buffer, self._BUF, 2, width, 44100)

    def test_sample_rate(self):
        # Sample rate 44101 is rejected with ValueError.
        self.assertRaises(ValueError, sa.play_buffer, self._BUF, 2, 2, 44101)
| 32.444444
| 78
| 0.674658
| 86
| 584
| 4.44186
| 0.348837
| 0.209424
| 0.340314
| 0.366492
| 0.615183
| 0.615183
| 0.615183
| 0.615183
| 0.615183
| 0.615183
| 0
| 0.105932
| 0.191781
| 584
| 17
| 79
| 34.352941
| 0.70339
| 0
| 0
| 0
| 0
| 0
| 0.017123
| 0
| 0
| 0
| 0
| 0
| 0.454545
| 1
| 0.272727
| false
| 0
| 0.181818
| 0
| 0.545455
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0bd20c4cfdbf17e0475d21a04f34f83082a3a7fe
| 31
|
py
|
Python
|
tests/__init__.py
|
gitter-badger/vcspull
|
9584c6d40fca8e9f36970894ce620a891723d9b5
|
[
"MIT"
] | 169
|
2015-01-13T14:57:28.000Z
|
2018-02-17T13:40:58.000Z
|
tests/__init__.py
|
gitter-badger/vcspull
|
9584c6d40fca8e9f36970894ce620a891723d9b5
|
[
"MIT"
] | 198
|
2018-03-11T19:11:14.000Z
|
2022-03-26T23:01:08.000Z
|
tests/__init__.py
|
gitter-badger/vcspull
|
9584c6d40fca8e9f36970894ce620a891723d9b5
|
[
"MIT"
] | 9
|
2015-01-05T13:37:19.000Z
|
2016-11-25T05:40:01.000Z
|
from . import fixtures # noqa
| 15.5
| 30
| 0.709677
| 4
| 31
| 5.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.225806
| 31
| 1
| 31
| 31
| 0.916667
| 0.129032
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
040cb43b42a679bae7e1cea30ce8644b92a7c6b6
| 50,243
|
py
|
Python
|
txdav/common/datastore/podding/test/test_conduit.py
|
backwardn/ccs-calendarserver
|
13c706b985fb728b9aab42dc0fef85aae21921c3
|
[
"Apache-2.0"
] | 462
|
2016-08-14T17:43:24.000Z
|
2022-03-17T07:38:16.000Z
|
txdav/common/datastore/podding/test/test_conduit.py
|
backwardn/ccs-calendarserver
|
13c706b985fb728b9aab42dc0fef85aae21921c3
|
[
"Apache-2.0"
] | 72
|
2016-09-01T23:19:35.000Z
|
2020-02-05T02:09:26.000Z
|
txdav/common/datastore/podding/test/test_conduit.py
|
backwardn/ccs-calendarserver
|
13c706b985fb728b9aab42dc0fef85aae21921c3
|
[
"Apache-2.0"
] | 171
|
2016-08-16T03:50:30.000Z
|
2022-03-26T11:49:55.000Z
|
##
# Copyright (c) 2005-2017 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from pycalendar.datetime import DateTime
from pycalendar.period import Period
from twext.python.clsprop import classproperty
import txweb2.dav.test.util
from txweb2.http_headers import MimeType
from txweb2.stream import MemoryStream
from twisted.internet.defer import inlineCallbacks, succeed, returnValue
from twistedcaldav import caldavxml
from twistedcaldav.ical import Component, normalize_iCalStr
from txdav.caldav.datastore.query.filter import Filter
from txdav.caldav.datastore.scheduling.cuaddress import calendarUserFromCalendarUserAddress
from txdav.caldav.datastore.scheduling.freebusy import FreebusyQuery
from txdav.caldav.datastore.scheduling.ischedule.localservers import ServersDB, Server
from txdav.caldav.datastore.sql import ManagedAttachment, AttachmentLink
from txdav.caldav.datastore.test.common import CaptureProtocol
from txdav.common.datastore.podding.conduit import PoddingConduit, \
FailedCrossPodRequestError
from txdav.common.datastore.podding.resource import ConduitResource
from txdav.common.datastore.podding.test.util import MultiStoreConduitTest, \
FakeConduitRequest
from txdav.common.datastore.sql_tables import _BIND_STATUS_ACCEPTED
from txdav.common.datastore.test.util import populateCalendarsFrom, CommonCommonTests
from txdav.common.icommondatastore import ObjectResourceNameAlreadyExistsError, \
ObjectResourceNameNotAllowedError
from txdav.common.idirectoryservice import DirectoryRecordNotFoundError
class TestConduit (CommonCommonTests, txweb2.dav.test.util.TestCase):
    """Tests for cross-pod conduit request validation against a single store."""

    class FakeConduit(object):
        # Minimal stand-in exposing only the receive side of a conduit action.
        # NOTE(review): not referenced by the visible tests in this class.

        def recv_fake(self, j):
            # Echo the request's "echo" payload back under "back2u".
            return succeed({
                "back2u": j["echo"],
                "more": "bits",
            })

    @inlineCallbacks
    def setUp(self):
        yield super(TestConduit, self).setUp()
        # Two-server pod layout: "A" and "B" (the True/False flag presumably
        # marks which server is "this" pod — confirm against ServersDB).
        serversDB = ServersDB()
        serversDB.addServer(Server("A", "http://127.0.0.1", "A", True))
        serversDB.addServer(Server("B", "http://127.0.0.2", "B", False))
        yield self.buildStoreAndDirectory(serversDB=serversDB)
        # Expose the conduit resource at /conduit on the test site.
        self.site.resource.putChild("conduit", ConduitResource(self.site.resource, self.storeUnderTest()))
        yield self.populate()

    @inlineCallbacks
    def populate(self):
        # Seed the store from `requirements`, then discard any notifications
        # the seeding generated.
        yield populateCalendarsFrom(self.requirements, self.storeUnderTest())
        self.notifierFactory.reset()

    @classproperty(cache=False)
    def requirements(cls): # @NoSelf
        # Three users, each with one empty calendar and an inbox.
        return {
            "user01": {
                "calendar_1": {
                },
                "inbox": {
                },
            },
            "user02": {
                "calendar_1": {
                },
                "inbox": {
                },
            },
            "user03": {
                "calendar_1": {
                },
                "inbox": {
                },
            },
        }

    @inlineCallbacks
    def test_validRequest(self):
        """
        Cross-pod request fails when there is no shared secret header present.
        """
        conduit = PoddingConduit(self.storeUnderTest())
        # user01 / puser02: valid pairing, both records resolve.
        r1, r2 = yield conduit.validRequest("user01", "puser02")
        self.assertTrue(r1 is not None)
        self.assertTrue(r2 is not None)
        # Unknown owner record.
        yield self.assertFailure(
            conduit.validRequest("bogus01", "user02"),
            DirectoryRecordNotFoundError
        )
        # Unknown sharee record.
        yield self.assertFailure(
            conduit.validRequest("user01", "bogus02"),
            DirectoryRecordNotFoundError
        )
        # user01 / user02 is rejected as a cross-pod pairing.
        yield self.assertFailure(
            conduit.validRequest("user01", "user02"),
            FailedCrossPodRequestError
        )
class TestConduitToConduit(MultiStoreConduitTest):
    """Round-trip a custom "fake" action between two pods' conduits."""

    class FakeConduit(PoddingConduit):
        # Conduit variant that adds a matched send/recv pair for the
        # "fake" action used by test_fake_action below.

        @inlineCallbacks
        def send_fake(self, txn, ownerUID, shareeUID):
            # Validate the cross-pod pairing, then ship the action to the
            # sharee's pod.
            _ignore_owner, sharee = yield self.validRequest(ownerUID, shareeUID)
            action = {
                "action": "fake",
                "echo": "bravo"
            }
            result = yield self.sendRequest(txn, sharee, action)
            returnValue(result)

        def recv_fake(self, txn, j):
            # Receiving side: echo the payload back under "back2u".
            return succeed({
                "back2u": j["echo"],
                "more": "bits",
            })

    def makeConduit(self, store):
        """
        Use our own variant.
        """
        conduit = self.FakeConduit(store)
        # Route requests through the in-process fake transport instead of HTTP.
        conduit.conduitRequestClass = FakeConduitRequest
        return conduit

    @inlineCallbacks
    def test_fake_action(self):
        """
        Cross-pod request works when conduit does support the action.
        """
        # Pod 0 -> pod 1.
        store = self.theStoreUnderTest(0)
        response = yield store.conduit.send_fake(self.theTransactionUnderTest(0), "user01", "puser01")
        self.assertEqual(response, {"back2u": "bravo", "more": "bits"})
        yield self.commitTransaction(0)
        # Pod 1 -> pod 0.
        store = self.theStoreUnderTest(1)
        response = yield store.conduit.send_fake(self.theTransactionUnderTest(1), "puser01", "user01")
        self.assertEqual(response, {"back2u": "bravo", "more": "bits"})
        yield self.commitTransaction(1)
class TestConduitAPI(MultiStoreConduitTest):
"""
Test that the conduit api works.
"""
nowYear = {"now": DateTime.getToday().getYear()}
caldata1 = """BEGIN:VCALENDAR
VERSION:2.0
CALSCALE:GREGORIAN
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:uid1
DTSTART:{now:04d}0102T140000Z
DURATION:PT1H
CREATED:20060102T190000Z
DTSTAMP:20051222T210507Z
RRULE:FREQ=WEEKLY
SUMMARY:instance
END:VEVENT
END:VCALENDAR
""".replace("\n", "\r\n").format(**nowYear)
caldata1_changed = """BEGIN:VCALENDAR
VERSION:2.0
CALSCALE:GREGORIAN
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:uid1
DTSTART:{now:04d}0102T150000Z
DURATION:PT1H
CREATED:20060102T190000Z
DTSTAMP:20051222T210507Z
RRULE:FREQ=WEEKLY
SUMMARY:instance changed
END:VEVENT
END:VCALENDAR
""".replace("\n", "\r\n").format(**nowYear)
caldata2 = """BEGIN:VCALENDAR
VERSION:2.0
CALSCALE:GREGORIAN
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:uid2
DTSTART:{now:04d}0102T160000Z
DURATION:PT1H
CREATED:20060102T190000Z
DTSTAMP:20051222T210507Z
RRULE:FREQ=WEEKLY
SUMMARY:instance
END:VEVENT
END:VCALENDAR
""".replace("\n", "\r\n").format(**nowYear)
caldata3 = """BEGIN:VCALENDAR
VERSION:2.0
CALSCALE:GREGORIAN
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:uid3
DTSTART:{now:04d}0102T160000Z
DURATION:PT1H
CREATED:20060102T190000Z
DTSTAMP:20051222T210507Z
RRULE:FREQ=WEEKLY
SUMMARY:instance
END:VEVENT
END:VCALENDAR
""".replace("\n", "\r\n").format(**nowYear)
@inlineCallbacks
def test_basic_share(self):
"""
Test that basic invite/uninvite works.
"""
yield self.createShare("user01", "puser01")
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
shared = yield calendar1.shareeView("puser01")
self.assertEqual(shared.shareStatus(), _BIND_STATUS_ACCEPTED)
yield self.commitTransaction(0)
shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
self.assertTrue(shared is not None)
self.assertTrue(shared.external())
yield self.commitTransaction(1)
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
yield calendar1.uninviteUIDFromShare("puser01")
yield self.commitTransaction(0)
shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
self.assertTrue(shared is None)
yield self.commitTransaction(1)
@inlineCallbacks
def test_countobjects(self):
"""
Test that action=countobjects works.
"""
yield self.createShare("user01", "puser01")
shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
count = yield shared.countObjectResources()
self.assertEqual(count, 0)
yield self.commitTransaction(1)
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
yield calendar1.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1))
count = yield calendar1.countObjectResources()
self.assertEqual(count, 1)
yield self.commitTransaction(0)
shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
count = yield shared.countObjectResources()
self.assertEqual(count, 1)
yield self.commitTransaction(1)
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
object1 = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(0), home="user01", calendar_name="calendar", name="1.ics")
yield object1.remove()
count = yield calendar1.countObjectResources()
self.assertEqual(count, 0)
yield self.commitTransaction(0)
shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
count = yield shared.countObjectResources()
self.assertEqual(count, 0)
yield self.commitTransaction(1)
@inlineCallbacks
def test_listobjects(self):
"""
Test that action=listobjects works.
"""
yield self.createShare("user01", "puser01")
shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
objects = yield shared.listObjectResources()
self.assertEqual(set(objects), set())
yield self.commitTransaction(1)
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
yield calendar1.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1))
yield calendar1.createCalendarObjectWithName("2.ics", Component.fromString(self.caldata2))
objects = yield calendar1.listObjectResources()
self.assertEqual(set(objects), set(("1.ics", "2.ics",)))
yield self.commitTransaction(0)
shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
objects = yield shared.listObjectResources()
self.assertEqual(set(objects), set(("1.ics", "2.ics",)))
yield self.commitTransaction(1)
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
object1 = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(0), home="user01", calendar_name="calendar", name="1.ics")
yield object1.remove()
objects = yield calendar1.listObjectResources()
self.assertEqual(set(objects), set(("2.ics",)))
yield self.commitTransaction(0)
shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
objects = yield shared.listObjectResources()
self.assertEqual(set(objects), set(("2.ics",)))
yield self.commitTransaction(1)
@inlineCallbacks
def test_synctoken(self):
"""
Test that action=synctoken works.
"""
yield self.createShare("user01", "puser01")
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
token1_1 = yield calendar1.syncTokenRevision()
yield self.commitTransaction(0)
shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
token2_1 = yield shared.syncTokenRevision()
yield self.commitTransaction(1)
self.assertEqual(token1_1, token2_1)
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
yield calendar1.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1))
yield self.commitTransaction(0)
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
token1_2 = yield calendar1.syncTokenRevision()
yield self.commitTransaction(0)
shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
token2_2 = yield shared.syncTokenRevision()
yield self.commitTransaction(1)
self.assertNotEqual(token1_1, token1_2)
self.assertEqual(token1_2, token2_2)
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
object1 = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(0), home="user01", calendar_name="calendar", name="1.ics")
yield object1.remove()
count = yield calendar1.countObjectResources()
self.assertEqual(count, 0)
yield self.commitTransaction(0)
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
token1_3 = yield calendar1.syncTokenRevision()
yield self.commitTransaction(0)
shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
token2_3 = yield shared.syncTokenRevision()
yield self.commitTransaction(1)
self.assertNotEqual(token1_1, token1_3)
self.assertNotEqual(token1_2, token1_3)
self.assertEqual(token1_3, token2_3)
@inlineCallbacks
def test_resourcenamessincerevision(self):
"""
Test that action=synctoken works.
"""
yield self.createShare("user01", "puser01")
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
token1_1 = yield calendar1.syncToken()
yield self.commitTransaction(0)
shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
token2_1 = yield shared.syncToken()
yield self.commitTransaction(1)
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
yield calendar1.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1))
yield self.commitTransaction(0)
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
token1_2 = yield calendar1.syncToken()
names1 = yield calendar1.resourceNamesSinceToken(token1_1)
self.assertEqual(names1, ([u"1.ics"], [], [],))
yield self.commitTransaction(0)
shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
token2_2 = yield shared.syncToken()
names2 = yield shared.resourceNamesSinceToken(token2_1)
self.assertEqual(names2, ([u"1.ics"], [], [],))
yield self.commitTransaction(1)
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
object1 = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(0), home="user01", calendar_name="calendar", name="1.ics")
yield object1.remove()
count = yield calendar1.countObjectResources()
self.assertEqual(count, 0)
yield self.commitTransaction(0)
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
token1_3 = yield calendar1.syncToken()
names1 = yield calendar1.resourceNamesSinceToken(token1_2)
self.assertEqual(names1, ([], [u"1.ics"], [],))
yield self.commitTransaction(0)
shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
token2_3 = yield shared.syncToken()
names2 = yield shared.resourceNamesSinceToken(token2_2)
self.assertEqual(names2, ([], [u"1.ics"], [],))
yield self.commitTransaction(1)
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
names1 = yield calendar1.resourceNamesSinceToken(token1_3)
self.assertEqual(names1, ([], [], [],))
yield self.commitTransaction(0)
shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
names2 = yield shared.resourceNamesSinceToken(token2_3)
self.assertEqual(names2, ([], [], [],))
yield self.commitTransaction(1)
@inlineCallbacks
def test_resourceuidforname(self):
"""
Test that action=resourceuidforname works.
"""
yield self.createShare("user01", "puser01")
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
yield calendar1.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1))
yield self.commitTransaction(0)
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
uid = yield calendar1.resourceUIDForName("1.ics")
self.assertEqual(uid, "uid1")
uid = yield calendar1.resourceUIDForName("2.ics")
self.assertTrue(uid is None)
yield self.commitTransaction(0)
shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
uid = yield shared.resourceUIDForName("1.ics")
self.assertEqual(uid, "uid1")
uid = yield shared.resourceUIDForName("2.ics")
self.assertTrue(uid is None)
yield self.commitTransaction(1)
@inlineCallbacks
def test_resourcenameforuid(self):
"""
Test that action=resourcenameforuid works.
"""
yield self.createShare("user01", "puser01")
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
yield calendar1.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1))
yield self.commitTransaction(0)
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
name = yield calendar1.resourceNameForUID("uid1")
self.assertEqual(name, "1.ics")
name = yield calendar1.resourceNameForUID("uid2")
self.assertTrue(name is None)
yield self.commitTransaction(0)
shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
name = yield shared.resourceNameForUID("uid1")
self.assertEqual(name, "1.ics")
name = yield shared.resourceNameForUID("uid2")
self.assertTrue(name is None)
yield self.commitTransaction(1)
@inlineCallbacks
def test_search(self):
"""
Test that action=resourcenameforuid works.
"""
yield self.createShare("user01", "puser01")
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
yield calendar1.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1))
yield self.commitTransaction(0)
filter = caldavxml.Filter(
caldavxml.ComponentFilter(
*[caldavxml.ComponentFilter(
**{"name": ("VEVENT", "VFREEBUSY", "VAVAILABILITY")}
)],
**{"name": "VCALENDAR"}
)
)
filter = Filter(filter)
calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
names = [item[0] for item in (yield calendar1.search(filter))]
self.assertEqual(names, ["1.ics", ])
yield self.commitTransaction(0)
shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
names = [item[0] for item in (yield shared.search(filter))]
self.assertEqual(names, ["1.ics", ])
yield self.commitTransaction(1)
@inlineCallbacks
def test_loadallobjects(self):
    """
    Test that action=loadallobjects works.

    Creates two events on the owner's calendar, loads all object resources
    through the sharee's view, and checks that subsequent by-name/by-UID/
    by-ID lookups return the already-loaded (cached) resource objects.
    Then removes one event and re-checks the same lookups.
    """
    yield self.createShare("user01", "puser01")

    # Owner creates two events on store 0.
    calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
    resource1 = yield calendar1.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1))
    resource_id1 = resource1.id()
    resource2 = yield calendar1.createCalendarObjectWithName("2.ics", Component.fromString(self.caldata2))
    resource_id2 = resource2.id()
    yield self.commitTransaction(0)

    # Load all objects via the sharee's calendar and index them.
    shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
    resources = yield shared.objectResources()
    byname = dict([(obj.name(), obj) for obj in resources])
    byuid = dict([(obj.uid(), obj) for obj in resources])
    self.assertEqual(len(resources), 2)
    self.assertEqual(set([obj.name() for obj in resources]), set(("1.ics", "2.ics",)))
    self.assertEqual(set([obj.uid() for obj in resources]), set(("uid1", "uid2",)))
    self.assertEqual(set([obj.id() for obj in resources]), set((resource_id1, resource_id2,)))

    # Lookups must return the identical cached objects (identity asserts).
    resource = yield shared.objectResourceWithName("1.ics")
    self.assertTrue(resource is byname["1.ics"])
    resource = yield shared.objectResourceWithName("2.ics")
    self.assertTrue(resource is byname["2.ics"])
    resource = yield shared.objectResourceWithName("Missing.ics")
    self.assertTrue(resource is None)
    resource = yield shared.objectResourceWithUID("uid1")
    self.assertTrue(resource is byuid["uid1"])
    resource = yield shared.objectResourceWithUID("uid2")
    self.assertTrue(resource is byuid["uid2"])
    resource = yield shared.objectResourceWithUID("uid-missing")
    self.assertTrue(resource is None)
    resource = yield shared.objectResourceWithID(resource_id1)
    self.assertTrue(resource is byname["1.ics"])
    resource = yield shared.objectResourceWithID(resource_id2)
    self.assertTrue(resource is byname["2.ics"])
    resource = yield shared.objectResourceWithID(0)
    self.assertTrue(resource is None)
    yield self.commitTransaction(1)

    # Owner removes the first event.
    object1 = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(0), home="user01", calendar_name="calendar", name="1.ics")
    yield object1.remove()
    yield self.commitTransaction(0)

    # Re-run the loads and lookups: only 2.ics should remain visible.
    shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
    resources = yield shared.objectResources()
    byname = dict([(obj.name(), obj) for obj in resources])
    byuid = dict([(obj.uid(), obj) for obj in resources])
    self.assertEqual(len(resources), 1)
    self.assertEqual(set([obj.name() for obj in resources]), set(("2.ics",)))
    self.assertEqual(set([obj.uid() for obj in resources]), set(("uid2",)))
    self.assertEqual(set([obj.id() for obj in resources]), set((resource_id2,)))
    resource = yield shared.objectResourceWithName("1.ics")
    self.assertTrue(resource is None)
    resource = yield shared.objectResourceWithName("2.ics")
    self.assertTrue(resource is byname["2.ics"])
    resource = yield shared.objectResourceWithName("Missing.ics")
    self.assertTrue(resource is None)
    resource = yield shared.objectResourceWithUID("uid1")
    self.assertTrue(resource is None)
    resource = yield shared.objectResourceWithUID("uid2")
    self.assertTrue(resource is byuid["uid2"])
    resource = yield shared.objectResourceWithUID("uid-missing")
    self.assertTrue(resource is None)
    resource = yield shared.objectResourceWithID(resource_id1)
    self.assertTrue(resource is None)
    resource = yield shared.objectResourceWithID(resource_id2)
    self.assertTrue(resource is byname["2.ics"])
    resource = yield shared.objectResourceWithID(0)
    self.assertTrue(resource is None)
    yield self.commitTransaction(1)
@inlineCallbacks
def test_loadallobjectswithnames(self):
    """
    Test that action=loadallobjectswithnames works.

    Creates three events, then loads only a named subset ("1.ics", "3.ics")
    through the sharee's view and verifies by-name/by-UID/by-ID lookups
    return the cached objects. Repeats after removing "1.ics".
    """
    yield self.createShare("user01", "puser01")

    # Owner creates three events on store 0.
    calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
    resource1 = yield calendar1.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1))
    resource_id1 = resource1.id()
    yield calendar1.createCalendarObjectWithName("2.ics", Component.fromString(self.caldata2))
    resource3 = yield calendar1.createCalendarObjectWithName("3.ics", Component.fromString(self.caldata3))
    resource_id3 = resource3.id()
    yield self.commitTransaction(0)

    # Sanity check: all three are visible via the share.
    shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
    resources = yield shared.objectResources()
    self.assertEqual(len(resources), 3)
    yield self.commitTransaction(1)

    # Load only the named subset and index it.
    shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
    resources = yield shared.objectResourcesWithNames(("1.ics", "3.ics",))
    byname = dict([(obj.name(), obj) for obj in resources])
    byuid = dict([(obj.uid(), obj) for obj in resources])
    self.assertEqual(len(resources), 2)
    self.assertEqual(set([obj.name() for obj in resources]), set(("1.ics", "3.ics",)))
    self.assertEqual(set([obj.uid() for obj in resources]), set(("uid1", "uid3",)))
    self.assertEqual(set([obj.id() for obj in resources]), set((resource_id1, resource_id3,)))

    # Lookups must return the identical cached objects (identity asserts).
    resource = yield shared.objectResourceWithName("1.ics")
    self.assertTrue(resource is byname["1.ics"])
    resource = yield shared.objectResourceWithName("3.ics")
    self.assertTrue(resource is byname["3.ics"])
    resource = yield shared.objectResourceWithName("Missing.ics")
    self.assertTrue(resource is None)
    resource = yield shared.objectResourceWithUID("uid1")
    self.assertTrue(resource is byuid["uid1"])
    resource = yield shared.objectResourceWithUID("uid3")
    self.assertTrue(resource is byuid["uid3"])
    resource = yield shared.objectResourceWithUID("uid-missing")
    self.assertTrue(resource is None)
    resource = yield shared.objectResourceWithID(resource_id1)
    self.assertTrue(resource is byname["1.ics"])
    resource = yield shared.objectResourceWithID(resource_id3)
    self.assertTrue(resource is byname["3.ics"])
    resource = yield shared.objectResourceWithID(0)
    self.assertTrue(resource is None)
    yield self.commitTransaction(1)

    # Owner removes the first event.
    object1 = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(0), home="user01", calendar_name="calendar", name="1.ics")
    yield object1.remove()
    yield self.commitTransaction(0)

    # Re-run the subset load: only 3.ics should remain.
    shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
    resources = yield shared.objectResourcesWithNames(("1.ics", "3.ics",))
    byname = dict([(obj.name(), obj) for obj in resources])
    byuid = dict([(obj.uid(), obj) for obj in resources])
    self.assertEqual(len(resources), 1)
    self.assertEqual(set([obj.name() for obj in resources]), set(("3.ics",)))
    self.assertEqual(set([obj.uid() for obj in resources]), set(("uid3",)))
    self.assertEqual(set([obj.id() for obj in resources]), set((resource_id3,)))
    resource = yield shared.objectResourceWithName("1.ics")
    self.assertTrue(resource is None)
    resource = yield shared.objectResourceWithName("3.ics")
    self.assertTrue(resource is byname["3.ics"])
    resource = yield shared.objectResourceWithName("Missing.ics")
    self.assertTrue(resource is None)
    resource = yield shared.objectResourceWithUID("uid1")
    self.assertTrue(resource is None)
    resource = yield shared.objectResourceWithUID("uid3")
    self.assertTrue(resource is byuid["uid3"])
    resource = yield shared.objectResourceWithUID("uid-missing")
    self.assertTrue(resource is None)
    resource = yield shared.objectResourceWithID(resource_id1)
    self.assertTrue(resource is None)
    resource = yield shared.objectResourceWithID(resource_id3)
    self.assertTrue(resource is byname["3.ics"])
    resource = yield shared.objectResourceWithID(0)
    self.assertTrue(resource is None)
    yield self.commitTransaction(1)
@inlineCallbacks
def test_objectwith(self):
    """
    Test that action=objectwith works.

    Checks objectResourceWithName/WithUID/WithID on the sharee's view both
    before and after the owner removes the event; each lookup runs in its
    own transaction so nothing is served from a prior in-txn cache.
    """
    yield self.createShare("user01", "puser01")

    # Owner creates the event on store 0.
    calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
    resource = yield calendar1.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1))
    resource_id = resource.id()
    yield self.commitTransaction(0)

    # Lookup by name.
    shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
    resource = yield shared.objectResourceWithName("1.ics")
    self.assertTrue(resource is not None)
    self.assertEqual(resource.name(), "1.ics")
    self.assertEqual(resource.uid(), "uid1")
    resource = yield shared.objectResourceWithName("2.ics")
    self.assertTrue(resource is None)
    yield self.commitTransaction(1)

    # Lookup by UID.
    shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
    resource = yield shared.objectResourceWithUID("uid1")
    self.assertTrue(resource is not None)
    self.assertEqual(resource.name(), "1.ics")
    self.assertEqual(resource.uid(), "uid1")
    resource = yield shared.objectResourceWithUID("uid2")
    self.assertTrue(resource is None)
    yield self.commitTransaction(1)

    # Lookup by resource id.
    shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
    resource = yield shared.objectResourceWithID(resource_id)
    self.assertTrue(resource is not None)
    self.assertEqual(resource.name(), "1.ics")
    self.assertEqual(resource.uid(), "uid1")
    resource = yield shared.objectResourceWithID(0)
    self.assertTrue(resource is None)
    yield self.commitTransaction(1)

    # Owner removes the event; all lookups should now return None.
    object1 = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(0), home="user01", calendar_name="calendar", name="1.ics")
    yield object1.remove()
    yield self.commitTransaction(0)

    shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
    resource = yield shared.objectResourceWithName("1.ics")
    self.assertTrue(resource is None)
    yield self.commitTransaction(1)

    shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
    resource = yield shared.objectResourceWithUID("uid1")
    self.assertTrue(resource is None)
    yield self.commitTransaction(1)

    shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
    resource = yield shared.objectResourceWithID(resource_id)
    self.assertTrue(resource is None)
    yield self.commitTransaction(1)
@inlineCallbacks
def test_create(self):
    """
    Test that action=create works.

    The sharee creates an event through the shared calendar; the event must
    then be visible both via the share (store 1) and in the owner's actual
    calendar (store 0), with the same resource id throughout.
    """
    yield self.createShare("user01", "puser01")

    # Sharee creates the event through the share.
    shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
    resource = yield shared.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1))
    resource_id = resource.id()
    self.assertTrue(resource is not None)
    self.assertEqual(resource.name(), "1.ics")
    self.assertEqual(resource.uid(), "uid1")
    # Stored data must match what was submitted (no server-side rewrite).
    self.assertFalse(resource._componentChanged)
    yield self.commitTransaction(1)

    # Visible via the share in a fresh transaction.
    shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
    resource = yield shared.objectResourceWithUID("uid1")
    self.assertTrue(resource is not None)
    self.assertEqual(resource.name(), "1.ics")
    self.assertEqual(resource.uid(), "uid1")
    self.assertEqual(resource.id(), resource_id)
    yield self.commitTransaction(1)

    # Visible in the owner's calendar with the same id.
    object1 = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(0), home="user01", calendar_name="calendar", name="1.ics")
    self.assertTrue(object1 is not None)
    self.assertEqual(object1.name(), "1.ics")
    self.assertEqual(object1.uid(), "uid1")
    self.assertEqual(object1.id(), resource_id)
    yield self.commitTransaction(0)
@inlineCallbacks
def test_create_exception(self):
    """
    Test that action=create fails when a duplicate name is used.

    Also checks that a name starting with "." (reserved) is rejected with
    ObjectResourceNameNotAllowedError. Each failed attempt aborts its
    transaction.
    """
    yield self.createShare("user01", "puser01")

    # Owner creates "1.ics" first so the sharee's create collides.
    calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
    yield calendar1.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1))
    yield self.commitTransaction(0)

    # Duplicate name via the share must fail.
    shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
    yield self.failUnlessFailure(shared.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1)), ObjectResourceNameAlreadyExistsError)
    yield self.abortTransaction(1)

    # Leading-dot name is not allowed.
    shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
    yield self.failUnlessFailure(shared.createCalendarObjectWithName(".2.ics", Component.fromString(self.caldata2)), ObjectResourceNameNotAllowedError)
    yield self.abortTransaction(1)
@inlineCallbacks
def test_setcomponent(self):
    """
    Test that action=setcomponent works.

    The sharee reads the owner-created event, rewrites it with changed data
    via setComponent(), and the change must be visible both through the
    share and in the owner's calendar.
    """
    yield self.createShare("user01", "puser01")

    # Owner creates the event on store 0.
    calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
    yield calendar1.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1))
    yield self.commitTransaction(0)

    # Sharee reads the original data.
    shared_object = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", calendar_name="shared-calendar", name="1.ics")
    ical = yield shared_object.component()
    self.assertTrue(isinstance(ical, Component))
    self.assertEqual(normalize_iCalStr(str(ical)), normalize_iCalStr(self.caldata1))
    yield self.commitTransaction(1)

    # Sharee updates the component; server reports no implicit rewrite.
    shared_object = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", calendar_name="shared-calendar", name="1.ics")
    changed = yield shared_object.setComponent(Component.fromString(self.caldata1_changed))
    self.assertFalse(changed)
    ical = yield shared_object.component()
    self.assertTrue(isinstance(ical, Component))
    self.assertEqual(normalize_iCalStr(str(ical)), normalize_iCalStr(self.caldata1_changed))
    yield self.commitTransaction(1)

    # Owner sees the changed data.
    object1 = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(0), home="user01", calendar_name="calendar", name="1.ics")
    ical = yield object1.component()
    self.assertTrue(isinstance(ical, Component))
    self.assertEqual(normalize_iCalStr(str(ical)), normalize_iCalStr(self.caldata1_changed))
    yield self.commitTransaction(0)
@inlineCallbacks
def test_component(self):
    """
    Test that action=component works.

    The sharee must be able to read the owner-created event's iCalendar
    data through the shared calendar.
    """
    yield self.createShare("user01", "puser01")

    # Owner creates the event on store 0.
    calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
    yield calendar1.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1))
    yield self.commitTransaction(0)

    # Sharee reads it back; data must round-trip (after normalization).
    shared_object = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", calendar_name="shared-calendar", name="1.ics")
    ical = yield shared_object.component()
    self.assertTrue(isinstance(ical, Component))
    self.assertEqual(normalize_iCalStr(str(ical)), normalize_iCalStr(self.caldata1))
    yield self.commitTransaction(1)
@inlineCallbacks
def test_remove(self):
    """
    Test that action=remove works.

    The sharee removes the event through the shared calendar; afterwards
    it must be gone from both the share and the owner's calendar.
    """
    yield self.createShare("user01", "puser01")

    # Owner creates the event on store 0.
    calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
    yield calendar1.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1))
    yield self.commitTransaction(0)

    # Sharee removes it through the share.
    shared_object = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", calendar_name="shared-calendar", name="1.ics")
    yield shared_object.remove()
    yield self.commitTransaction(1)

    # Gone from the share...
    shared_object = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", calendar_name="shared-calendar", name="1.ics")
    self.assertTrue(shared_object is None)
    yield self.commitTransaction(1)

    # ...and from the owner's calendar.
    object1 = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(0), home="user01", calendar_name="calendar", name="1.ics")
    self.assertTrue(object1 is None)
    yield self.commitTransaction(0)
@inlineCallbacks
def test_freebusy(self):
    """
    Test that a free-busy query across the shared calendar works.

    Creates a one-hour event in the owner's calendar, then runs a
    FreebusyQuery over a one-day window as the sharee and checks exactly
    one busy period is reported.
    """
    yield self.createShare("user01", "puser01")

    # Owner creates the event on store 0.
    calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
    yield calendar1.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1))
    yield self.commitTransaction(0)

    # One-day query window around the event's date.
    fbstart = "{now:04d}0102T000000Z".format(**self.nowYear)
    fbend = "{now:04d}0103T000000Z".format(**self.nowYear)
    shared = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", name="shared-calendar")
    # fbinfo slots: [0] busy, [1] busy-tentative, [2] busy-unavailable.
    fbinfo = FreebusyQuery.FBInfo([], [], [])
    timerange = Period(DateTime.parseText(fbstart), DateTime.parseText(fbend))
    organizer = recipient = (yield calendarUserFromCalendarUserAddress("mailto:puser01@example.com", self.theTransactionUnderTest(1)))
    freebusy = FreebusyQuery(organizer=organizer, recipient=recipient, timerange=timerange)
    matchtotal = (yield freebusy.generateFreeBusyInfo([shared, ], fbinfo))
    self.assertEqual(matchtotal, 1)
    self.assertEqual(fbinfo[0], [Period.parseText("{now:04d}0102T140000Z/PT1H".format(**self.nowYear)), ])
    self.assertEqual(len(fbinfo[1]), 0)
    self.assertEqual(len(fbinfo[2]), 0)
    yield self.commitTransaction(1)
def attachmentToString(self, attachment):
    """
    Convenience to convert an L{IAttachment} to a string.

    @param attachment: an L{IAttachment} provider to convert into a string.
    @return: a L{Deferred} that fires with the contents of the attachment.
    @rtype: L{Deferred} firing C{bytes}
    """
    # Stream the attachment body into a capture protocol; its deferred
    # fires with the accumulated bytes once retrieval completes.
    capture = CaptureProtocol()
    attachment.retrieve(capture)
    return capture.deferred
@inlineCallbacks
def test_add_attachment(self):
    """
    Test that action=add-attachment works.

    The sharee adds a managed attachment through the shared calendar
    object; the attachment data must land in the owner's store (store 0)
    and the location URL must live under the owner's dropbox.
    """
    yield self.createShare("user01", "puser01")

    # Owner creates the event on store 0.
    calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
    object1 = yield calendar1.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1))
    resourceID = object1.id()
    yield self.commitTransaction(0)

    # Sharee adds the attachment; the cross-store proxy type is returned.
    shared_object = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", calendar_name="shared-calendar", name="1.ics")
    data = "Here is some text."
    attachment, location = yield shared_object.addAttachment(None, MimeType.fromString("text/plain"), "test.txt", MemoryStream(data))
    managedID = attachment.managedID()
    from txdav.caldav.datastore.sql_external import ManagedAttachmentExternal
    self.assertTrue(isinstance(attachment, ManagedAttachmentExternal))
    self.assertEqual(attachment.size(), len(data))
    self.assertTrue("user01/dropbox/" in location)
    yield self.commitTransaction(1)

    # Verify the attachment is stored against the owner's resource.
    cobjs = yield ManagedAttachment.referencesTo(self.theTransactionUnderTest(0), managedID)
    self.assertEqual(cobjs, set((resourceID,)))
    attachment = yield ManagedAttachment.load(self.theTransactionUnderTest(0), resourceID, managedID)
    self.assertEqual(attachment.name(), "test.txt")
    data = yield self.attachmentToString(attachment)
    self.assertEqual(data, "Here is some text.")
    yield self.commitTransaction(0)
@inlineCallbacks
def test_update_attachment(self):
    """
    Test that action=update-attachment works.

    The owner attaches a file, the sharee replaces it via
    updateAttachment() on the shared calendar object, and the new content
    must be readable from the owner's store (store 0).
    """
    yield self.createShare("user01", "puser01")

    # Owner creates the event on store 0.
    calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
    yield calendar1.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1))
    yield self.commitTransaction(0)

    # Owner adds the initial attachment.
    object1 = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(0), home="user01", calendar_name="calendar", name="1.ics")
    resourceID = object1.id()
    attachment, _ignore_location = yield object1.addAttachment(None, MimeType.fromString("text/plain"), "test.txt", MemoryStream("Here is some text."))
    managedID = attachment.managedID()
    yield self.commitTransaction(0)

    # Sharee updates the attachment through the share.
    shared_object = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", calendar_name="shared-calendar", name="1.ics")
    data = "Here is some more text."
    attachment, location = yield shared_object.updateAttachment(managedID, MimeType.fromString("text/plain"), "test.txt", MemoryStream(data))
    managedID = attachment.managedID()
    from txdav.caldav.datastore.sql_external import ManagedAttachmentExternal
    self.assertTrue(isinstance(attachment, ManagedAttachmentExternal))
    self.assertEqual(attachment.size(), len(data))
    self.assertTrue("user01/dropbox/" in location)
    yield self.commitTransaction(1)

    # Verify the new content from the owner's store. Use
    # theTransactionUnderTest(0) consistently here (the original called
    # self.transactionUnderTest(), unlike every other access in this
    # multi-store test, and the matching commit below is for store 0).
    cobjs = yield ManagedAttachment.referencesTo(self.theTransactionUnderTest(0), managedID)
    self.assertEqual(cobjs, set((resourceID,)))
    attachment = yield ManagedAttachment.load(self.theTransactionUnderTest(0), resourceID, managedID)
    self.assertEqual(attachment.name(), "test.txt")
    data = yield self.attachmentToString(attachment)
    self.assertEqual(data, "Here is some more text.")
    yield self.commitTransaction(0)
@inlineCallbacks
def test_remove_attachment(self):
    """
    Test that action=remove-attachment works.

    The owner attaches a file; the sharee removes it through the shared
    calendar object; the attachment must then be gone from the owner's
    store.
    """
    yield self.createShare("user01", "puser01")

    # Owner creates the event on store 0.
    calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
    yield calendar1.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1))
    yield self.commitTransaction(0)

    # Owner adds the attachment.
    object1 = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(0), home="user01", calendar_name="calendar", name="1.ics")
    resourceID = object1.id()
    attachment, _ignore_location = yield object1.addAttachment(None, MimeType.fromString("text/plain"), "test.txt", MemoryStream("Here is some text."))
    managedID = attachment.managedID()
    yield self.commitTransaction(0)

    # Sharee removes it through the share.
    shared_object = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", calendar_name="shared-calendar", name="1.ics")
    yield shared_object.removeAttachment(None, managedID)
    yield self.commitTransaction(1)

    # No references remain and the attachment can no longer be loaded.
    cobjs = yield ManagedAttachment.referencesTo(self.theTransactionUnderTest(0), managedID)
    self.assertEqual(cobjs, set())
    attachment = yield ManagedAttachment.load(self.theTransactionUnderTest(0), resourceID, managedID)
    self.assertTrue(attachment is None)
    yield self.commitTransaction(0)
@inlineCallbacks
def test_get_all_attachments(self):
    """
    Test that action=get-all-attachments works.

    After the owner attaches a file, the sharee's view of the owner home
    must enumerate that single attachment with the right type and name.
    """
    yield self.createShare("user01", "puser01")

    # Owner creates the event on store 0.
    calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
    yield calendar1.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1))
    yield self.commitTransaction(0)

    # Owner adds the attachment.
    object1 = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(0), home="user01", calendar_name="calendar", name="1.ics")
    yield object1.addAttachment(None, MimeType.fromString("text/plain"), "test.txt", MemoryStream("Here is some text."))
    yield self.commitTransaction(0)

    # Enumerate attachments via the sharee's view of the owner home.
    shared_object = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", calendar_name="shared-calendar", name="1.ics")
    attachments = yield shared_object.ownerHome().getAllAttachments()
    self.assertEqual(len(attachments), 1)
    self.assertTrue(isinstance(attachments[0], ManagedAttachment))
    self.assertEqual(attachments[0].contentType(), MimeType.fromString("text/plain"))
    self.assertEqual(attachments[0].name(), "test.txt")
    yield self.commitTransaction(1)
@inlineCallbacks
def test_get_attachment_data(self):
    """
    Test that action=get-attachment-data works.

    The owner attaches a file; the sharee constructs a local attachment
    shell and streams the remote attachment's data into it via
    readAttachmentData(). No explicit asserts — the test passes if the
    read completes without error.
    """
    yield self.createShare("user01", "puser01")

    # Owner creates the event on store 0.
    calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
    yield calendar1.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1))
    yield self.commitTransaction(0)

    # Owner adds the attachment; remember its id on the owner's store.
    object1 = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(0), home="user01", calendar_name="calendar", name="1.ics")
    attachment, _ignore_location = yield object1.addAttachment(None, MimeType.fromString("text/plain"), "test.txt", MemoryStream("Here is some text."))
    remote_id = attachment.id()
    yield self.commitTransaction(0)

    # Sharee builds an empty local attachment record and pulls the data.
    home1 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(1), name="puser01")
    shared_object = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", calendar_name="shared-calendar", name="1.ics")
    attachment = yield ManagedAttachment._create(self.theTransactionUnderTest(1), None, home1.id())
    attachment._contentType = MimeType.fromString("text/plain")
    attachment._name = "test.txt"
    yield shared_object.ownerHome().readAttachmentData(remote_id, attachment)
    yield self.commitTransaction(1)
@inlineCallbacks
def test_get_attachment_links(self):
    """
    Test that action=get-attachment-links works.

    After the owner attaches a file, the sharee's view of the owner home
    must report one AttachmentLink tying the attachment (by id and
    managed-id) to the owning calendar object.
    """
    yield self.createShare("user01", "puser01")

    # Owner creates the event on store 0.
    calendar1 = yield self.calendarUnderTest(txn=self.theTransactionUnderTest(0), home="user01", name="calendar")
    cobj1 = yield calendar1.createCalendarObjectWithName("1.ics", Component.fromString(self.caldata1))
    calobjID = cobj1.id()
    yield self.commitTransaction(0)

    # Owner adds the attachment; remember its ids.
    object1 = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(0), home="user01", calendar_name="calendar", name="1.ics")
    attachment, _ignore_location = yield object1.addAttachment(None, MimeType.fromString("text/plain"), "test.txt", MemoryStream("Here is some text."))
    attID = attachment.id()
    managedID = attachment.managedID()
    yield self.commitTransaction(0)

    # Fetch the links through the sharee's view of the owner home.
    shared_object = yield self.calendarObjectUnderTest(txn=self.theTransactionUnderTest(1), home="puser01", calendar_name="shared-calendar", name="1.ics")
    links = yield shared_object.ownerHome().getAttachmentLinks()
    self.assertEqual(len(links), 1)
    self.assertTrue(isinstance(links[0], AttachmentLink))
    self.assertEqual(links[0]._attachmentID, attID)
    self.assertEqual(links[0]._managedID, managedID)
    self.assertEqual(links[0]._calendarObjectID, calobjID)
    yield self.commitTransaction(1)
| 45.551224
| 158
| 0.690484
| 5,138
| 50,243
| 6.715064
| 0.080576
| 0.059997
| 0.085212
| 0.059678
| 0.810272
| 0.787809
| 0.784592
| 0.77297
| 0.751609
| 0.723291
| 0
| 0.030313
| 0.191071
| 50,243
| 1,102
| 159
| 45.592559
| 0.818591
| 0.038214
| 0
| 0.681648
| 0
| 0
| 0.099822
| 0.011544
| 0
| 0
| 0
| 0
| 0.203496
| 1
| 0.041199
| false
| 0
| 0.029963
| 0.003745
| 0.089888
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
041a41c21a479aa773799cd09b5a93ee5a8a115d
| 208
|
py
|
Python
|
Lib/ufo2ft/kernFeatureWriter.py
|
belluzj/ufo2ft
|
72cab16137c3da797b8384dd66f9fbcf65ad9978
|
[
"MIT"
] | null | null | null |
Lib/ufo2ft/kernFeatureWriter.py
|
belluzj/ufo2ft
|
72cab16137c3da797b8384dd66f9fbcf65ad9978
|
[
"MIT"
] | null | null | null |
Lib/ufo2ft/kernFeatureWriter.py
|
belluzj/ufo2ft
|
72cab16137c3da797b8384dd66f9fbcf65ad9978
|
[
"MIT"
] | null | null | null |
"""This module is deprecated! It's kept here only for backward compatibility.
Please import the new ufo2ft.featureWriters module.
"""
from ufo2ft.featureWriters.kernFeatureWriter import * # pragma: no cover
| 41.6
| 77
| 0.793269
| 27
| 208
| 6.111111
| 0.851852
| 0.242424
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.01105
| 0.129808
| 208
| 4
| 78
| 52
| 0.900552
| 0.692308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
9bdd9739f584772f354f12dda8704ca96ca7e7ac
| 7,380
|
py
|
Python
|
im2mesh/onet_multi_layers_predict/models/feature_extractor.py
|
bezirganyan/Occup-R2N2
|
9adf6d0a9cc6f884fc17c80b24e72060dbacf3c1
|
[
"MIT"
] | 2
|
2020-06-03T20:54:26.000Z
|
2021-09-15T06:57:57.000Z
|
im2mesh/onet_multi_layers_predict/models/feature_extractor.py
|
bezirganyan/Occup-R2N2
|
9adf6d0a9cc6f884fc17c80b24e72060dbacf3c1
|
[
"MIT"
] | null | null | null |
im2mesh/onet_multi_layers_predict/models/feature_extractor.py
|
bezirganyan/Occup-R2N2
|
9adf6d0a9cc6f884fc17c80b24e72060dbacf3c1
|
[
"MIT"
] | 2
|
2020-06-14T12:27:22.000Z
|
2020-10-27T16:55:04.000Z
|
import torch.nn as nn
import torch
import torch.nn.functional as F
from im2mesh.onet_multi_layers_predict.models import resnet
from im2mesh.common import normalize_imagenet
import im2mesh.common as common
def kmax_pooling(x, dim, k):
    """Keep the k largest entries of ``x`` along ``dim``, in their original order.

    Args:
        x: input tensor.
        dim: dimension along which to select.
        k: number of top values to keep.

    Returns:
        Tensor of the same shape as ``x`` except that ``dim`` has size ``k``.
    """
    top_idx = x.topk(k, dim=dim).indices
    # Sorting the indices restores the values' original ordering along dim.
    ordered_idx, _ = top_idx.sort(dim=dim)
    return x.gather(dim, ordered_idx)
class Resnet18_Full(nn.Module):
    r''' ResNet-18 encoder network for image input.

    Produces three feature vectors (f3, f2, f1) from a ResNet-18 backbone:
    f3 is the final 512-d feature; f2 and f1 are intermediate feature maps
    reduced by k-max pooling over channels and a linear projection.

    Args:
        c_dim (int): output dimension of the latent embedding; when 512 the
            final projections are identity (nn.Sequential())
        normalize (bool): whether the input images should be normalized
            with ImageNet statistics
        batch_norm (bool): whether to apply BatchNorm1d to each output
        pretrained (bool): whether to load pretrained backbone weights
        pretrained_path (str): optional path to pretrained weights
    '''

    def __init__(self, c_dim, normalize=True, batch_norm=False, pretrained=True, pretrained_path=None):
        super().__init__()
        self.normalize = normalize
        self.features = resnet.resnet18(pretrained=pretrained, pretrained_path=pretrained_path)
        if c_dim != 512:
            self.fc3 = nn.Linear(512, c_dim)
            self.fc2 = nn.Linear(512, c_dim)
            self.fc1 = nn.Linear(512, c_dim)
        else:
            # Identity projections when the embedding is already 512-d.
            self.fc3 = nn.Sequential()
            self.fc2 = nn.Sequential()
            self.fc1 = nn.Sequential()
        self.batch_norm = batch_norm
        if self.batch_norm:
            # NOTE(review): message says "layer norm" but these are BatchNorm1d.
            print('Using layer norm in encdoer')
            self.f1_bn = nn.BatchNorm1d(c_dim)
            self.f2_bn = nn.BatchNorm1d(c_dim)
            self.f3_bn = nn.BatchNorm1d(c_dim)
        # Project the flattened k-max-pooled feature maps down to 512.
        # 1568 presumably equals k * H * W of the pooled maps — TODO confirm.
        self.f1_fc = nn.Linear(1568,512)
        self.f2_fc = nn.Linear(1568,512)

    def forward(self, x):
        if self.normalize:
            x = normalize_imagenet(x)
        f3,f2,f1 = self.features(x)
        # f3: 512   f2: 256 * 14 * 14   f1: 128 * 28 * 28
        # Intermediate maps are detached: only the projections receive
        # gradients, not the backbone, for these branches.
        f1 = f1.detach()
        f1 = kmax_pooling(f1,1,2)
        f1 = f1.view(f1.size(0), -1)
        f1 = self.f1_fc(f1)
        f2 = f2.detach()
        f2 = kmax_pooling(f2,1,8)
        f2 = f2.view(f2.size(0), -1)
        f2 = self.f2_fc(f2)
        f3 = self.fc3(f3)
        f2 = self.fc2(f2)
        f1 = self.fc1(f1)
        if self.batch_norm:
            f3 = self.f3_bn(f3)
            f2 = self.f2_bn(f2)
            f1 = self.f1_bn(f1)
        return f3, f2, f1
class Resnet18_Local(nn.Module):
    r''' ResNet-18 encoder network for image input.

    Returns a global feature f3 plus per-point local features f2/f1 sampled
    from intermediate ResNet feature maps at the image projections of 3D
    query points.

    Args:
        c_dim (int): output dimension of the latent embedding; when 512 the
            final projection fc3 is identity (nn.Sequential())
        feature_map_dim (int): channel dimension of the per-point local
            features after the 1x1 convolutions
        normalize (bool): whether the input images should be normalized
            with ImageNet statistics
        batch_norm (bool): whether to apply BatchNorm1d to the outputs
        pretrained (bool): whether to load pretrained backbone weights
        pretrained_path (str): optional path to pretrained weights
    '''

    def __init__(self, c_dim, feature_map_dim=64 ,normalize=True, batch_norm=False, pretrained=True, pretrained_path=None):
        super().__init__()
        self.normalize = normalize
        self.features = resnet.resnet18(pretrained=pretrained, pretrained_path=pretrained_path)
        if c_dim != 512:
            self.fc3 = nn.Linear(512, c_dim)
        else:
            self.fc3 = nn.Sequential()
        self.feature_map_dim = feature_map_dim
        self.batch_norm = batch_norm
        if self.batch_norm:
            # NOTE(review): message says "layer norm" but these are BatchNorm1d.
            print('Using layer norm in encdoer')
            self.f1_bn = nn.BatchNorm1d(feature_map_dim)
            self.f2_bn = nn.BatchNorm1d(feature_map_dim)
            self.f3_bn = nn.BatchNorm1d(c_dim)
        # 1x1 convolutions reducing sampled channels to feature_map_dim.
        self.f2_conv = nn.Conv1d(256, self.feature_map_dim ,1)
        self.f1_conv = nn.Conv1d(128, self.feature_map_dim ,1)

    def forward(self, x, pts, world_mat, camera_mat):
        """Full pass: extract feature maps, then sample them at pts."""
        f3, f2, f1 = self.encode_first_step(x)
        f3, f2, f1 = self.encode_second_step(f3, f2, f1, pts, world_mat, camera_mat)
        return f3, f2, f1

    def encode_first_step(self, x):
        """Run the backbone; returns the raw (global, mid, early) features."""
        if self.normalize:
            x = normalize_imagenet(x)
        f3,f2,f1 = self.features(x)
        return f3, f2, f1

    def encode_second_step(self, f3, f2, f1, pts, world_mat, camera_mat):
        """Sample f2/f1 at the camera projections of pts; project f3.

        Returns f3 (batch, c_dim) and f2/f1 (batch, n_pts, feature_map_dim).
        """
        # Transform points into camera space and project onto the image plane.
        pts = common.transform_points(pts, world_mat)
        points_img = common.project_to_camera(pts, camera_mat)
        points_img = points_img.unsqueeze(1)
        # Detach so the backbone receives no gradient through the sampled maps.
        f2 = f2.detach()
        f2 = F.relu(f2)
        f2 = F.grid_sample(f2, points_img)
        f2 = f2.squeeze(2)
        f2 = self.f2_conv(f2)
        f1 = f1.detach()
        f1 = F.relu(f1)
        f1 = F.grid_sample(f1, points_img)
        f1 = f1.squeeze(2)
        f1 = self.f1_conv(f1)
        f3 = self.fc3(f3)
        if self.batch_norm:
            f3 = self.f3_bn(f3)
            f2 = self.f2_bn(f2)
            f1 = self.f1_bn(f1)
        f2 = f2.transpose(1, 2)
        f1 = f1.transpose(1, 2)
        # f2 : batch * n_pts * fmap_dim
        # f1 : batch * n_pts * fmap_dim
        return f3, f2, f1
class Resnet18_Local_1(nn.Module):
    r''' ResNet-18 encoder network for image input.

    Variant of Resnet18_Local that samples *lists* of feature maps
    (via the backbone's calc_feature_maps) and concatenates the sampled
    channels before the 1x1 reduction convolutions.

    Args:
        c_dim (int): output dimension of the latent embedding; when 512 the
            final projection fc3 is identity (nn.Sequential())
        feature_map_dim (int): channel dimension of the per-point local
            features after the 1x1 convolutions
        normalize (bool): whether the input images should be normalized
            with ImageNet statistics
        batch_norm (bool): whether to apply BatchNorm1d to the outputs
        pretrained (bool): whether to load pretrained backbone weights
        pretrained_path (str): optional path to pretrained weights
    '''

    def __init__(self, c_dim, feature_map_dim=64 ,normalize=True, batch_norm=False, pretrained=True, pretrained_path=None):
        super().__init__()
        self.normalize = normalize
        self.features = resnet.resnet18(pretrained=pretrained, pretrained_path=pretrained_path)
        if c_dim != 512:
            self.fc3 = nn.Linear(512, c_dim)
        else:
            self.fc3 = nn.Sequential()
        self.feature_map_dim = feature_map_dim
        self.batch_norm = batch_norm
        if self.batch_norm:
            # NOTE(review): message says "layer norm" but these are BatchNorm1d.
            print('Using layer norm in encdoer')
            self.f1_bn = nn.BatchNorm1d(feature_map_dim)
            self.f2_bn = nn.BatchNorm1d(feature_map_dim)
            self.f3_bn = nn.BatchNorm1d(c_dim)
        # Input channels are the concatenation of the sampled map channels
        # (256+128 for the mid-level list, 64+64+3 for the early list).
        self.f2_conv = nn.Conv1d(256+128, self.feature_map_dim ,1)
        self.f1_conv = nn.Conv1d(64+64+3, self.feature_map_dim ,1)

    def forward(self, x, pts, world_mat, camera_mat):
        """Full pass: extract feature-map lists, then sample them at pts."""
        f3, fs2, fs1 = self.encode_first_step(x)
        f3, f2, f1 = self.encode_second_step(f3, fs2, fs1, pts, world_mat, camera_mat)
        return f3, f2, f1

    def encode_first_step(self, x):
        """Run the backbone; returns (global feature, mid maps, early maps)."""
        if self.normalize:
            x = normalize_imagenet(x)
        f3,fs2,fs1 = self.features.calc_feature_maps(x)
        return f3, fs2, fs1

    def encode_second_step(self, f3, fs2, fs1, pts, world_mat, camera_mat):
        """Sample every map in fs2/fs1 at pts' image projections, concat,
        reduce with 1x1 convs, and project f3.

        Returns f3 (batch, c_dim) and fs2/fs1 (batch, n_pts, feature_map_dim).
        """
        # Transform points into camera space and project onto the image plane.
        pts = common.transform_points(pts, world_mat)
        points_img = common.project_to_camera(pts, camera_mat)
        points_img = points_img.unsqueeze(1)
        # Sample each mid-level map (detached: no gradient to the backbone).
        fs2_sampled = []
        for f2 in fs2:
            f2 = f2.detach()
            f2 = F.relu(f2)
            f2 = F.grid_sample(f2, points_img)
            f2 = f2.squeeze(2)
            fs2_sampled.append(f2)
        fs2 = torch.cat(fs2_sampled, dim=1)
        # Sample each early-level map the same way.
        fs1_sampled = []
        for f1 in fs1:
            f1 = f1.detach()
            f1 = F.relu(f1)
            f1 = F.grid_sample(f1, points_img)
            f1 = f1.squeeze(2)
            fs1_sampled.append(f1)
        fs1 = torch.cat(fs1_sampled, dim=1)
        fs2 = self.f2_conv(fs2)
        fs1 = self.f1_conv(fs1)
        f3 = self.fc3(f3)
        if self.batch_norm:
            f3 = self.f3_bn(f3)
            fs2 = self.f2_bn(fs2)
            fs1 = self.f1_bn(fs1)
        fs2 = fs2.transpose(1, 2)
        fs1 = fs1.transpose(1, 2)
        # f2 : batch * n_pts * fmap_dim
        # f1 : batch * n_pts * fmap_dim
        return f3, fs2, fs1
| 32.654867
| 123
| 0.59336
| 1,062
| 7,380
| 3.925612
| 0.12806
| 0.01823
| 0.043656
| 0.021588
| 0.80355
| 0.794435
| 0.769729
| 0.754857
| 0.749101
| 0.741425
| 0
| 0.068772
| 0.300542
| 7,380
| 225
| 124
| 32.8
| 0.738861
| 0.123035
| 0
| 0.610063
| 0
| 0
| 0.012686
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.069182
| false
| 0
| 0.037736
| 0
| 0.176101
| 0.018868
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
50062d3eb0b7a6ce853267189edef5492f1a8e9e
| 17,597
|
py
|
Python
|
misago/misago/threads/tests/test_thread_editreply_api.py
|
vascoalramos/misago-deployment
|
20226072138403108046c0afad9d99eb4163cedc
|
[
"MIT"
] | 2
|
2021-03-06T21:06:13.000Z
|
2021-03-09T15:05:12.000Z
|
misago/misago/threads/tests/test_thread_editreply_api.py
|
vascoalramos/misago-deployment
|
20226072138403108046c0afad9d99eb4163cedc
|
[
"MIT"
] | null | null | null |
misago/misago/threads/tests/test_thread_editreply_api.py
|
vascoalramos/misago-deployment
|
20226072138403108046c0afad9d99eb4163cedc
|
[
"MIT"
] | null | null | null |
from datetime import timedelta
from django.test.client import BOUNDARY, MULTIPART_CONTENT, encode_multipart
from django.urls import reverse
from django.utils import timezone
from .. import test
from ...acl.test import patch_user_acl
from ...categories.models import Category
from ...users.test import AuthenticatedUserTestCase
from ..models import Post, Thread
from ..test import patch_category_acl
class EditReplyTests(AuthenticatedUserTestCase):
    """Tests for the thread reply edit API endpoint.

    Covers permission checks (ownership, edit time limit, closed categories
    and threads, protected posts), request validation, edit-record tracking
    and the moderation (approval) queues.
    """

    def setUp(self):
        super().setUp()

        self.category = Category.objects.get(slug="first-category")
        self.thread = test.post_thread(category=self.category)
        self.post = test.reply_thread(self.thread, poster=self.user)

        self.api_link = reverse(
            "misago:api:thread-post-detail",
            kwargs={"thread_pk": self.thread.pk, "pk": self.post.pk},
        )

    def put(self, url, data=None):
        """Send a multipart-encoded PUT (the Django test client does not
        encode PUT bodies itself)."""
        content = encode_multipart(BOUNDARY, data or {})
        return self.client.put(url, content, content_type=MULTIPART_CONTENT)

    def test_cant_edit_reply_as_guest(self):
        """user has to be authenticated to be able to edit reply"""
        self.logout_user()

        response = self.put(self.api_link)
        self.assertEqual(response.status_code, 403)

    def test_thread_visibility(self):
        """thread's visibility is validated"""
        with patch_category_acl({"can_see": False}):
            response = self.put(self.api_link)
            self.assertEqual(response.status_code, 404)

        with patch_category_acl({"can_browse": False}):
            response = self.put(self.api_link)
            self.assertEqual(response.status_code, 404)

        with patch_category_acl({"can_see_all_threads": False}):
            response = self.put(self.api_link)
            self.assertEqual(response.status_code, 404)

    @patch_category_acl({"can_edit_posts": 0})
    def test_cant_edit_reply(self):
        """permission to edit reply is validated"""
        response = self.put(self.api_link)
        self.assertEqual(response.status_code, 403)
        self.assertEqual(
            response.json(), {"detail": "You can't edit posts in this category."}
        )

    @patch_category_acl({"can_edit_posts": 1})
    def test_cant_edit_other_user_reply(self):
        """permission to edit reply by other users is validated"""
        self.post.poster = None
        self.post.save()

        response = self.put(self.api_link)
        self.assertEqual(response.status_code, 403)
        self.assertEqual(
            response.json(),
            {"detail": "You can't edit other users posts in this category."},
        )

    @patch_category_acl({"can_edit_posts": 1, "post_edit_time": 1})
    def test_edit_too_old(self):
        """permission to edit reply within timelimit is validated"""
        self.post.posted_on = timezone.now() - timedelta(minutes=5)
        self.post.save()

        response = self.put(self.api_link)
        self.assertEqual(response.status_code, 403)
        self.assertEqual(
            response.json(),
            {"detail": "You can't edit posts that are older than 1 minute."},
        )

    @patch_category_acl({"can_edit_posts": 1, "can_close_threads": False})
    def test_closed_category_no_permission(self):
        """permission to edit reply in closed category is validated"""
        self.category.is_closed = True
        self.category.save()

        response = self.put(self.api_link)
        self.assertEqual(response.status_code, 403)
        self.assertEqual(
            response.json(),
            {"detail": "This category is closed. You can't edit posts in it."},
        )

    @patch_category_acl({"can_edit_posts": 1, "can_close_threads": True})
    def test_closed_category(self):
        """permission to edit reply in closed category is validated"""
        self.category.is_closed = True
        self.category.save()

        response = self.put(self.api_link)
        # 400 (not 403): closing no longer blocks, but empty data fails validation
        self.assertEqual(response.status_code, 400)

    @patch_category_acl({"can_edit_posts": 1, "can_close_threads": False})
    def test_closed_thread_no_permission(self):
        """permission to edit reply in closed thread is validated"""
        self.thread.is_closed = True
        self.thread.save()

        response = self.put(self.api_link)
        self.assertEqual(response.status_code, 403)
        self.assertEqual(
            response.json(),
            {"detail": "This thread is closed. You can't edit posts in it."},
        )

    @patch_category_acl({"can_edit_posts": 1, "can_close_threads": True})
    def test_closed_thread(self):
        """permission to edit reply in closed thread is validated"""
        self.thread.is_closed = True
        self.thread.save()

        response = self.put(self.api_link)
        self.assertEqual(response.status_code, 400)

    @patch_category_acl({"can_edit_posts": 1, "can_protect_posts": False})
    def test_protected_post_no_permission(self):
        """permission to edit protected post is validated"""
        self.post.is_protected = True
        self.post.save()

        response = self.put(self.api_link)
        self.assertEqual(response.status_code, 403)
        self.assertEqual(
            response.json(), {"detail": "This post is protected. You can't edit it."}
        )

    @patch_category_acl({"can_edit_posts": 1, "can_protect_posts": True})
    def test_protected_post_no(self):
        """permission to edit protected post is validated"""
        self.post.is_protected = True
        self.post.save()

        response = self.put(self.api_link)
        self.assertEqual(response.status_code, 400)

    @patch_category_acl({"can_edit_posts": 1})
    def test_empty_data(self):
        """no data sent handling has no showstoppers"""
        response = self.put(self.api_link, data={})
        self.assertEqual(response.status_code, 400)
        self.assertEqual(response.json(), {"post": ["You have to enter a message."]})

    @patch_category_acl({"can_edit_posts": 1})
    def test_invalid_data(self):
        """api errors for invalid request data"""
        response = self.client.put(
            self.api_link, "false", content_type="application/json"
        )
        self.assertEqual(response.status_code, 400)
        self.assertEqual(
            response.json(),
            {
                "non_field_errors": [
                    "Invalid data. Expected a dictionary, but got bool."
                ]
            },
        )

    @patch_category_acl({"can_edit_posts": 1})
    def test_edit_event(self):
        """events can't be edited"""
        self.post.is_event = True
        self.post.save()

        response = self.put(self.api_link, data={})
        self.assertEqual(response.status_code, 403)
        self.assertEqual(response.json(), {"detail": "Events can't be edited."})

    @patch_category_acl({"can_edit_posts": 1})
    def test_post_is_validated(self):
        """post is validated"""
        response = self.put(self.api_link, data={"post": "a"})
        self.assertEqual(response.status_code, 400)
        self.assertEqual(
            response.json(),
            {
                "post": [
                    "Posted message should be at least 5 characters long (it has 1)."
                ]
            },
        )

    @patch_category_acl({"can_edit_posts": 1})
    def test_edit_reply_no_change(self):
        """endpoint isn't bumping edits count if no change was made to post's body"""
        self.assertEqual(self.post.edits_record.count(), 0)

        response = self.put(self.api_link, data={"post": self.post.original})
        self.assertEqual(response.status_code, 200)

        response = self.client.get(self.thread.get_absolute_url())
        self.assertContains(response, self.post.parsed)

        post = self.thread.post_set.order_by("id").last()
        self.assertEqual(post.edits, 0)
        self.assertEqual(post.original, self.post.original)
        # Fixed: the user's values were previously passed as the (ignored)
        # "msg" argument of assertIsNone; the intent is "no last editor".
        self.assertIsNone(post.last_editor_id)
        self.assertIsNone(post.last_editor_name)
        self.assertIsNone(post.last_editor_slug)

        self.assertEqual(self.post.edits_record.count(), 0)

    @patch_category_acl({"can_edit_posts": 1})
    def test_edit_reply(self):
        """endpoint updates reply"""
        self.assertEqual(self.post.edits_record.count(), 0)

        response = self.put(self.api_link, data={"post": "This is test edit!"})
        self.assertEqual(response.status_code, 200)

        response = self.client.get(self.thread.get_absolute_url())
        self.assertContains(response, "<p>This is test edit!</p>")

        self.assertEqual(self.user.audittrail_set.count(), 1)

        post = self.thread.post_set.order_by("id").last()
        self.assertEqual(post.edits, 1)
        self.assertEqual(post.original, "This is test edit!")
        self.assertEqual(post.last_editor_id, self.user.id)
        self.assertEqual(post.last_editor_name, self.user.username)
        self.assertEqual(post.last_editor_slug, self.user.slug)

        self.assertEqual(self.post.edits_record.count(), 1)

        post_edit = post.edits_record.last()
        self.assertEqual(post_edit.edited_from, self.post.original)
        self.assertEqual(post_edit.edited_to, post.original)
        self.assertEqual(post_edit.editor_id, self.user.id)
        self.assertEqual(post_edit.editor_name, self.user.username)
        self.assertEqual(post_edit.editor_slug, self.user.slug)

    @patch_category_acl({"can_edit_posts": 2, "can_hide_threads": 1})
    def test_edit_first_post_hidden(self):
        """endpoint updates hidden thread's first post"""
        self.thread.is_hidden = True
        self.thread.save()
        self.thread.first_post.is_hidden = True
        self.thread.first_post.save()

        api_link = reverse(
            "misago:api:thread-post-detail",
            kwargs={"thread_pk": self.thread.pk, "pk": self.thread.first_post.pk},
        )

        response = self.put(api_link, data={"post": "This is test edit!"})
        self.assertEqual(response.status_code, 200)

    @patch_category_acl({"can_edit_posts": 1, "can_protect_posts": True})
    def test_protect_post(self):
        """can protect post"""
        response = self.put(
            self.api_link, data={"post": "Lorem ipsum dolor met!", "protect": 1}
        )
        self.assertEqual(response.status_code, 200)

        post = self.user.post_set.order_by("id").last()
        self.assertTrue(post.is_protected)

    @patch_category_acl({"can_edit_posts": 1, "can_protect_posts": False})
    def test_protect_post_no_permission(self):
        """cant protect post without permission"""
        response = self.put(
            self.api_link, data={"post": "Lorem ipsum dolor met!", "protect": 1}
        )
        self.assertEqual(response.status_code, 200)

        # The edit succeeds but the protect flag is silently dropped.
        post = self.user.post_set.order_by("id").last()
        self.assertFalse(post.is_protected)

    @patch_category_acl({"can_edit_posts": 1})
    def test_post_unicode(self):
        """unicode characters can be posted"""
        response = self.put(
            self.api_link, data={"post": "Chrzążczyżewoszyce, powiat Łękółody."}
        )
        self.assertEqual(response.status_code, 200)

    @patch_category_acl({"can_edit_posts": 1})
    def test_reply_category_moderation_queue(self):
        """edit sends reply to queue due to category setup"""
        self.category.require_edits_approval = True
        self.category.save()

        response = self.put(self.api_link, data={"post": "Lorem ipsum dolor met!"})
        self.assertEqual(response.status_code, 200)

        post = self.user.post_set.all()[:1][0]
        self.assertTrue(post.is_unapproved)

    @patch_category_acl({"can_edit_posts": 1})
    @patch_user_acl({"can_approve_content": True})
    def test_reply_category_moderation_queue_bypass(self):
        """bypass moderation queue due to user's acl"""
        self.category.require_edits_approval = True
        self.category.save()

        response = self.put(self.api_link, data={"post": "Lorem ipsum dolor met!"})
        self.assertEqual(response.status_code, 200)

        post = self.user.post_set.all()[:1][0]
        self.assertFalse(post.is_unapproved)

    @patch_category_acl({"can_edit_posts": 1, "require_edits_approval": True})
    def test_reply_user_moderation_queue(self):
        """edit sends reply to queue due to user acl"""
        response = self.put(self.api_link, data={"post": "Lorem ipsum dolor met!"})
        self.assertEqual(response.status_code, 200)

        post = self.user.post_set.all()[:1][0]
        self.assertTrue(post.is_unapproved)

    @patch_category_acl({"can_edit_posts": 1, "require_edits_approval": True})
    @patch_user_acl({"can_approve_content": True})
    def test_reply_user_moderation_queue_bypass(self):
        """bypass moderation queue due to user's acl"""
        response = self.put(self.api_link, data={"post": "Lorem ipsum dolor met!"})
        self.assertEqual(response.status_code, 200)

        post = self.user.post_set.all()[:1][0]
        self.assertFalse(post.is_unapproved)

    @patch_category_acl(
        {
            "can_edit_posts": 1,
            "require_threads_approval": True,
            "require_replies_approval": True,
        }
    )
    def test_reply_omit_other_moderation_queues(self):
        """other queues are omitted"""
        self.category.require_threads_approval = True
        self.category.require_replies_approval = True
        self.category.save()

        response = self.put(self.api_link, data={"post": "Lorem ipsum dolor met!"})
        self.assertEqual(response.status_code, 200)

        post = self.user.post_set.all()[:1][0]
        self.assertFalse(post.is_unapproved)

    def setUpFirstReplyTest(self):
        """Re-point the test fixtures at the thread's first post."""
        self.post = self.thread.first_post

        self.post.poster = self.user
        self.post.save()

        self.api_link = reverse(
            "misago:api:thread-post-detail",
            kwargs={"thread_pk": self.thread.pk, "pk": self.post.pk},
        )

    @patch_category_acl({"can_edit_posts": 1})
    def test_first_reply_category_moderation_queue(self):
        """edit sends thread to queue due to category setup"""
        self.setUpFirstReplyTest()

        self.category.require_edits_approval = True
        self.category.save()

        response = self.put(self.api_link, data={"post": "Lorem ipsum dolor met!"})
        self.assertEqual(response.status_code, 200)

        thread = Thread.objects.get(pk=self.thread.pk)
        self.assertTrue(thread.is_unapproved)
        self.assertTrue(thread.has_unapproved_posts)

        post = Post.objects.get(pk=self.post.pk)
        self.assertTrue(post.is_unapproved)

    @patch_category_acl({"can_edit_posts": 1})
    @patch_user_acl({"can_approve_content": True})
    def test_first_reply_category_moderation_queue_bypass(self):
        """bypass moderation queue due to user's acl"""
        self.setUpFirstReplyTest()

        self.category.require_edits_approval = True
        self.category.save()

        response = self.put(self.api_link, data={"post": "Lorem ipsum dolor met!"})
        self.assertEqual(response.status_code, 200)

        thread = Thread.objects.get(pk=self.thread.pk)
        self.assertFalse(thread.is_unapproved)
        self.assertFalse(thread.has_unapproved_posts)

        post = Post.objects.get(pk=self.post.pk)
        self.assertFalse(post.is_unapproved)

    @patch_category_acl({"can_edit_posts": 1, "require_edits_approval": True})
    def test_first_reply_user_moderation_queue(self):
        """edit sends thread to queue due to user acl"""
        self.setUpFirstReplyTest()

        response = self.put(self.api_link, data={"post": "Lorem ipsum dolor met!"})
        self.assertEqual(response.status_code, 200)

        thread = Thread.objects.get(pk=self.thread.pk)
        self.assertTrue(thread.is_unapproved)
        self.assertTrue(thread.has_unapproved_posts)

        post = Post.objects.get(pk=self.post.pk)
        self.assertTrue(post.is_unapproved)

    @patch_category_acl({"can_edit_posts": 1, "require_edits_approval": True})
    @patch_user_acl({"can_approve_content": True})
    def test_first_reply_user_moderation_queue_bypass(self):
        """bypass moderation queue due to user's acl"""
        self.setUpFirstReplyTest()

        response = self.put(self.api_link, data={"post": "Lorem ipsum dolor met!"})
        self.assertEqual(response.status_code, 200)

        thread = Thread.objects.get(pk=self.thread.pk)
        self.assertFalse(thread.is_unapproved)
        self.assertFalse(thread.has_unapproved_posts)

        post = Post.objects.get(pk=self.post.pk)
        self.assertFalse(post.is_unapproved)

    @patch_category_acl(
        {
            "can_edit_posts": 1,
            "require_threads_approval": True,
            "require_replies_approval": True,
        }
    )
    def test_first_reply_omit_other_moderation_queues(self):
        """other queues are omitted"""
        self.setUpFirstReplyTest()

        self.category.require_threads_approval = True
        self.category.require_replies_approval = True
        self.category.save()

        response = self.put(self.api_link, data={"post": "Lorem ipsum dolor met!"})
        self.assertEqual(response.status_code, 200)

        thread = Thread.objects.get(pk=self.thread.pk)
        self.assertFalse(thread.is_unapproved)
        self.assertFalse(thread.has_unapproved_posts)

        post = Post.objects.get(pk=self.post.pk)
        self.assertFalse(post.is_unapproved)
| 38.088745
| 85
| 0.652554
| 2,238
| 17,597
| 4.914656
| 0.087578
| 0.081826
| 0.089917
| 0.087008
| 0.813983
| 0.785617
| 0.759433
| 0.753432
| 0.722066
| 0.711246
| 0
| 0.01124
| 0.226459
| 17,597
| 461
| 86
| 38.171367
| 0.796797
| 0.073251
| 0
| 0.595238
| 0
| 0
| 0.124133
| 0.016778
| 0
| 0
| 0
| 0
| 0.258929
| 1
| 0.10119
| false
| 0.011905
| 0.029762
| 0
| 0.136905
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
50521f699f2cd7deae7492a6a14bf8db3ecbdf8e
| 34,732
|
py
|
Python
|
deepsleep/trainer.py
|
HTJR/deepsleepnet
|
d4906b4875547a45175eaba8bdde280b7b1496f1
|
[
"Apache-2.0"
] | 266
|
2017-06-24T03:27:52.000Z
|
2022-03-28T14:21:03.000Z
|
deepsleep/trainer.py
|
HOCHAN-LEE/deepsleepnet
|
24dedefbff5f3ab9cd7e8d20808afb866261302d
|
[
"Apache-2.0"
] | 43
|
2017-07-13T13:03:02.000Z
|
2022-01-07T06:49:45.000Z
|
deepsleep/trainer.py
|
HOCHAN-LEE/deepsleepnet
|
24dedefbff5f3ab9cd7e8d20808afb866261302d
|
[
"Apache-2.0"
] | 136
|
2017-07-09T11:45:51.000Z
|
2022-03-24T19:45:30.000Z
|
import itertools
import os
import re
import time
from datetime import datetime
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from sklearn.metrics import confusion_matrix, f1_score
from deepsleep.data_loader import NonSeqDataLoader, SeqDataLoader
from deepsleep.model import DeepFeatureNet, DeepSleepNet
from deepsleep.optimize import adam, adam_clipping_list_lr
from deepsleep.utils import iterate_minibatches, iterate_batch_seq_minibatches
# from tensorlayer.db import TensorDB
# from tensorlayer.db import JobStatus
# db = TensorDB(ip='146.169.33.34', port=27020, db_name='DeepSleepNet', user_name='tensorlayer', password='Tensorlayer123', studyID='1')
class Trainer(object):
    """Base trainer holding shared reporting/visualization helpers.

    Args:
        interval_plot_filter: plot conv filters every N epochs.
        interval_save_model: save a checkpoint every N epochs.
        interval_print_cm: print full confusion matrices every N epochs.
    """

    def __init__(
        self,
        interval_plot_filter=50,
        interval_save_model=100,
        interval_print_cm=10
    ):
        self.interval_plot_filter = interval_plot_filter
        self.interval_save_model = interval_save_model
        self.interval_print_cm = interval_print_cm

    def print_performance(self, sess, output_dir, network_name,
                          n_train_examples, n_valid_examples,
                          train_cm, valid_cm, epoch, n_epochs,
                          train_duration, train_loss, train_acc, train_f1,
                          valid_duration, valid_loss, valid_acc, valid_f1):
        """Print one epoch's train/valid metrics.

        Emits a full report (including confusion matrices) every
        ``interval_print_cm`` epochs and on the last epoch; otherwise a
        compact one-line summary.
        """
        # Get regularization loss.  Raw string: "\/" is an invalid escape
        # sequence in a plain string literal (SyntaxWarning on Python 3.12+).
        train_reg_loss = tf.add_n(tf.compat.v1.get_collection("losses", scope=network_name + r"\/"))
        train_reg_loss_value = sess.run(train_reg_loss)
        valid_reg_loss_value = train_reg_loss_value

        # Print performance
        if ((epoch + 1) % self.interval_print_cm == 0) or ((epoch + 1) == n_epochs):
            print(" ")
            print("[{}] epoch {}:".format(
                datetime.now(), epoch+1
            ))
            print((
                "train ({:.3f} sec): n={}, loss={:.3f} ({:.3f}), acc={:.3f}, "
                "f1={:.3f}".format(
                    train_duration, n_train_examples,
                    train_loss, train_reg_loss_value,
                    train_acc, train_f1
                )
            ))
            print(train_cm)
            print((
                "valid ({:.3f} sec): n={}, loss={:.3f} ({:.3f}), acc={:.3f}, "
                "f1={:.3f}".format(
                    valid_duration, n_valid_examples,
                    valid_loss, valid_reg_loss_value,
                    valid_acc, valid_f1
                )
            ))
            print(valid_cm)
            print(" ")
        else:
            print((
                "epoch {}: "
                "train ({:.2f} sec): n={}, loss={:.3f} ({:.3f}), "
                "acc={:.3f}, f1={:.3f} | "
                "valid ({:.2f} sec): n={}, loss={:.3f} ({:.3f}), "
                "acc={:.3f}, f1={:.3f}".format(
                    epoch+1,
                    train_duration, n_train_examples,
                    train_loss, train_reg_loss_value,
                    train_acc, train_f1,
                    valid_duration, n_valid_examples,
                    valid_loss, valid_reg_loss_value,
                    valid_acc, valid_f1
                )
            ))

    def print_network(self, network):
        """Print the network's input/target placeholders and every activation."""
        print("inputs ({}): {}".format(
            network.inputs.name, network.inputs.get_shape()
        ))
        print("targets ({}): {}".format(
            network.targets.name, network.targets.get_shape()
        ))
        for name, act in network.activations:
            print("{} ({}): {}".format(name, act.name, act.get_shape()))
        print(" ")

    def plot_filters(self, sess, epoch, reg_exp, output_dir, n_viz_filters):
        """Plot up to ``n_viz_filters`` 1-D conv filters whose variable name
        matches ``reg_exp``, saving one PNG per variable in ``output_dir``."""
        conv_weight = re.compile(reg_exp)
        for v in tf.compat.v1.trainable_variables():
            value = sess.run(v)
            if conv_weight.match(v.name):
                weights = np.squeeze(value)
                # Only plot conv that has one channel
                if len(weights.shape) > 2:
                    continue
                weights = weights.T
                plt.figure(figsize=(18, 10))
                plt.title(v.name)
                for w_idx in range(n_viz_filters):
                    plt.subplot(4, 4, w_idx+1)
                    plt.plot(weights[w_idx])
                    plt.axis("tight")
                plt.savefig(os.path.join(
                    output_dir, "{}_{}.png".format(
                        v.name.replace("/", "_").replace(":0", ""),
                        epoch+1
                    )
                ))
                plt.close("all")
class DeepFeatureNetTrainer(Trainer):
    """Trainer for the DeepFeatureNet pre-training stage (per-epoch
    non-sequential minibatches over one cross-validation fold)."""

    def __init__(
        self,
        data_dir,
        output_dir,
        n_folds,
        fold_idx,
        batch_size,
        input_dims,
        n_classes,
        interval_plot_filter=50,
        interval_save_model=100,
        interval_print_cm=10
    ):
        # super(self.__class__, ...) recurses infinitely if this class is
        # ever subclassed; the zero-argument form is the correct idiom.
        super().__init__(
            interval_plot_filter=interval_plot_filter,
            interval_save_model=interval_save_model,
            interval_print_cm=interval_print_cm
        )
        self.data_dir = data_dir
        self.output_dir = output_dir
        self.n_folds = n_folds
        self.fold_idx = fold_idx
        self.batch_size = batch_size
        self.input_dims = input_dims
        self.n_classes = n_classes

    def _run_epoch(self, sess, network, inputs, targets, train_op, is_train):
        """Run one pass over ``inputs``/``targets`` in minibatches.

        Shuffles only when training.  Pass ``tf.no_op()`` as ``train_op``
        for evaluation.  Returns (y_true, y_pred, mean_loss, duration).
        """
        start_time = time.time()
        y = []
        y_true = []
        total_loss, n_batches = 0.0, 0
        for x_batch, y_batch in iterate_minibatches(inputs,
                                                    targets,
                                                    self.batch_size,
                                                    shuffle=is_train):
            feed_dict = {
                network.input_var: x_batch,
                network.target_var: y_batch
            }
            _, loss_value, y_pred = sess.run(
                [train_op, network.loss_op, network.pred_op],
                feed_dict=feed_dict
            )
            total_loss += loss_value
            n_batches += 1
            y.append(y_pred)
            y_true.append(y_batch)

            # Check the loss value
            assert not np.isnan(loss_value), \
                "Model diverged with loss = NaN"

        duration = time.time() - start_time
        total_loss /= n_batches
        total_y_pred = np.hstack(y)
        total_y_true = np.hstack(y_true)

        return total_y_true, total_y_pred, total_loss, duration

    def train(self, n_epochs, resume):
        """Pre-train DeepFeatureNet for ``n_epochs`` epochs on this fold.

        Optionally resumes from the last checkpoint in the output directory.
        Returns the path of the saved parameter file (``params_fold*.npz``).
        """
        with tf.Graph().as_default(), tf.compat.v1.Session() as sess:
            # Build training and validation networks (validation shares
            # parameters via reuse_params).
            train_net = DeepFeatureNet(
                batch_size=self.batch_size,
                input_dims=self.input_dims,
                n_classes=self.n_classes,
                is_train=True,
                reuse_params=False,
                use_dropout=True
            )
            valid_net = DeepFeatureNet(
                batch_size=self.batch_size,
                input_dims=self.input_dims,
                n_classes=self.n_classes,
                is_train=False,
                reuse_params=True,
                use_dropout=True
            )

            # Initialize parameters
            train_net.init_ops()
            valid_net.init_ops()

            print("Network (layers={})".format(len(train_net.activations)))
            print("inputs ({}): {}".format(
                train_net.input_var.name, train_net.input_var.get_shape()
            ))
            print("targets ({}): {}".format(
                train_net.target_var.name, train_net.target_var.get_shape()
            ))
            for name, act in train_net.activations:
                print("{} ({}): {}".format(name, act.name, act.get_shape()))
            print(" ")

            # Define optimization operations
            train_op, grads_and_vars_op = adam(
                loss=train_net.loss_op,
                lr=1e-4,
                train_vars=tf.compat.v1.trainable_variables()
            )

            # Make subdirectory for pretraining
            output_dir = os.path.join(self.output_dir, "fold{}".format(self.fold_idx), train_net.name)
            if not os.path.exists(output_dir):
                os.makedirs(output_dir)

            # Global step for resume training
            with tf.compat.v1.variable_scope(train_net.name) as scope:
                global_step = tf.Variable(0, name="global_step", trainable=False)

            # Create a saver
            saver = tf.compat.v1.train.Saver(tf.compat.v1.global_variables(), max_to_keep=0)

            # Initialize variables in the graph
            sess.run(tf.compat.v1.global_variables_initializer())

            # Add the graph structure into the Tensorboard writer
            train_summary_wrt = tf.compat.v1.summary.FileWriter(
                os.path.join(output_dir, "train_summary"),
                sess.graph
            )

            # Resume the training if applicable
            if resume:
                if os.path.exists(output_dir):
                    if os.path.isfile(os.path.join(output_dir, "checkpoint")):
                        # Restore the last checkpoint.  Use the compat.v1
                        # namespace consistently (tf.train.latest_checkpoint
                        # was removed from the TF2 API surface).
                        saver.restore(sess, tf.compat.v1.train.latest_checkpoint(output_dir))
                        print("Model restored")
                        print("[{}] Resume pre-training ...\n".format(datetime.now()))
                    else:
                        print("[{}] Start pre-training ...\n".format(datetime.now()))
            else:
                print("[{}] Start pre-training ...\n".format(datetime.now()))

            # Load data only when there are epochs left to run
            if sess.run(global_step) < n_epochs:
                data_loader = NonSeqDataLoader(
                    data_dir=self.data_dir,
                    n_folds=self.n_folds,
                    fold_idx=self.fold_idx
                )
                x_train, y_train, x_valid, y_valid = data_loader.load_train_data()

                # Performance history
                all_train_loss = np.zeros(n_epochs)
                all_train_acc = np.zeros(n_epochs)
                all_train_f1 = np.zeros(n_epochs)
                all_valid_loss = np.zeros(n_epochs)
                all_valid_acc = np.zeros(n_epochs)
                all_valid_f1 = np.zeros(n_epochs)

            # Loop each epoch
            for epoch in range(sess.run(global_step), n_epochs):
                # Update parameters and compute loss of training set
                y_true_train, y_pred_train, train_loss, train_duration = \
                    self._run_epoch(
                        sess=sess, network=train_net,
                        inputs=x_train, targets=y_train,
                        train_op=train_op,
                        is_train=True
                    )
                n_train_examples = len(y_true_train)
                train_cm = confusion_matrix(y_true_train, y_pred_train)
                train_acc = np.mean(y_true_train == y_pred_train)
                train_f1 = f1_score(y_true_train, y_pred_train, average="macro")

                # Evaluate the model on the validation set
                y_true_val, y_pred_val, valid_loss, valid_duration = \
                    self._run_epoch(
                        sess=sess, network=valid_net,
                        inputs=x_valid, targets=y_valid,
                        train_op=tf.no_op(),
                        is_train=False
                    )
                n_valid_examples = len(y_true_val)
                valid_cm = confusion_matrix(y_true_val, y_pred_val)
                valid_acc = np.mean(y_true_val == y_pred_val)
                valid_f1 = f1_score(y_true_val, y_pred_val, average="macro")

                all_train_loss[epoch] = train_loss
                all_train_acc[epoch] = train_acc
                all_train_f1[epoch] = train_f1
                all_valid_loss[epoch] = valid_loss
                all_valid_acc[epoch] = valid_acc
                all_valid_f1[epoch] = valid_f1

                # Report performance
                self.print_performance(
                    sess, output_dir, train_net.name,
                    n_train_examples, n_valid_examples,
                    train_cm, valid_cm, epoch, n_epochs,
                    train_duration, train_loss, train_acc, train_f1,
                    valid_duration, valid_loss, valid_acc, valid_f1
                )

                # Save performance history
                np.savez(
                    os.path.join(output_dir, "perf_fold{}.npz".format(self.fold_idx)),
                    train_loss=all_train_loss, valid_loss=all_valid_loss,
                    train_acc=all_train_acc, valid_acc=all_valid_acc,
                    train_f1=all_train_f1, valid_f1=all_valid_f1,
                    y_true_val=np.asarray(y_true_val),
                    y_pred_val=np.asarray(y_pred_val)
                )

                # Visualize weights from convolutional layers.  Raw strings:
                # "\/" is an invalid escape sequence in plain literals.
                if ((epoch + 1) % self.interval_plot_filter == 0) or ((epoch + 1) == n_epochs):
                    self.plot_filters(sess, epoch, train_net.name + r"(_[0-9])?\/l[0-9]+_conv\/(weights)", output_dir, 16)
                    self.plot_filters(sess, epoch, train_net.name + r"(_[0-9])?/l[0-9]+_conv\/conv1d\/(weights)", output_dir, 16)

                # Save checkpoint
                sess.run(tf.compat.v1.assign(global_step, epoch+1))
                if ((epoch + 1) % self.interval_save_model == 0) or ((epoch + 1) == n_epochs):
                    start_time = time.time()
                    save_path = os.path.join(
                        output_dir, "model_fold{}.ckpt".format(self.fold_idx)
                    )
                    saver.save(sess, save_path, global_step=global_step)
                    duration = time.time() - start_time
                    print("Saved model checkpoint ({:.3f} sec)".format(duration))

                # Save paramaters
                if ((epoch + 1) % self.interval_save_model == 0) or ((epoch + 1) == n_epochs):
                    start_time = time.time()
                    save_dict = {}
                    for v in tf.compat.v1.global_variables():
                        save_dict[v.name] = sess.run(v)
                    np.savez(
                        os.path.join(
                            output_dir,
                            "params_fold{}.npz".format(self.fold_idx)),
                        **save_dict
                    )
                    duration = time.time() - start_time
                    print("Saved trained parameters ({:.3f} sec)".format(duration))

        print("Finish pre-training")
        return os.path.join(output_dir, "params_fold{}.npz".format(self.fold_idx))
class DeepSleepNetTrainer(Trainer):
    """Fine-tune the full DeepSleepNet (CNN feature extractor + bidirectional
    LSTM) for one cross-validation fold.

    The trainer restores parameters produced by the "deepfeaturenet"
    pre-training step, then continues training on whole per-subject
    sequences, using a smaller learning rate (1e-6) for the pre-trained
    layers and a larger one (1e-4) for the newly added ones.
    """

    def __init__(
        self,
        data_dir,
        output_dir,
        n_folds,
        fold_idx,
        batch_size,
        input_dims,
        n_classes,
        seq_length,
        n_rnn_layers,
        return_last,
        interval_plot_filter=50,
        interval_save_model=100,
        interval_print_cm=10
    ):
        # The interval_* arguments (epoch periods for plotting filters,
        # saving checkpoints and printing confusion matrices) are handled
        # by the Trainer base class.
        super(self.__class__, self).__init__(
            interval_plot_filter=interval_plot_filter,
            interval_save_model=interval_save_model,
            interval_print_cm=interval_print_cm
        )
        self.data_dir = data_dir        # directory passed to SeqDataLoader
        self.output_dir = output_dir    # root directory for checkpoints/outputs
        self.n_folds = n_folds          # total number of CV folds
        self.fold_idx = fold_idx        # fold handled by this trainer instance
        self.batch_size = batch_size
        self.input_dims = input_dims
        self.n_classes = n_classes
        self.seq_length = seq_length    # RNN unroll length per minibatch
        self.n_rnn_layers = n_rnn_layers
        self.return_last = return_last

    def _run_epoch(self, sess, network, inputs, targets, train_op, is_train):
        """Run one full pass over all subjects and return
        (y_true, y_pred, mean_loss, duration_seconds).

        `inputs`/`targets` are per-subject sequences; the LSTM state is
        re-initialized for every subject and carried across the seq
        minibatches within that subject.

        NOTE(review): `is_train` and `sub_idx` are currently unused in this
        body; training vs. evaluation is distinguished solely by the
        `train_op` passed in (tf.no_op() for evaluation).
        """
        start_time = time.time()
        y = []        # predicted labels, one array per minibatch
        y_true = []   # ground-truth labels, one array per minibatch
        total_loss, n_batches = 0.0, 0
        for sub_idx, each_data in enumerate(zip(inputs, targets)):
            each_x, each_y = each_data
            # # Initialize state of LSTM - Unidirectional LSTM
            # state = sess.run(network.initial_state)
            # Initialize state of LSTM - Bidirectional LSTM
            # Fresh zero state per subject so state never leaks across subjects.
            fw_state = sess.run(network.fw_initial_state)
            bw_state = sess.run(network.bw_initial_state)
            for x_batch, y_batch in iterate_batch_seq_minibatches(inputs=each_x,
                                                                 targets=each_y,
                                                                 batch_size=self.batch_size,
                                                                 seq_length=self.seq_length):
                feed_dict = {
                    network.input_var: x_batch,
                    network.target_var: y_batch
                }
                # Unidirectional LSTM
                # for i, (c, h) in enumerate(network.initial_state):
                #     feed_dict[c] = state[i].c
                #     feed_dict[h] = state[i].h
                # _, loss_value, y_pred, state = sess.run(
                #     [train_op, network.loss_op, network.pred_op, network.final_state],
                #     feed_dict=feed_dict
                # )
                # Feed the previous minibatch's final LSTM state back in so
                # the state is carried across minibatches of the same subject.
                for i, (c, h) in enumerate(network.fw_initial_state):
                    feed_dict[c] = fw_state[i].c
                    feed_dict[h] = fw_state[i].h
                for i, (c, h) in enumerate(network.bw_initial_state):
                    feed_dict[c] = bw_state[i].c
                    feed_dict[h] = bw_state[i].h
                _, loss_value, y_pred, fw_state, bw_state = sess.run(
                    [train_op, network.loss_op, network.pred_op, network.fw_final_state, network.bw_final_state],
                    feed_dict=feed_dict
                )
                total_loss += loss_value
                n_batches += 1
                y.append(y_pred)
                y_true.append(y_batch)
                # Check the loss value
                assert not np.isnan(loss_value), \
                    "Model diverged with loss = NaN"
        duration = time.time() - start_time
        total_loss /= n_batches
        total_y_pred = np.hstack(y)
        total_y_true = np.hstack(y_true)
        return total_y_true, total_y_pred, total_loss, duration

    def finetune(self, pretrained_model_path, n_epochs, resume):
        """Fine-tune DeepSleepNet starting from pre-trained DeepFeatureNet
        parameters stored in the .npz file at `pretrained_model_path`.

        If `resume` is True and a checkpoint exists in the output directory,
        training continues from that checkpoint instead of loading the
        pre-trained parameters.

        Returns the path of the .npz file holding the final parameters.
        """
        # Variable names inside the pretrained .npz are prefixed with this
        # network name; it is rewritten to the fine-tuning network's name below.
        pretrained_model_name = "deepfeaturenet"
        with tf.Graph().as_default(), tf.compat.v1.Session() as sess:
            # Build training and validation networks (validation reuses the
            # training weights via reuse_params=True and disables training mode).
            train_net = DeepSleepNet(
                batch_size=self.batch_size,
                input_dims=self.input_dims,
                n_classes=self.n_classes,
                seq_length=self.seq_length,
                n_rnn_layers=self.n_rnn_layers,
                return_last=self.return_last,
                is_train=True,
                reuse_params=False,
                use_dropout_feature=True,
                use_dropout_sequence=True
            )
            valid_net = DeepSleepNet(
                batch_size=self.batch_size,
                input_dims=self.input_dims,
                n_classes=self.n_classes,
                seq_length=self.seq_length,
                n_rnn_layers=self.n_rnn_layers,
                return_last=self.return_last,
                is_train=False,
                reuse_params=True,
                use_dropout_feature=True,
                use_dropout_sequence=True
            )
            # Initialize parameters
            train_net.init_ops()
            valid_net.init_ops()
            print("Network (layers={})".format(len(train_net.activations)))
            print("inputs ({}): {}".format(
                train_net.input_var.name, train_net.input_var.get_shape()
            ))
            print("targets ({}): {}".format(
                train_net.target_var.name, train_net.target_var.get_shape()
            ))
            for name, act in train_net.activations:
                print("{} ({}): {}".format(name, act.name, act.get_shape()))
            print(" ")
            # Get list of all pretrained parameters
            with np.load(pretrained_model_path) as f:
                pretrain_params = list(f.keys())
                # Remove the network-name-prefix so the names can be matched
                # against this graph's variables regardless of network name.
                for i in range(len(pretrain_params)):
                    pretrain_params[i] = pretrain_params[i].replace(pretrained_model_name, "network")
            # Get trainable variables of the pretrained, and new ones
            train_vars1 = [v for v in tf.compat.v1.trainable_variables()
                           if v.name.replace(train_net.name, "network") in pretrain_params]
            train_vars2 = list(set(tf.compat.v1.trainable_variables()) - set(train_vars1))
            # Optimizer that uses different learning rates for each part of
            # the network: small LR for pre-trained layers, larger for new ones.
            train_op, grads_and_vars_op = adam_clipping_list_lr(
                loss=train_net.loss_op,
                list_lrs=[1e-6, 1e-4],
                list_train_vars=[train_vars1, train_vars2],
                clip_value=10.0
            )
            # Make subdirectory for this fold's fine-tuning outputs
            output_dir = os.path.join(self.output_dir, "fold{}".format(self.fold_idx), train_net.name)
            if not os.path.exists(output_dir):
                os.makedirs(output_dir)
            # Global step (epoch counter) persisted in checkpoints so
            # training can be resumed at the right epoch.
            with tf.compat.v1.variable_scope(train_net.name) as scope:
                global_step = tf.Variable(0, name="global_step", trainable=False)
            # Create a saver (max_to_keep=0 keeps every checkpoint)
            saver = tf.compat.v1.train.Saver(tf.compat.v1.global_variables(), max_to_keep=0)
            # Initialize variables in the graph
            sess.run(tf.compat.v1.global_variables_initializer())
            # Add the graph structure into the Tensorboard writer
            train_summary_wrt = tf.compat.v1.summary.FileWriter(
                os.path.join(output_dir, "train_summary"),
                sess.graph
            )
            # Resume the training if applicable; otherwise load the
            # pre-trained DeepFeatureNet parameters.
            load_pretrain = False
            if resume:
                if os.path.exists(output_dir):
                    if os.path.isfile(os.path.join(output_dir, "checkpoint")):
                        # Restore the last checkpoint
                        saver.restore(sess, tf.train.latest_checkpoint(output_dir))
                        print("Model restored")
                        print("[{}] Resume fine-tuning ...\n".format(datetime.now()))
                    else:
                        load_pretrain = True
            else:
                load_pretrain = True
            if load_pretrain:
                # Load pre-trained model, skipping optimizer slots ("Adam",
                # "power"), the classification head ("softmax") and the step
                # counter, which are not part of the shared feature layers.
                print("Loading pre-trained parameters to the model ...")
                print(" | --> {} from {}".format(pretrained_model_name, pretrained_model_path))
                with np.load(pretrained_model_path) as f:
                    for k, v in f.items():
                        if "Adam" in k or "softmax" in k or "power" in k or "global_step" in k:
                            continue
                        prev_k = k
                        k = k.replace(pretrained_model_name, train_net.name)
                        tmp_tensor = tf.compat.v1.get_default_graph().get_tensor_by_name(k)
                        sess.run(
                            tf.compat.v1.assign(
                                tmp_tensor,
                                v
                            )
                        )
                        print("assigned {}: {} to {}: {}".format(
                            prev_k, v.shape, k, tmp_tensor.get_shape()
                        ))
                print(" ")
            print("[{}] Start fine-tuning ...\n".format(datetime.now()))
            # Load data only if there is still training to do.
            if sess.run(global_step) < n_epochs:
                data_loader = SeqDataLoader(
                    data_dir=self.data_dir,
                    n_folds=self.n_folds,
                    fold_idx=self.fold_idx
                )
                x_train, y_train, x_valid, y_valid = data_loader.load_train_data()
            # Performance history, indexed by epoch
            all_train_loss = np.zeros(n_epochs)
            all_train_acc = np.zeros(n_epochs)
            all_train_f1 = np.zeros(n_epochs)
            all_valid_loss = np.zeros(n_epochs)
            all_valid_acc = np.zeros(n_epochs)
            all_valid_f1 = np.zeros(n_epochs)
            # Loop each epoch, starting from the restored global step
            for epoch in range(sess.run(global_step), n_epochs):
                # Update parameters and compute loss of training set
                y_true_train, y_pred_train, train_loss, train_duration = \
                    self._run_epoch(
                        sess=sess, network=train_net,
                        inputs=x_train, targets=y_train,
                        train_op=train_op,
                        is_train=True
                    )
                n_train_examples = len(y_true_train)
                train_cm = confusion_matrix(y_true_train, y_pred_train)
                train_acc = np.mean(y_true_train == y_pred_train)
                train_f1 = f1_score(y_true_train, y_pred_train, average="macro")
                # Evaluate the model on the validation set (tf.no_op() means
                # no parameter update is performed).
                y_true_val, y_pred_val, valid_loss, valid_duration = \
                    self._run_epoch(
                        sess=sess, network=valid_net,
                        inputs=x_valid, targets=y_valid,
                        train_op=tf.no_op(),
                        is_train=False
                    )
                n_valid_examples = len(y_true_val)
                valid_cm = confusion_matrix(y_true_val, y_pred_val)
                valid_acc = np.mean(y_true_val == y_pred_val)
                valid_f1 = f1_score(y_true_val, y_pred_val, average="macro")
                all_train_loss[epoch] = train_loss
                all_train_acc[epoch] = train_acc
                all_train_f1[epoch] = train_f1
                all_valid_loss[epoch] = valid_loss
                all_valid_acc[epoch] = valid_acc
                all_valid_f1[epoch] = valid_f1
                # Report performance
                self.print_performance(
                    sess, output_dir, train_net.name,
                    n_train_examples, n_valid_examples,
                    train_cm, valid_cm, epoch, n_epochs,
                    train_duration, train_loss, train_acc, train_f1,
                    valid_duration, valid_loss, valid_acc, valid_f1
                )
                # Save performance history (overwritten every epoch so the
                # file always reflects training so far).
                np.savez(
                    os.path.join(output_dir, "perf_fold{}.npz".format(self.fold_idx)),
                    train_loss=all_train_loss, valid_loss=all_valid_loss,
                    train_acc=all_train_acc, valid_acc=all_valid_acc,
                    train_f1=all_train_f1, valid_f1=all_valid_f1,
                    y_true_val=np.asarray(y_true_val),
                    y_pred_val=np.asarray(y_pred_val)
                )
                # Visualize weights from convolutional layers (the two regex
                # patterns match the two conv-variable naming schemes used).
                if ((epoch + 1) % self.interval_plot_filter == 0) or ((epoch + 1) == n_epochs):
                    self.plot_filters(sess, epoch, train_net.name + "(_[0-9])?\/l[0-9]+_conv\/(weights)", output_dir, 16)
                    self.plot_filters(sess, epoch, train_net.name + "(_[0-9])?/l[0-9]+_conv\/conv1d\/(weights)", output_dir, 16)
                # Save checkpoint (advance the step first so a restored run
                # resumes at epoch+1).
                sess.run(tf.compat.v1.assign(global_step, epoch+1))
                if ((epoch + 1) % self.interval_save_model == 0) or ((epoch + 1) == n_epochs):
                    start_time = time.time()
                    save_path = os.path.join(
                        output_dir, "model_fold{}.ckpt".format(self.fold_idx)
                    )
                    saver.save(sess, save_path, global_step=global_step)
                    duration = time.time() - start_time
                    print("Saved model checkpoint ({:.3f} sec)".format(duration))
                # Save parameters as a plain .npz of variable-name -> value
                if ((epoch + 1) % self.interval_save_model == 0) or ((epoch + 1) == n_epochs):
                    start_time = time.time()
                    save_dict = {}
                    for v in tf.compat.v1.global_variables():
                        save_dict[v.name] = sess.run(v)
                    np.savez(
                        os.path.join(
                            output_dir,
                            "params_fold{}.npz".format(self.fold_idx)),
                        **save_dict
                    )
                    duration = time.time() - start_time
                    print("Saved trained parameters ({:.3f} sec)".format(duration))
            print("Finish fine-tuning")
            return os.path.join(output_dir, "params_fold{}.npz".format(self.fold_idx))
| 42.879012
| 137
| 0.497092
| 3,759
| 34,732
| 4.288641
| 0.091514
| 0.021773
| 0.017989
| 0.012902
| 0.803548
| 0.773959
| 0.752311
| 0.73916
| 0.719682
| 0.719682
| 0
| 0.01286
| 0.406714
| 34,732
| 809
| 138
| 42.932015
| 0.769485
| 0.151676
| 0
| 0.693989
| 0
| 0
| 0.050723
| 0.005265
| 0
| 0
| 0
| 0
| 0.003643
| 1
| 0.018215
| false
| 0
| 0.025501
| 0
| 0.056466
| 0.091075
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
5059e219845b4a249306d11e0aeb71b92919c849
| 101
|
py
|
Python
|
tests/test_util.py
|
eikendev/dudendas
|
b03074deac55e4fb2eed105d2685a19c21651b2e
|
[
"MIT"
] | null | null | null |
tests/test_util.py
|
eikendev/dudendas
|
b03074deac55e4fb2eed105d2685a19c21651b2e
|
[
"MIT"
] | null | null | null |
tests/test_util.py
|
eikendev/dudendas
|
b03074deac55e4fb2eed105d2685a19c21651b2e
|
[
"MIT"
] | null | null | null |
from dudendas.util import *
def test_textify():
    """textify should strip surrounding whitespace and the trailing parenthetical."""
    raw = " foo bar (3a) "
    expected = "foo bar"
    assert textify(raw) == expected
| 16.833333
| 50
| 0.633663
| 14
| 101
| 4.5
| 0.785714
| 0.190476
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012658
| 0.217822
| 101
| 5
| 51
| 20.2
| 0.78481
| 0
| 0
| 0
| 0
| 0
| 0.217822
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
acf4b1f9aea318929865cb4e4d405e43d405a471
| 19
|
py
|
Python
|
rpiwepd/lib/__init__.py
|
genwch/rpiwepd
|
27ced8ef1255f17b475b231f6aefc1e4f8ab6a27
|
[
"MIT"
] | null | null | null |
rpiwepd/lib/__init__.py
|
genwch/rpiwepd
|
27ced8ef1255f17b475b231f6aefc1e4f8ab6a27
|
[
"MIT"
] | null | null | null |
rpiwepd/lib/__init__.py
|
genwch/rpiwepd
|
27ced8ef1255f17b475b231f6aefc1e4f8ab6a27
|
[
"MIT"
] | null | null | null |
from .epd import *
| 9.5
| 18
| 0.684211
| 3
| 19
| 4.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.210526
| 19
| 1
| 19
| 19
| 0.866667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4a078c75b2e45c2072b55df47666e43db044972b
| 348
|
py
|
Python
|
lazythumbs/tests/__init__.py
|
caktus/lazythumbs
|
006ac42f9f4ac600d4c85d0929f4e2c755d4f853
|
[
"MIT"
] | 1
|
2017-07-24T22:06:25.000Z
|
2017-07-24T22:06:25.000Z
|
lazythumbs/tests/__init__.py
|
caktus/lazythumbs
|
006ac42f9f4ac600d4c85d0929f4e2c755d4f853
|
[
"MIT"
] | null | null | null |
lazythumbs/tests/__init__.py
|
caktus/lazythumbs
|
006ac42f9f4ac600d4c85d0929f4e2c755d4f853
|
[
"MIT"
] | null | null | null |
from lazythumbs.tests.test_server import RenderTest, GetViewTest
from lazythumbs.tests.test_templatetag import LazythumbSyntaxTest, LazythumbGeometryCompileTest, LazythumbRenderTest
from lazythumbs.tests.test_templatetag import ImgAttrsRenderTest
from lazythumbs.tests.test_util import TestGeometry, TestComputeIMG, TestGetImgAttrs, TestGetFormat
| 69.6
| 116
| 0.893678
| 34
| 348
| 9.029412
| 0.529412
| 0.18241
| 0.247557
| 0.299674
| 0.260586
| 0.260586
| 0
| 0
| 0
| 0
| 0
| 0
| 0.066092
| 348
| 4
| 117
| 87
| 0.944615
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4a140e53ca915217379ec66d0341919746bb8c0d
| 58
|
py
|
Python
|
src/apps/trainings/viewsets/__init__.py
|
sanderland/katago-server
|
6414fab080d007c05068a06ff4f25907b92848bd
|
[
"MIT"
] | 27
|
2020-05-03T11:01:27.000Z
|
2022-03-17T05:33:10.000Z
|
src/apps/trainings/viewsets/__init__.py
|
sanderland/katago-server
|
6414fab080d007c05068a06ff4f25907b92848bd
|
[
"MIT"
] | 54
|
2020-05-09T01:18:41.000Z
|
2022-01-22T10:31:15.000Z
|
src/apps/trainings/viewsets/__init__.py
|
sanderland/katago-server
|
6414fab080d007c05068a06ff4f25907b92848bd
|
[
"MIT"
] | 9
|
2020-09-29T11:31:32.000Z
|
2022-03-09T01:37:50.000Z
|
from .network import NetworkViewSet, NetworkViewSetForElo
| 29
| 57
| 0.87931
| 5
| 58
| 10.2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.086207
| 58
| 1
| 58
| 58
| 0.962264
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4a22fcc3c8ad1a3e760e61dd35132bec7ac943e6
| 6,735
|
py
|
Python
|
src/pymap3d/ned.py
|
ryanpavlick/pymap3d
|
968f6837b1550503461f884d8ce2e1b10c0db1f4
|
[
"BSD-2-Clause"
] | 116
|
2020-02-23T02:04:18.000Z
|
2022-03-29T00:19:37.000Z
|
src/pymap3d/ned.py
|
ryanpavlick/pymap3d
|
968f6837b1550503461f884d8ce2e1b10c0db1f4
|
[
"BSD-2-Clause"
] | 19
|
2020-03-02T08:13:46.000Z
|
2022-03-30T17:50:00.000Z
|
src/pymap3d/ned.py
|
ryanpavlick/pymap3d
|
968f6837b1550503461f884d8ce2e1b10c0db1f4
|
[
"BSD-2-Clause"
] | 28
|
2020-02-24T11:56:03.000Z
|
2022-03-29T02:29:37.000Z
|
""" Transforms involving NED North East Down """
from __future__ import annotations
import typing
from .enu import geodetic2enu, aer2enu, enu2aer
from .ecef import ecef2geodetic, ecef2enuv, ecef2enu, enu2ecef
from .ellipsoid import Ellipsoid
if typing.TYPE_CHECKING:
from numpy import ndarray
def aer2ned(
    az: ndarray, elev: ndarray, slantRange: ndarray, deg: bool = True
) -> tuple[ndarray, ndarray, ndarray]:
    """
    Convert azimuth, elevation, slant range (observer to target) to
    North, East, Down coordinates.

    Parameters
    -----------
    az : float
        azimuth
    elev : float
        elevation
    slantRange : float
        slant range [meters]
    deg : bool, optional
        degrees input/output (False: radians in/out)

    Results
    -------
    n : float
        North NED coordinate (meters)
    e : float
        East NED coordinate (meters)
    d : float
        Down NED coordinate (meters)
    """
    # ENU -> NED: swap the horizontal axes and negate "up".
    east, north, up = aer2enu(az, elev, slantRange, deg=deg)
    return north, east, -up
def ned2aer(
    n: ndarray, e: ndarray, d: ndarray, deg: bool = True
) -> tuple[ndarray, ndarray, ndarray]:
    """
    Convert North, East, Down coordinates to azimuth, elevation, slant range.

    Parameters
    ----------
    n : float
        North NED coordinate (meters)
    e : float
        East NED coordinate (meters)
    d : float
        Down NED coordinate (meters)
    deg : bool, optional
        degrees input/output (False: radians in/out)

    Results
    -------
    az : float
        azimuth
    elev : float
        elevation
    slantRange : float
        slant range [meters]
    """
    # NED -> ENU: swap the horizontal axes and negate "down", then reuse enu2aer.
    east, north, up = e, n, -d
    return enu2aer(east, north, up, deg=deg)
def ned2geodetic(
    n: ndarray,
    e: ndarray,
    d: ndarray,
    lat0: ndarray,
    lon0: ndarray,
    h0: ndarray,
    ell: Ellipsoid | None = None,
    deg: bool = True,
) -> tuple[ndarray, ndarray, ndarray]:
    """
    Converts North, East, Down to target latitude, longitude, altitude

    Parameters
    ----------
    n : float
        North NED coordinate (meters)
    e : float
        East NED coordinate (meters)
    d : float
        Down NED coordinate (meters)
    lat0 : float
        Observer geodetic latitude
    lon0 : float
        Observer geodetic longitude
    h0 : float
        observer altitude above geodetic ellipsoid (meters)
    ell : Ellipsoid, optional
        reference ellipsoid
    deg : bool, optional
        degrees input/output (False: radians in/out)

    Results
    -------
    lat : float
        target geodetic latitude
    lon : float
        target geodetic longitude
    h : float
        target altitude above geodetic ellipsoid (meters)
    """
    # NED -> ENU is a horizontal-axis swap plus sign flip on "down";
    # then go ENU -> ECEF -> geodetic.
    x, y, z = enu2ecef(e, n, -d, lat0, lon0, h0, ell, deg=deg)
    return ecef2geodetic(x, y, z, ell, deg=deg)
def ned2ecef(
    n: ndarray,
    e: ndarray,
    d: ndarray,
    lat0: ndarray,
    lon0: ndarray,
    h0: ndarray,
    ell: Ellipsoid | None = None,
    deg: bool = True,
) -> tuple[ndarray, ndarray, ndarray]:
    """
    North, East, Down to target ECEF coordinates

    Parameters
    ----------
    n : float
        North NED coordinate (meters)
    e : float
        East NED coordinate (meters)
    d : float
        Down NED coordinate (meters)
    lat0 : float
        Observer geodetic latitude
    lon0 : float
        Observer geodetic longitude
    h0 : float
        observer altitude above geodetic ellipsoid (meters)
    ell : Ellipsoid, optional
        reference ellipsoid
    deg : bool, optional
        degrees input/output (False: radians in/out)

    Results
    -------
    x : float
        ECEF x coordinate (meters)
    y : float
        ECEF y coordinate (meters)
    z : float
        ECEF z coordinate (meters)
    """
    # NED -> ENU (swap horizontal axes, negate down), then delegate to enu2ecef.
    return enu2ecef(e, n, -d, lat0, lon0, h0, ell, deg=deg)
def ecef2ned(
    x: ndarray,
    y: ndarray,
    z: ndarray,
    lat0: ndarray,
    lon0: ndarray,
    h0: ndarray,
    ell: Ellipsoid | None = None,
    deg: bool = True,
) -> tuple[ndarray, ndarray, ndarray]:
    """
    Convert ECEF x,y,z to North, East, Down

    Parameters
    ----------
    x : float
        ECEF x coordinate (meters)
    y : float
        ECEF y coordinate (meters)
    z : float
        ECEF z coordinate (meters)
    lat0 : float
        Observer geodetic latitude
    lon0 : float
        Observer geodetic longitude
    h0 : float
        observer altitude above geodetic ellipsoid (meters)
    ell : Ellipsoid, optional
        reference ellipsoid
    deg : bool, optional
        degrees input/output (False: radians in/out)

    Results
    -------
    n : float
        North NED coordinate (meters)
    e : float
        East NED coordinate (meters)
    d : float
        Down NED coordinate (meters)
    """
    # ECEF -> ENU, then ENU -> NED (swap horizontal axes, negate up).
    e, n, u = ecef2enu(x, y, z, lat0, lon0, h0, ell, deg=deg)
    return n, e, -u
def geodetic2ned(
    lat: ndarray,
    lon: ndarray,
    h: ndarray,
    lat0: ndarray,
    lon0: ndarray,
    h0: ndarray,
    ell: Ellipsoid | None = None,
    deg: bool = True,
) -> tuple[ndarray, ndarray, ndarray]:
    """
    convert latitude, longitude, altitude of target to North, East, Down from observer

    Parameters
    ----------
    lat : float
        target geodetic latitude
    lon : float
        target geodetic longitude
    h : float
        target altitude above geodetic ellipsoid (meters)
    lat0 : float
        Observer geodetic latitude
    lon0 : float
        Observer geodetic longitude
    h0 : float
        observer altitude above geodetic ellipsoid (meters)
    ell : Ellipsoid, optional
        reference ellipsoid
    deg : bool, optional
        degrees input/output (False: radians in/out)

    Results
    -------
    n : float
        North NED coordinate (meters)
    e : float
        East NED coordinate (meters)
    d : float
        Down NED coordinate (meters)
    """
    # Geodetic -> ENU, then ENU -> NED (swap horizontal axes, negate up).
    e, n, u = geodetic2enu(lat, lon, h, lat0, lon0, h0, ell, deg=deg)
    return n, e, -u
def ecef2nedv(
    x: float, y: float, z: float, lat0: float, lon0: float, deg: bool = True
) -> tuple[float, float, float]:
    """
    Rotate an ECEF *vector* between two points into the NED frame.

    Parameters
    ----------
    x : float
        ECEF x coordinate (meters)
    y : float
        ECEF y coordinate (meters)
    z : float
        ECEF z coordinate (meters)
    lat0 : float
        Observer geodetic latitude
    lon0 : float
        Observer geodetic longitude
    deg : bool, optional
        degrees input/output (False: radians in/out)

    Results
    -------
    (Vector)
    n : float
        North NED coordinate (meters)
    e : float
        East NED coordinate (meters)
    d : float
        Down NED coordinate (meters)
    """
    # Rotate into ENU first, then map ENU -> NED (axis swap, up negated).
    east, north, up = ecef2enuv(x, y, z, lat0, lon0, deg=deg)
    return north, east, -up
| 22.009804
| 86
| 0.585449
| 770
| 6,735
| 5.114286
| 0.119481
| 0.121889
| 0.10132
| 0.055866
| 0.804469
| 0.7839
| 0.775775
| 0.76968
| 0.76968
| 0.754698
| 0
| 0.013931
| 0.317892
| 6,735
| 305
| 87
| 22.081967
| 0.843274
| 0.585895
| 0
| 0.529412
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.102941
| false
| 0
| 0.088235
| 0
| 0.294118
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
c5928fd8bf3c19beebe27419c9d9fea66df3cb49
| 3,262
|
py
|
Python
|
GrowingNeuralGasPlotter.py
|
lucasolip/UnsupervisedArchitectures
|
fd724cdbb6a0fba8274edba1f4de49a74ab315bd
|
[
"MIT"
] | null | null | null |
GrowingNeuralGasPlotter.py
|
lucasolip/UnsupervisedArchitectures
|
fd724cdbb6a0fba8274edba1f4de49a74ab315bd
|
[
"MIT"
] | null | null | null |
GrowingNeuralGasPlotter.py
|
lucasolip/UnsupervisedArchitectures
|
fd724cdbb6a0fba8274edba1f4de49a74ab315bd
|
[
"MIT"
] | null | null | null |
import matplotlib.pyplot as plt
import tensorflow as tf
class GrowingNeuralGasPlotter(object):
    """Static plotting helpers for visualizing a Growing Neural Gas network:
    data points, unit positions (A) and the edges connecting units."""

    @staticmethod
    def plotGraphConnectedComponent(pathFigure, nameFigure, A, N, X, edges):
        """Plot data X, units A and their edges, then save the figure as
        pathFigure//nameFigure.png and close it.

        Only 2-D and 3-D data (len(X[0]) of 2 or 3) are handled.
        NOTE(review): if len(X[0]) is neither 2 nor 3, `figure` is never
        assigned and savefig raises NameError — confirm callers guarantee
        2-D/3-D input.  `N` is unused in this body.
        """
        if len(X[0]) == 3:
            figure = plt.figure()
            axis = figure.add_subplot(projection='3d')
            axis.scatter(X[:, 0], X[:, 1], X[:, 2])
            # Pull unit coordinates out of the (presumably TF) tensor A
            # element by element via .numpy().
            x = [A[index][0].numpy() for index in tf.range(A.shape[0])]
            y = [A[index][1].numpy() for index in tf.range(A.shape[0])]
            z = [A[index][2].numpy() for index in tf.range(A.shape[0])]
            graphZero = axis.scatter(x, y, z)
            for edge in edges:
                axis.plot(edge[:, 0], edge[:, 1], edge[:, 2], 'r-')
        elif len(X[0]) == 2:
            figure = plt.figure()
            # NOTE(review): this 2-D branch also requests projection='3d';
            # likely intended to be a plain 2-D subplot — confirm.
            axis = figure.add_subplot(projection='3d')
            axis.scatter(X[:, 0], X[:, 1])
            x = [A[index][0].numpy() for index in tf.range(A.shape[0])]
            y = [A[index][1].numpy() for index in tf.range(A.shape[0])]
            graphZero = axis.scatter(x, y)
            for edge in edges:
                axis.plot(edge[:, 0], edge[:, 1], 'r-')
        # matplotlib.pyplot.show()
        figure.savefig(pathFigure + '//' + nameFigure + '.png', transparent=False, dpi=80, bbox_inches="tight")
        plt.close(figure)

    @staticmethod
    def plotNetworkStructure2D(A, X, edges, title="", save=False, pathFigure=".//", nameFigure="networkStructure2D"):
        """2-D scatter of data X (default color) and units A (red) with red
        edges; optionally saves to pathFigure//nameFigure.png."""
        fig = plt.figure()
        ax = fig.add_subplot()
        ax.scatter(X[:, 0], X[:, 1])
        ax.scatter(A[:, 0], A[:, 1], c='r')
        for edge in edges:
            ax.plot(edge[:, 0], edge[:, 1], c='r')
        ax.set_title(title)
        if save:
            fig.savefig(pathFigure + '//' + nameFigure + '.png', transparent=False, dpi=80, bbox_inches="tight")

    @staticmethod
    def plotNetworkStructure3D(A, X, edges, title="", save=False, pathFigure=".//", nameFigure="networkStructure2D"):
        """3-D variant of plotNetworkStructure2D.

        NOTE(review): 'r' is passed as the 4th positional argument of the
        3-D scatter (the zdir slot), unlike the 2-D method which uses c='r'
        — confirm whether c='r' was intended.  The default nameFigure still
        says "2D".
        """
        fig = plt.figure()
        ax = fig.add_subplot(projection='3d')
        ax.scatter(X[:, 0], X[:, 1], X[:, 2])
        ax.scatter(A[:, 0], A[:, 1], A[:, 2], 'r')
        for edge in edges:
            ax.plot(edge[:, 0], edge[:, 1], edge[:, 2], 'r-')
        ax.set_title(title)
        if save:
            fig.savefig(pathFigure + '//' + nameFigure + '.png', transparent=False, dpi=80, bbox_inches="tight")

    @staticmethod
    def plotClusters2D(growingNeuralGas, X, title=""):
        """2-D scatter of X, colored by growingNeuralGas.predict() per sample."""
        fig = plt.figure()
        ax = fig.add_subplot()
        clusters = [0 for i in range(X.shape[0])]
        for i in range(X.shape[0]):
            clusters[i] = growingNeuralGas.predict(X[i])
        ax.scatter(X[:, 0], X[:, 1], c=clusters)
        ax.set_title(title)

    @staticmethod
    def plotClusters3D(growingNeuralGas, X, title=""):
        """3-D scatter of X, colored by growingNeuralGas.predict() per sample."""
        fig = plt.figure()
        ax = fig.add_subplot(projection='3d')
        clusters = [0 for i in range(X.shape[0])]
        for i in range(X.shape[0]):
            clusters[i] = growingNeuralGas.predict(X[i])
        ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=clusters)
        ax.set_title(title)

    @staticmethod
    def show():
        """Display all pending matplotlib figures (blocking)."""
        plt.show()
| 39.780488
| 118
| 0.523605
| 416
| 3,262
| 4.074519
| 0.170673
| 0.031858
| 0.031858
| 0.035398
| 0.810619
| 0.810619
| 0.787611
| 0.787021
| 0.724484
| 0.710324
| 0
| 0.028808
| 0.29767
| 3,262
| 82
| 119
| 39.780488
| 0.711043
| 0.007357
| 0
| 0.57971
| 0
| 0
| 0.029151
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.086957
| false
| 0
| 0.028986
| 0
| 0.130435
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
c59e8e61f9141dc03f11a055fa4a295a8b6650b8
| 146
|
py
|
Python
|
build_gpcr/management/commands/build_endogenous_ligands.py
|
pszgaspar/protwis
|
4989a67175ef3c95047d795c843cf6b9cf4141fa
|
[
"Apache-2.0"
] | 21
|
2016-01-20T09:33:14.000Z
|
2021-12-20T19:19:45.000Z
|
build_gpcr/management/commands/build_endogenous_ligands.py
|
pszgaspar/protwis
|
4989a67175ef3c95047d795c843cf6b9cf4141fa
|
[
"Apache-2.0"
] | 75
|
2016-02-26T16:29:58.000Z
|
2022-03-21T12:35:13.000Z
|
build_gpcr/management/commands/build_endogenous_ligands.py
|
pszgaspar/protwis
|
4989a67175ef3c95047d795c843cf6b9cf4141fa
|
[
"Apache-2.0"
] | 77
|
2016-01-22T08:44:26.000Z
|
2022-02-01T15:54:56.000Z
|
from build.management.commands.build_endogenous_ligands import Command as BuildEndogenousLigands
class Command(BuildEndogenousLigands):
    """Django management-command alias: exposes the shared
    BuildEndogenousLigands command under this app's commands directory.
    All behavior is inherited unchanged."""
    pass
| 29.2
| 96
| 0.863014
| 15
| 146
| 8.266667
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.09589
| 146
| 5
| 97
| 29.2
| 0.939394
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
c5a64c18b42a86afdc12e1669501e0e9abbc3754
| 11,909
|
py
|
Python
|
Prob_WE/wasserstein/operators.py
|
albpurpura/PE4IR
|
54c5d471181cdb64225ecd738577b9f1f94c8d24
|
[
"Apache-2.0"
] | null | null | null |
Prob_WE/wasserstein/operators.py
|
albpurpura/PE4IR
|
54c5d471181cdb64225ecd738577b9f1f94c8d24
|
[
"Apache-2.0"
] | null | null | null |
Prob_WE/wasserstein/operators.py
|
albpurpura/PE4IR
|
54c5d471181cdb64225ecd738577b9f1f94c8d24
|
[
"Apache-2.0"
] | null | null | null |
"""
Author: Marco Maggipinto
Copyright: (C) 2019-2020 <http://www.dei.unipd.it/
Department of Information Engineering> (DEI), <http://www.unipd.it/ University of Padua>, Italy
License: <http://www.apache.org/licenses/LICENSE-2.0 Apache License, Version 2.0>
"""
import time
import torch
class BuresProduct:
    """Callable computing <a, b> plus the Bures metric between the
    covariance factors L_A and L_B (via the BuresMetric autograd Function)."""

    def __init__(self, e=1E-8, num_iters=20, reg=2.0):
        # e: diagonal jitter; num_iters/reg: matrix-square-root iteration settings,
        # forwarded verbatim to BuresMetric.apply.
        self.e = e
        self.num_iters = num_iters
        self.reg = reg

    def __call__(self, a, b, L_A, L_B):
        d = a.shape[1]
        # Batched row-by-row dot product of a and b, shape (batch, 1).
        dot = a.view(-1, 1, d).matmul(b.view(-1, d, 1)).squeeze(dim=2)
        bures_term = BuresMetric.apply(L_A, L_B, self.e, self.num_iters, self.reg)
        return dot + bures_term
class BuresProductNormalized:
    """Callable computing the cosine similarity of a and b plus the
    normalized Bures metric between covariance factors L_A and L_B."""

    def __init__(self, e=1E-8, num_iters=20, reg=2.0):
        # e: diagonal jitter; num_iters/reg: matrix-square-root iteration settings,
        # forwarded verbatim to BuresMetricNormalized.apply.
        self.e = e
        self.num_iters = num_iters
        self.reg = reg

    def __call__(self, a, b, L_A, L_B):
        d = a.shape[1]
        # Batched dot product divided by the product of row norms -> cosine, (batch, 1).
        cosine = a.view(-1, 1, d).matmul(b.view(-1, d, 1)).squeeze(dim=2) / (a.norm(dim=1) * b.norm(dim=1)).view(-1, 1)
        bures_term = BuresMetricNormalized.apply(L_A, L_B, self.e, self.num_iters, self.reg)
        return cosine + bures_term
class DistanceW2:
    """Squared Euclidean distance between mean vectors a and b.

    The covariance (Bures) contribution is currently disabled (see the
    commented-out term below), so only the squared distance between the
    means is returned.  L_A and L_B are accepted for interface parity with
    the other metric callables in this module but are unused.
    """

    def __init__(self, e=1E-8, num_iters=20, reg=2.0):
        # Kept for signature parity with BuresProduct*; unused while the
        # Bures term stays disabled.
        self.e = e
        self.num_iters = num_iters
        self.reg = reg

    def __call__(self, a, b, L_A, L_B):
        # Row-wise Euclidean distance, shape (batch, 1).
        diff = a - b
        dist = torch.norm(diff, dim=1, keepdim=True)
        # Disabled covariance contribution (kept for reference):
        # + 1/(dim+2) * BuresMetric2.apply(L_A, L_B, self.e, self.num_iters, self.reg)
        return dist ** 2
class BuresProductNormalizedModule(torch.nn.Module):
    """nn.Module form of BuresProductNormalized: cosine similarity of a and b
    plus the normalized Bures metric between covariance factors."""

    def __init__(self, e=1E-8, num_iters=20, reg=2.0):
        super().__init__()
        # e: diagonal jitter; num_iters/reg: matrix-square-root settings,
        # forwarded verbatim to BuresMetricNormalized.apply.
        self.e = e
        self.num_iters = num_iters
        self.reg = reg

    def forward(self, a, b, L_A, L_B):
        d = a.shape[1]
        # Batched dot product normalized by row norms -> cosine, (batch, 1).
        cosine = a.view(-1, 1, d).matmul(b.view(-1, d, 1)).squeeze(dim=2) / (a.norm(dim=1) * b.norm(dim=1)).view(-1, 1)
        bures_term = BuresMetricNormalized.apply(L_A, L_B, self.e, self.num_iters, self.reg)
        return cosine + bures_term
class BuresMetric(torch.autograd.Function):
    """Autograd Function computing tr((A^(1/2) B A^(1/2))^(1/2)) per batch
    element, where A = L_A L_A^T + e*I and B = L_B L_B^T + e*I, with a
    custom backward pass through the optimal-transport maps T_AB / T_BA."""

    @staticmethod
    def forward(ctx, L_A, L_B, e=1E-8, num_iters=20, reg=2.0):
        """L_A, L_B: (batch, dim, dim) covariance factors.  e adds diagonal
        jitter for numerical stability; num_iters/reg are forwarded to
        matrix_square_root.  Returns a (batch, 1) tensor of traces."""
        device = L_A.device
        batch_size = L_A.shape[0]
        dim = L_A.shape[1]
        if L_A.shape[2] != dim:
            raise Exception("Matrix must be square")
        # Symmetrize the factors into SPD matrices with e*I jitter.
        A = L_A.matmul(L_A.permute((0, 2, 1))) + e * torch.eye(dim).view((1, dim, dim)).repeat(batch_size, 1, 1).to(
            device)
        B = L_B.matmul(L_B.permute((0, 2, 1))) + e * torch.eye(dim).view((1, dim, dim)).repeat(batch_size, 1, 1).to(
            device)
        # matrix_square_root returns (sqrt, inverse-sqrt) pairs (Y, Z).
        Y1, Z1 = matrix_square_root(A, num_iters, reg)
        supp = Y1.matmul(B).matmul(Y1)
        Y2, Z2 = matrix_square_root(supp, num_iters, reg)
        # Transport-map-like products used by the backward pass.
        T_AB = Z1.matmul(Y2).matmul(Z1)
        T_BA = Y1.matmul(Z2).matmul(Y1)
        output = torch.zeros((batch_size, 1)).to(device)
        for i in range(batch_size):
            output[i, 0] = torch.trace(Y2[i, :, :])
        ctx.save_for_backward(L_A, L_B, T_AB, T_BA)
        return output

    @staticmethod
    def backward(ctx, grad_output):
        """Gradients w.r.t. L_A and L_B only; None for e/num_iters/reg."""
        L_A, L_B, T_AB, T_BA = ctx.saved_tensors
        device = L_A.device
        batch_size = L_A.shape[0]
        dim = L_A.shape[1]
        # NOTE(review): I is only needed by the commented-out gradient
        # variants below; the active formulas do not use it.
        I = torch.eye(dim).view((1, dim, dim)).repeat(batch_size, 1, 1).to(device)
        # grad_L_A = grad_output.expand(-1, dim * dim).view(-1, dim, dim) * (I - T_AB).matmul(L_A)
        # grad_L_B = grad_output.expand(-1, dim*dim).view(-1, dim,dim) * (I - T_BA).matmul(L_B)
        grad_L_A = grad_output.expand(-1, dim * dim).view(-1, dim, dim) * T_AB.matmul(L_A)
        grad_L_B = grad_output.expand(-1, dim * dim).view(-1, dim, dim) * T_BA.matmul(L_B)
        return grad_L_A, grad_L_B, None, None, None
class BuresMetricNormalized(torch.autograd.Function):
    """Variant of BuresMetric that divides each trace by
    sqrt(tr(A) * tr(B)), with the backward pass adjusted accordingly."""

    @staticmethod
    def forward(ctx, L_A, L_B, e=1E-8, num_iters=20, reg=2.0):
        """L_A, L_B: (batch, dim, dim) covariance factors.  Returns a
        (batch, 1) tensor of tr((A^(1/2) B A^(1/2))^(1/2)) / sqrt(trA*trB)."""
        device = L_A.device
        batch_size = L_A.shape[0]
        dim = L_A.shape[1]
        if L_A.shape[2] != dim:
            raise Exception("Matrix must be square")
        # Symmetrize the factors into SPD matrices with e*I jitter.
        A = L_A.matmul(L_A.permute((0, 2, 1))) + e * torch.eye(dim).view((1, dim, dim)).repeat(batch_size, 1, 1).to(
            device)
        B = L_B.matmul(L_B.permute((0, 2, 1))) + e * torch.eye(dim).view((1, dim, dim)).repeat(batch_size, 1, 1).to(
            device)
        # matrix_square_root returns (sqrt, inverse-sqrt) pairs (Y, Z).
        Y1, Z1 = matrix_square_root(A, num_iters, reg)
        supp = Y1.matmul(B).matmul(Y1)
        Y2, Z2 = matrix_square_root(supp, num_iters, reg)
        T_AB = Z1.matmul(Y2).matmul(Z1)
        T_BA = Y1.matmul(Z2).matmul(Y1)
        output = torch.zeros((batch_size, 1)).to(device)
        trA = torch.zeros((batch_size, 1)).to(device)
        trB = torch.zeros((batch_size, 1)).to(device)
        for i in range(batch_size):
            trA[i, 0] = torch.trace(A[i, :, :])
            trB[i, 0] = torch.trace(B[i, :, :])
            output[i, 0] = torch.trace(Y2[i, :, :]) / torch.sqrt(trA[i] * trB[i])
        # Traces are saved too: the backward pass needs them for the
        # normalization terms.
        ctx.save_for_backward(L_A, L_B, T_AB, T_BA, trA, trB, output)
        return output

    @staticmethod
    def backward(ctx, grad_output):
        """Gradients w.r.t. L_A and L_B only; None for e/num_iters/reg.
        Each gradient has a transport-map term scaled by 1/sqrt(trA*trB)
        plus a correction from differentiating the normalization."""
        L_A, L_B, T_AB, T_BA, trA, trB, output = ctx.saved_tensors
        device = L_A.device
        batch_size = L_A.shape[0]
        dim = L_A.shape[1]
        # NOTE(review): I is only needed by the commented-out variants
        # below; the active formulas do not use it.
        I = torch.eye(dim).view((1, dim, dim)).repeat(batch_size, 1, 1).to(device)
        # grad_L_A = grad_output.expand(-1, dim * dim).view(-1, dim, dim) * (I - T_AB).matmul(L_A)Normalized
        # grad_L_B = grad_output.expand(-1, dim*dim).view(-1, dim,dim) * (I - T_BA).matmul(L_B)
        grad_L_A = grad_output.expand(-1, dim * dim).view(-1, dim, dim) * T_AB.matmul(L_A) / (trA * trB).sqrt().view(
            batch_size, 1, 1) + \
            (output * trB / ((trA * trB) ** 3).sqrt()).view(-1, 1, 1) * L_A
        grad_L_B = grad_output.expand(-1, dim * dim).view(-1, dim, dim) * T_BA.matmul(L_B) / (trA * trB).sqrt().view(
            batch_size, 1, 1) + \
            (output * trA / ((trA * trB) ** 3).sqrt()).view(-1, 1, 1) * L_B
        return grad_L_A, grad_L_B, None, None, None
class BuresMetric2(torch.autograd.Function):
    """Autograd Function computing the squared Bures distance
    tr(A + B - 2*(A^(1/2) B A^(1/2))^(1/2)) per batch element, where
    A = L_A L_A^T + e*I and B = L_B L_B^T + e*I."""

    @staticmethod
    def forward(ctx, L_A, L_B, e=1E-8, num_iters=20, reg=2.0):
        """L_A, L_B: (batch, dim, dim) covariance factors.  e adds diagonal
        jitter; num_iters/reg are forwarded to matrix_square_root.
        Returns a (batch, 1) tensor of squared Bures distances."""
        device = L_A.device
        batch_size = L_A.shape[0]
        dim = L_A.shape[1]
        if L_A.shape[2] != dim:
            raise Exception("Matrix must be square")
        # Symmetrize the factors into SPD matrices with e*I jitter.
        A = L_A.matmul(L_A.permute((0, 2, 1))) + e * torch.eye(dim).view((1, dim, dim)).repeat(batch_size, 1, 1).to(
            device)
        B = L_B.matmul(L_B.permute((0, 2, 1))) + e * torch.eye(dim).view((1, dim, dim)).repeat(batch_size, 1, 1).to(
            device)
        # matrix_square_root returns (sqrt, inverse-sqrt) pairs (Y, Z).
        Y1, Z1 = matrix_square_root(A, num_iters, reg)
        supp = Y1.matmul(B).matmul(Y1)
        Y2, Z2 = matrix_square_root(supp, num_iters, reg)
        T_AB = Z1.matmul(Y2).matmul(Z1)
        T_BA = Y1.matmul(Z2).matmul(Y1)
        output = torch.zeros((batch_size, 1)).to(device)
        for i in range(batch_size):
            output[i, 0] = torch.trace(A[i, :, :] + B[i, :, :] - 2 * Y2[i, :, :])
        ctx.save_for_backward(L_A, L_B, T_AB, T_BA)
        return output

    @staticmethod
    def backward(ctx, grad_output):
        """Gradients w.r.t. L_A and L_B only; None for e/num_iters/reg.
        Unlike BuresMetric, this uses the (I - T) form of the gradient."""
        L_A, L_B, T_AB, T_BA = ctx.saved_tensors
        device = L_A.device
        batch_size = L_A.shape[0]
        dim = L_A.shape[1]
        I = torch.eye(dim).view((1, dim, dim)).repeat(batch_size, 1, 1).to(device)
        grad_L_A = grad_output.expand(-1, dim * dim).view(-1, dim, dim) * (I - T_AB).matmul(L_A)
        grad_L_B = grad_output.expand(-1, dim * dim).view(-1, dim, dim) * (I - T_BA).matmul(L_B)
        # grad_L_A = grad_output.expand(-1, dim*dim).view(-1, dim,dim) * T_AB.matmul(L_A)
        # grad_L_B = grad_output.expand(-1, dim*dim).view(-1, dim,dim) * T_BA.matmul(L_B)
        return grad_L_A, grad_L_B, None, None, None
class RelTol:
    """Tracks the relative change of a parameter tensor between calls.

    Each call returns ``||param - previous|| / ||param||`` and stores a
    detached copy of the current value for the next comparison.  The first
    call compares against zeros, so it returns 1 for any non-zero input.
    """

    def __init__(self, dim):
        # Previous snapshot; all-zeros means "nothing seen yet".
        self.old_param = torch.zeros((1, dim))

    def __call__(self, param):
        current = param.detach()
        previous = self.old_param.to(current.device)
        delta = (current - previous).norm()
        self.old_param = current.data.clone()
        return delta / current.norm()
def centroid(means, L, metric, doc_lengths, tol=1E-3, lrd=0.9999):
    """Fit per-document Gaussian centroids (mean mc, factor Lc) by SGD.

    Maximizes the mean ``metric`` similarity between each document's word
    Gaussians and its centroid, decaying the learning rate every step,
    until the relative change of Lc (per :class:`RelTol`) drops below
    ``tol``.

    Args:
        means: (n_words, dim) word means, grouped per document.
        L: (n_words, dim, dim) word covariance factors.
        metric: callable(mc, means, Lc, L) -> per-word similarity tensor.
        doc_lengths: number of words in each document; defines the grouping.
        tol: relative-change stopping threshold on Lc.
        lrd: multiplicative learning-rate decay applied after every step.

    Returns:
        (mc, Lc): centroid means (n_docs, dim) and factors
        (n_docs, dim, dim), both with requires_grad=True.
    """
    device = means.device
    dim = means.shape[1]
    t = RelTol(dim ** 2)
    n_docs = len(doc_lengths)
    Lc = torch.randn((n_docs, dim, dim), requires_grad=True, device=device)
    mc = torch.randn((n_docs, dim), requires_grad=True, device=device)
    tl = float('Inf')
    optimizer = torch.optim.SGD([Lc, mc], lr=0.1)
    # The expansion indices depend only on doc_lengths, so compute them once
    # instead of rebuilding the same tensor on every iteration.
    ind = get_indeces(doc_lengths)
    while tl > tol:
        optimizer.zero_grad()
        # Maximize similarity == minimize its negation.
        loss = -metric(mc[ind, :], means, Lc[ind, :, :], L).mean()
        loss.backward()
        optimizer.step()
        # Exponential learning-rate decay.
        for param_group in optimizer.param_groups:
            param_group['lr'] = param_group['lr'] * lrd
        tl = t(Lc.data.view(n_docs, -1))
    return mc, Lc
def centroid_alt(w, q, metric, doc_lengths, tol=1E-3, lrd=0.9999, dim=50):
    """Like :func:`centroid`, but reads word Gaussians from an embedding matrix.

    Each row of ``w`` packs a mean (first ``dim`` entries) followed by a
    flattened (dim, dim) covariance factor; ``q`` holds the token ids to
    look up.  ``dim`` was previously hard-coded to 50 and is now a
    backward-compatible keyword argument.

    Args:
        w: (vocab, dim + dim*dim) packed embedding matrix.
        q: token-id index tensor selecting the words of all documents.
        metric: callable(mc, m, Lc, v) -> per-word similarity tensor.
        doc_lengths: number of words in each document.
        tol: relative-change stopping threshold on Lc.
        lrd: multiplicative learning-rate decay applied after every step.
        dim: Gaussian dimensionality (default 50, the old hard-coded value).

    Returns:
        (mc, Lc): centroid means (n_docs, dim) and factors
        (n_docs, dim, dim), both with requires_grad=True.
    """
    device = w.device
    t = RelTol(dim ** 2)
    n_docs = len(doc_lengths)
    Lc = torch.randn((n_docs, dim, dim), requires_grad=True, device=device)
    mc = torch.randn((n_docs, dim), requires_grad=True, device=device)
    tl = float('Inf')
    optimizer = torch.optim.SGD([Lc, mc], lr=0.1)
    while tl > tol:
        # Unpack means and covariance factors for the selected tokens.
        m, v = (w[q, 0:dim].view(-1, dim), w[q, dim:].view((-1, dim, dim)))
        ind = get_indeces(doc_lengths)
        optimizer.zero_grad()
        loss = -metric(mc[ind, :], m, Lc[ind, :, :], v).mean()
        loss.backward()
        optimizer.step()
        for param_group in optimizer.param_groups:
            param_group['lr'] = param_group['lr'] * lrd
        tl = t(Lc.data.view(n_docs, -1))
    return mc, Lc
def get_indeces(doc_lenghts):
    """Build a flat index tensor mapping each word to its document.

    For ``doc_lenghts=[2, 3]`` returns ``tensor([0, 0, 1, 1, 1])``.
    (The misspelled public name/parameter are kept for compatibility.)

    Args:
        doc_lenghts: sequence of per-document word counts.

    Returns:
        1-D LongTensor of length ``sum(doc_lenghts)``.
    """
    lengths = torch.as_tensor(doc_lenghts, dtype=torch.long)
    # repeat_interleave replaces the per-document loop + cat and, unlike
    # torch.cat on an empty list, also handles zero documents.
    return torch.repeat_interleave(torch.arange(len(doc_lenghts)), lengths)
def matrix_square_root(A, num_iters=20, reg=2.0):
    """Batched matrix square root via the Newton-Schulz iteration.

    Returns ``(A^1/2, A^-1/2)`` for every matrix in the batch.  The input
    is detached, so no autograd graph is built through the iteration; the
    matrices are pre-scaled by ``reg`` times their Frobenius norm so the
    iteration converges.

    Args:
        A: (batch, dim, dim) matrices.
        num_iters: number of Newton-Schulz steps.
        reg: scaling safety factor applied to the Frobenius norm.

    Returns:
        (sqrtA, sqrtAinv): both (batch, dim, dim).

    Raises:
        Exception: if the matrices are not square.
    """
    A = A.detach()
    device = A.device
    batch_size, dim = A.shape[0], A.shape[1]
    if A.shape[2] != dim:
        raise Exception("Matrix must be square")
    # Scale towards unit norm; undone via sqrt(normA) at the end.
    normA = reg * frobenius(A)
    Y = A.view(batch_size, -1).div(normA).view(batch_size, dim, dim)
    I = torch.eye(dim).reshape(1, dim, dim).repeat(batch_size, 1, 1).to(device)
    Z = I.clone()
    for _ in range(num_iters):
        T = 0.5 * (3.0 * I - Z.matmul(Y))
        Y = Y.matmul(T)  # converges to sqrt of the scaled input
        Z = T.matmul(Z)  # converges to inverse sqrt of the scaled input
    scale = torch.sqrt(normA)
    sqrtA = (Y.view(batch_size, -1) * scale).view(batch_size, dim, dim)
    sqrtAinv = Z.view(batch_size, -1).div(scale).view(batch_size, dim, dim)
    return sqrtA, sqrtAinv
def frobenius(A):
    """Per-matrix Frobenius norm of a batched tensor.

    Args:
        A: (batch, ...) tensor; all trailing axes are flattened.

    Returns:
        (batch, 1) tensor of norms.
    """
    n = A.shape[0]
    flat = A.reshape(n, -1)
    return flat.norm(dim=1).reshape(n, 1)
| 37.332288
| 121
| 0.571752
| 1,951
| 11,909
| 3.311123
| 0.093798
| 0.018576
| 0.039009
| 0.039164
| 0.823375
| 0.797368
| 0.789938
| 0.78065
| 0.78065
| 0.76548
| 0
| 0.033883
| 0.261483
| 11,909
| 318
| 122
| 37.449686
| 0.700625
| 0.119993
| 0
| 0.640351
| 0
| 0
| 0.009392
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.092105
| false
| 0
| 0.008772
| 0
| 0.20614
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a869172e2eed90c5a66784f962ea72e355055a89
| 14,012
|
py
|
Python
|
tests/schema/mysql/test_mysql_schema.py
|
mandarvaze/orm
|
35b3858caafab91c8690fc325a9472c04de4d00b
|
[
"MIT"
] | null | null | null |
tests/schema/mysql/test_mysql_schema.py
|
mandarvaze/orm
|
35b3858caafab91c8690fc325a9472c04de4d00b
|
[
"MIT"
] | null | null | null |
tests/schema/mysql/test_mysql_schema.py
|
mandarvaze/orm
|
35b3858caafab91c8690fc325a9472c04de4d00b
|
[
"MIT"
] | null | null | null |
from src.masonite.orm.grammar.mysql_grammar import MySQLGrammar
from src.masonite.orm.blueprint.Blueprint import Blueprint
from src.masonite.orm.grammar.GrammarFactory import GrammarFactory
from src.masonite.orm.schema.Schema import Schema
import unittest, inspect
class BaseTestCreateGrammar:
    """Grammar-agnostic schema-compilation tests.

    Each ``test_X`` method builds a blueprint and compares its compiled SQL
    against the string returned by the subclass-supplied method ``X``
    (resolved by :meth:`_expected_sql`), so one suite of tests runs against
    every grammar backend.
    """

    def setUp(self):
        self.schema = Schema.on("mysql")

    def _expected_sql(self):
        """Return the expected SQL for the calling test.

        The caller's frame name ``test_X`` is mapped to the subclass method
        ``X``; this replaces the ``inspect.currentframe()`` boilerplate that
        was previously copy-pasted into every test.
        """
        caller = inspect.currentframe().f_back.f_code.co_name
        return getattr(self, caller.replace("test_", ""))()

    def test_can_compile_column(self):
        with self.schema.create("users") as blueprint:
            blueprint.string("name")
        self.assertEqual(blueprint.to_sql(), self._expected_sql())

    def test_can_compile_column_constraint(self):
        with self.schema.create("users") as blueprint:
            blueprint.string("name").unique()
        self.assertEqual(blueprint.to_sql(), self._expected_sql())

    def test_can_compile_multiple_columns(self):
        with self.schema.create("users") as blueprint:
            blueprint.string("name").nullable()
            blueprint.integer("age")
        self.assertEqual(blueprint.to_sql(), self._expected_sql())

    def test_can_compile_not_null(self):
        with self.schema.create("users") as blueprint:
            blueprint.string("name")
        self.assertEqual(blueprint.to_sql(), self._expected_sql())

    # NOTE: the original file defined this test twice, verbatim; the
    # shadowing duplicate has been removed.
    def test_can_compile_primary_key(self):
        with self.schema.create("users") as blueprint:
            blueprint.increments("id")
            blueprint.string("name")
        self.assertEqual(blueprint.to_sql(), self._expected_sql())

    def test_can_compile_multiple_constraints(self):
        with self.schema.create("users") as blueprint:
            blueprint.increments("id")
            blueprint.string("name").unique()
        self.assertEqual(blueprint.to_sql(), self._expected_sql())

    def test_can_compile_enum(self):
        with self.schema.create("users") as blueprint:
            blueprint.enum("age", [1, 2, 3]).nullable()
        self.assertEqual(blueprint.to_sql(), self._expected_sql())

    def test_column_exists(self):
        to_sql = self.schema.has_column("users", "email", query_only=True)
        self.assertEqual(to_sql, self._expected_sql())

    def test_drop_table(self):
        to_sql = self.schema.drop_table("users", query_only=True)
        self.assertEqual(to_sql, self._expected_sql())

    def test_drop_table_if_exists(self):
        to_sql = self.schema.drop_table_if_exists("users", query_only=True)
        self.assertEqual(to_sql, self._expected_sql())

    def test_drop_column(self):
        with self.schema.table("users") as blueprint:
            blueprint.drop_column("name")
        self.assertEqual(blueprint.to_sql(), self._expected_sql())

    def test_can_compile_large_blueprint(self):
        with self.schema.create("users") as blueprint:
            blueprint.string("name")
            blueprint.string("email")
            blueprint.string("password")
            blueprint.integer("age").nullable()
            blueprint.enum("type", ["Open", "Closed"])
            blueprint.datetime("pick_up")
            blueprint.binary("profile")
            blueprint.boolean("of_age")
            blueprint.char("first_initial", length=4)
            blueprint.date("birthday")
            blueprint.decimal("credit", 17, 6)
            blueprint.text("description")
            blueprint.unsigned("bank").nullable()
        self.assertEqual(blueprint.to_sql(), self._expected_sql())

    def test_can_compile_timestamps_columns_with_default(self):
        with self.schema.create("users") as blueprint:
            blueprint.timestamps()
        self.assertEqual(blueprint.to_sql(), self._expected_sql())

    def test_can_compile_timestamp_column_without_default(self):
        with self.schema.create("users") as blueprint:
            blueprint.timestamp("logged_at")
        self.assertEqual(blueprint.to_sql(), self._expected_sql())

    def test_can_compile_timestamps_columns_mixed_defaults_and_not_default(self):
        with self.schema.create("users") as blueprint:
            blueprint.timestamps()
            blueprint.timestamp("logged_at")
            blueprint.timestamp("expirated_at")
        self.assertEqual(blueprint.to_sql(), self._expected_sql())

    def test_can_compile_timestamp_nullable_columns(self):
        with self.schema.create("users") as blueprint:
            blueprint.timestamp("logged_at")
            blueprint.timestamp("expirated_at").nullable()
        self.assertEqual(blueprint.to_sql(), self._expected_sql())

    def test_can_compile_timestamps_columns_with_default_of_now(self):
        with self.schema.create("users") as blueprint:
            blueprint.timestamp("logged_at", now=True)
        self.assertEqual(blueprint.to_sql(), self._expected_sql())
class TestMySQLCreateGrammar(BaseTestCreateGrammar, unittest.TestCase):
    """Expected MySQL statements for the shared grammar test suite.

    Each method ``X`` returns the SQL that base-class test ``test_X`` should
    compile to; the docstrings show the blueprint that produces it.
    """

    def setUp(self):
        self.schema = Schema.on("mysql")

    def can_compile_column(self):
        """
        with self.schema.create('users') as blueprint:
            blueprint.string('name')
        """
        return "CREATE TABLE `users` (`name` VARCHAR(255) NOT NULL)"

    def can_compile_column_constraint(self):
        """
        with self.schema.create('users') as blueprint:
            blueprint.string('name').unique()
        """
        return "CREATE TABLE `users` (`name` VARCHAR(255) NOT NULL, CONSTRAINT name_unique UNIQUE (name))"

    def can_compile_multiple_columns(self):
        """
        with self.schema.create('users') as blueprint:
            blueprint.string('name').nullable()
            blueprint.integer('age')
        """
        return (
            "CREATE TABLE `users` ("
            "`name` VARCHAR(255), "
            "`age` INT(11) NOT NULL"
            ")"
        )

    def can_compile_not_null(self):
        """
        with self.schema.create('users') as blueprint:
            blueprint.string('name')
        """
        return "CREATE TABLE `users` (" "`name` VARCHAR(255) NOT NULL" ")"

    # NOTE: the original file defined this method twice, verbatim; the
    # shadowing duplicate has been removed.
    def can_compile_primary_key(self):
        """
        with self.schema.create('users') as blueprint:
            blueprint.increments('id')
            blueprint.string('name')
        """
        return (
            "CREATE TABLE `users` ("
            "`id` INT AUTO_INCREMENT PRIMARY KEY NOT NULL, "
            "`name` VARCHAR(255) NOT NULL"
            ")"
        )

    def can_compile_multiple_constraints(self):
        """
        with self.schema.create('users') as blueprint:
            blueprint.increments('id')
            blueprint.string('name').unique()
        """
        return (
            "CREATE TABLE `users` ("
            "`id` INT AUTO_INCREMENT PRIMARY KEY NOT NULL, "
            "`name` VARCHAR(255) NOT NULL, "
            "CONSTRAINT name_unique UNIQUE (name)"
            ")"
        )

    def can_compile_enum(self):
        """
        with self.schema.create('users') as blueprint:
            blueprint.enum('age', [1,2,3]).nullable()
        """
        return "CREATE TABLE `users` (" "`age` ENUM('1','2','3')" ")"

    def column_exists(self):
        """
        self.schema.has_column('users', 'email', query_only=True)
        """
        return "SHOW COLUMNS FROM `users` LIKE 'email'"

    def drop_table(self):
        """
        to_sql = self.schema.drop_table('users', query_only=True)
        """
        return "DROP TABLE `users`"

    def drop_table_if_exists(self):
        """
        to_sql = self.schema.drop_table_if_exists('users', query_only=True)
        """
        return "DROP TABLE IF EXISTS `users`"

    def drop_column(self):
        """
        with self.schema.table('users') as blueprint:
            blueprint.drop_column('name')
        """
        return "ALTER TABLE `users` " "DROP COLUMN `name`"

    def can_compile_large_blueprint(self):
        """
        with self.schema.create('users') as blueprint:
            blueprint.string('name')
            blueprint.string('email')
            blueprint.string('password')
            blueprint.integer('age').nullable()
            blueprint.enum('type', ['Open', 'Closed'])
            blueprint.datetime('pick_up')
            blueprint.binary('profile')
            blueprint.boolean('of_age')
            blueprint.char('first_initial', length=4)
            blueprint.date('birthday')
            blueprint.decimal('credit', 17,6)
            blueprint.text('description')
            blueprint.unsigned('bank').nullable()
        """
        return (
            "CREATE TABLE `users` ("
            "`name` VARCHAR(255) NOT NULL, "
            "`email` VARCHAR(255) NOT NULL, "
            "`password` VARCHAR(255) NOT NULL, "
            "`age` INT(11), "
            "`type` ENUM('Open','Closed') NOT NULL, "
            "`pick_up` DATETIME NOT NULL, "
            "`profile` LONGBLOB NOT NULL, "
            "`of_age` BOOLEAN NOT NULL, "
            "`first_initial` CHAR(4) NOT NULL, "
            "`birthday` DATE NOT NULL, "
            "`credit` DECIMAL(17, 6) NOT NULL, "
            "`description` TEXT NOT NULL, "
            "`bank` INT UNSIGNED"
            ")"
        )

    def test_default_string_length(self):
        """Changing Schema's default string length alters compiled VARCHAR sizes.

        The original version returned the expected SQL mid-test, leaving the
        remaining assertions unreachable (and referencing an undefined
        ``sql``); it is restructured here into real assertions.
        """
        with self.schema.table("users") as blueprint:
            blueprint.string("name")
            self.assertEqual(str(blueprint._columns[0].length), "255")
        self.assertEqual(
            blueprint.to_sql(), "ALTER TABLE `users` ADD `name` VARCHAR(255) NOT NULL"
        )
        Schema.set_default_string_length("191")
        try:
            with self.schema.table("users") as blueprint:
                blueprint.string("name")
                self.assertEqual(str(blueprint._columns[0].length), "191")
            self.assertEqual(
                blueprint.to_sql(), "ALTER TABLE `users` ADD `name` VARCHAR(191) NOT NULL"
            )
        finally:
            # Restore the default so other tests are unaffected.
            Schema.set_default_string_length("255")

    def can_compile_timestamps_columns_with_default(self):
        """
        with self.schema.create('users') as blueprint:
            blueprint.timestamps()
        """
        return (
            "CREATE TABLE `users` ("
            "`created_at` TIMESTAMP DEFAULT CURRENT_TIMESTAMP, "
            "`updated_at` TIMESTAMP DEFAULT CURRENT_TIMESTAMP"
            ")"
        )

    def can_compile_timestamps_columns_with_default_of_now(self):
        """
        with self.schema.create('users') as blueprint:
            blueprint.timestamp('logged_at', now=True)
        """
        return "CREATE TABLE `users` (" "`logged_at` TIMESTAMP DEFAULT NOW()" ")"

    def can_compile_timestamp_column_without_default(self):
        """
        with self.schema.create('users') as blueprint:
            blueprint.timestamp('logged_at')
        """
        return "CREATE TABLE `users` (" "`logged_at` TIMESTAMP NOT NULL" ")"

    def can_compile_timestamps_columns_mixed_defaults_and_not_default(self):
        """
        with self.schema.create('users') as blueprint:
            blueprint.timestamps()
            blueprint.timestamp('logged_at')
            blueprint.timestamp('expirated_at')
        """
        return (
            "CREATE TABLE `users` ("
            "`created_at` TIMESTAMP DEFAULT CURRENT_TIMESTAMP, "
            "`updated_at` TIMESTAMP DEFAULT CURRENT_TIMESTAMP, "
            "`logged_at` TIMESTAMP NOT NULL, "
            "`expirated_at` TIMESTAMP NOT NULL"
            ")"
        )

    def can_compile_timestamp_nullable_columns(self):
        """
        with self.schema.create('users') as blueprint:
            blueprint.timestamp('logged_at')
            blueprint.timestamp('expirated_at').nullable()
        """
        return (
            "CREATE TABLE `users` ("
            "`logged_at` TIMESTAMP NOT NULL, "
            "`expirated_at` TIMESTAMP"
            ")"
        )
| 32.662005
| 106
| 0.58143
| 1,488
| 14,012
| 5.284274
| 0.083333
| 0.050871
| 0.056976
| 0.101742
| 0.908178
| 0.895205
| 0.893298
| 0.868625
| 0.850566
| 0.811395
| 0
| 0.007204
| 0.286683
| 14,012
| 428
| 107
| 32.738318
| 0.77949
| 0.149301
| 0
| 0.592157
| 0
| 0.003922
| 0.193893
| 0.001869
| 0
| 0
| 0
| 0
| 0.086275
| 1
| 0.152941
| false
| 0.007843
| 0.019608
| 0
| 0.258824
| 0.294118
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a89a53e13ecb73b4d2bf5fea96f5746f2db7b3f1
| 162
|
py
|
Python
|
app/config/views.py
|
maro99/yapen
|
0de7aa9d4b152aadd18511be6e536e89645452d9
|
[
"MIT"
] | 1
|
2019-04-28T12:21:51.000Z
|
2019-04-28T12:21:51.000Z
|
app/config/views.py
|
maro99/yapen
|
0de7aa9d4b152aadd18511be6e536e89645452d9
|
[
"MIT"
] | 5
|
2018-07-30T05:44:44.000Z
|
2020-06-05T18:56:41.000Z
|
app/config/views.py
|
maro99/yapen
|
0de7aa9d4b152aadd18511be6e536e89645452d9
|
[
"MIT"
] | 5
|
2018-07-23T05:21:41.000Z
|
2018-08-08T05:00:42.000Z
|
from django.http import HttpResponse
from django.shortcuts import render, redirect
def index(request):
    """Render the pension list landing page."""
    template_name = 'pensions/pensions_list.html'
    return render(request, template_name)
| 27
| 57
| 0.802469
| 21
| 162
| 6.142857
| 0.714286
| 0.155039
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.117284
| 162
| 6
| 57
| 27
| 0.902098
| 0
| 0
| 0
| 0
| 0
| 0.165644
| 0.165644
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.5
| 0.25
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
a8b92df4236143df4b3c01c4d048c78f795db432
| 127
|
py
|
Python
|
t.py
|
vansin/pix2code
|
caa3d2bcba9944ca9e9439f7551c8440c0087b8a
|
[
"Apache-2.0"
] | null | null | null |
t.py
|
vansin/pix2code
|
caa3d2bcba9944ca9e9439f7551c8440c0087b8a
|
[
"Apache-2.0"
] | null | null | null |
t.py
|
vansin/pix2code
|
caa3d2bcba9944ca9e9439f7551c8440c0087b8a
|
[
"Apache-2.0"
] | null | null | null |
import tensorflow as tf

# Name of the first GPU device, or the empty string when none is visible.
gpu_device_name = tf.test.gpu_device_name()
#print(gpu_device_name)
# NOTE(review): tf.test.is_gpu_available() is deprecated in TF2 in favour of
# tf.config.list_physical_devices('GPU') -- confirm the TF version in use
# before migrating this call.
print(tf.test.is_gpu_available())
| 21.166667
| 43
| 0.811024
| 22
| 127
| 4.318182
| 0.5
| 0.284211
| 0.410526
| 0.378947
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.07874
| 127
| 6
| 44
| 21.166667
| 0.811966
| 0.173228
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0.333333
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
a8cbab364f4bef461d1cad7e23b10052916535a2
| 45
|
py
|
Python
|
src/flask_pyoidc/__init__.py
|
infohash/Flask-pyoidc
|
7d50c3e4bb5298044752e07aea8e2d2d00d29b1b
|
[
"Apache-2.0"
] | 64
|
2017-01-31T09:08:15.000Z
|
2021-12-21T21:05:45.000Z
|
src/flask_pyoidc/__init__.py
|
infohash/Flask-pyoidc
|
7d50c3e4bb5298044752e07aea8e2d2d00d29b1b
|
[
"Apache-2.0"
] | 99
|
2017-02-08T22:38:54.000Z
|
2022-03-31T22:03:27.000Z
|
src/flask_pyoidc/__init__.py
|
infohash/Flask-pyoidc
|
7d50c3e4bb5298044752e07aea8e2d2d00d29b1b
|
[
"Apache-2.0"
] | 33
|
2017-02-09T18:19:51.000Z
|
2021-12-24T17:48:52.000Z
|
from .flask_pyoidc import OIDCAuthentication
| 22.5
| 44
| 0.888889
| 5
| 45
| 7.8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.088889
| 45
| 1
| 45
| 45
| 0.95122
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7633fdcafcc2291c659fc2d7c33f8c232d071a76
| 102
|
py
|
Python
|
simulations/ecg/p.py
|
sensomatrix/sensocore
|
f2e94b7dae6ab3e95785c4c1b363e49aab23ddab
|
[
"MIT"
] | 2
|
2019-04-02T00:17:57.000Z
|
2019-08-20T05:21:46.000Z
|
simulations/ecg/p.py
|
sensomatrix/sensocore
|
f2e94b7dae6ab3e95785c4c1b363e49aab23ddab
|
[
"MIT"
] | 13
|
2019-04-01T00:37:01.000Z
|
2020-10-04T00:50:01.000Z
|
simulations/ecg/p.py
|
sensomatrix/sensocore
|
f2e94b7dae6ab3e95785c4c1b363e49aab23ddab
|
[
"MIT"
] | null | null | null |
import math
def p(M_P, t_P, W_P, t):
    """Gaussian pulse with amplitude M_P, centre t_P and width W_P, at time t."""
    z = (t - t_P) / (math.sqrt(2) * W_P)
    return M_P * math.exp(-(z * z))
| 20.4
| 66
| 0.568627
| 26
| 102
| 2
| 0.461538
| 0.076923
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.035714
| 0.176471
| 102
| 4
| 67
| 25.5
| 0.583333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
4f50e42e960f390840fab47ebb82d938368fc417
| 1,688
|
py
|
Python
|
tests/test_gke.py
|
andrewpsp/astrobase
|
7adaa7c23bab3dbfdd90e18b5a10ed114b20bedd
|
[
"Apache-2.0"
] | 34
|
2021-04-12T02:56:07.000Z
|
2022-03-24T21:56:58.000Z
|
tests/test_gke.py
|
andrewpsp/astrobase
|
7adaa7c23bab3dbfdd90e18b5a10ed114b20bedd
|
[
"Apache-2.0"
] | 2
|
2021-12-29T04:07:24.000Z
|
2022-03-16T06:05:05.000Z
|
tests/test_gke.py
|
andrewpsp/astrobase
|
7adaa7c23bab3dbfdd90e18b5a10ed114b20bedd
|
[
"Apache-2.0"
] | 1
|
2021-05-30T03:59:07.000Z
|
2021-05-30T03:59:07.000Z
|
from unittest import mock
from tests.factories import ClusterFactory
cluster_examples = ClusterFactory()
def test_create_cluster(client):
    """POST /gke returns the cluster name reported by the (mocked) GKE API."""
    patched = mock.patch("astrobase.apis.gke.GKEApi.make_create_request")
    with patched as mock_gke_api_request:
        mock_gke_api_request.return_value = {"name": "astrobase-gke-api"}
        response = client.post("/gke", json=cluster_examples.gke_example())
        assert response.status_code == 200
        assert response.json().get("name") == "astrobase-gke-api"
def test_get_clusters(client):
    """GET /gke lists clusters for a given project/location pair."""
    patched = mock.patch("astrobase.apis.gke.GKEApi.make_get_request")
    with patched as mock_gke_api_request:
        mock_gke_api_request.return_value = {"name": "astrobase-gke-api"}
        response = client.get(
            "/gke?project_id=test&location=us-central1",
            json=cluster_examples.gke_example(),
        )
        assert response.status_code == 200
        assert response.json().get("name") == "astrobase-gke-api"
def test_describe_cluster(client):
    """GET /gke/<name> returns a single cluster description."""
    patched = mock.patch("astrobase.apis.gke.GKEApi.make_describe_request")
    with patched as mock_gke_api_request:
        mock_gke_api_request.return_value = {"name": "astrobase-gke-api"}
        response = client.get(
            "/gke/astrobase-gke-api?project_id=test&location=us-central1"
        )
        assert response.status_code == 200
        assert response.json().get("name") == "astrobase-gke-api"
def test_delete_clister(client):
    """DELETE /gke/<name> succeeds with a mocked delete request.

    NOTE(review): the function name is a typo for ``test_delete_cluster``;
    kept because pytest discovers tests by name.
    """
    with mock.patch("astrobase.apis.gke.GKEApi.make_delete_request"):
        response = client.delete(
            "/gke/astrobase-gke-api?project_id=test&location=us-central1"
        )
    assert response.status_code == 200
| 34.44898
| 75
| 0.675355
| 211
| 1,688
| 5.170616
| 0.208531
| 0.076994
| 0.109991
| 0.093492
| 0.824931
| 0.824931
| 0.796517
| 0.796517
| 0.796517
| 0.714024
| 0
| 0.011169
| 0.204384
| 1,688
| 48
| 76
| 35.166667
| 0.801191
| 0
| 0
| 0.526316
| 0
| 0
| 0.277251
| 0.200237
| 0
| 0
| 0
| 0
| 0.184211
| 1
| 0.105263
| false
| 0
| 0.052632
| 0
| 0.157895
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
8c23ca0439fd9a2cd9b86c5ac82d687887c84d5e
| 145
|
py
|
Python
|
client_wishlist/wishlist/admin.py
|
EVolpert/client_whishlist
|
b2da64a53e978bc77bc4fb9a8c9b9dc4af66c5b1
|
[
"CC0-1.0"
] | null | null | null |
client_wishlist/wishlist/admin.py
|
EVolpert/client_whishlist
|
b2da64a53e978bc77bc4fb9a8c9b9dc4af66c5b1
|
[
"CC0-1.0"
] | 5
|
2021-03-30T14:20:02.000Z
|
2021-09-22T19:29:15.000Z
|
client_wishlist/wishlist/admin.py
|
EVolpert/client_whishlist
|
b2da64a53e978bc77bc4fb9a8c9b9dc4af66c5b1
|
[
"CC0-1.0"
] | 1
|
2020-08-18T16:35:12.000Z
|
2020-08-18T16:35:12.000Z
|
from django.contrib import admin
from wishlist.models import Wishlist
@admin.register(Wishlist)
class WishlistAdmin(admin.ModelAdmin):
pass
| 20.714286
| 38
| 0.813793
| 18
| 145
| 6.555556
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.117241
| 145
| 7
| 39
| 20.714286
| 0.921875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.2
| 0.4
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
8b22e4d1c248a5fd419cb4dc89a783f169ce6d7c
| 10,955
|
py
|
Python
|
tests/api/test_communities.py
|
LEv145/python-twitch-client
|
f1f45cf8afb1da1335b8f29023d4db5855f009bf
|
[
"MIT"
] | 171
|
2017-02-25T19:22:22.000Z
|
2022-02-13T22:23:14.000Z
|
tests/api/test_communities.py
|
LEv145/python-twitch-client
|
f1f45cf8afb1da1335b8f29023d4db5855f009bf
|
[
"MIT"
] | 55
|
2017-03-13T01:41:50.000Z
|
2022-02-11T20:38:54.000Z
|
tests/api/test_communities.py
|
LEv145/python-twitch-client
|
f1f45cf8afb1da1335b8f29023d4db5855f009bf
|
[
"MIT"
] | 68
|
2017-02-25T20:07:46.000Z
|
2022-02-04T16:48:47.000Z
|
import json
import pytest
import responses
from twitch.client import TwitchClient
from twitch.constants import BASE_URL
from twitch.exceptions import TwitchAttributeException
from twitch.resources import Community, User
# Canned API payloads reused across the tests below.
example_community = {
    "_id": "e9f17055-810f-4736-ba40-fba4ac541caa",
    "name": "DallasTesterCommunity",
}
example_user = {
    "_id": "44322889",
    "name": "dallas",
}
@responses.activate
def test_get_by_name():
    """get_by_name hits /communities once and wraps the payload in Community."""
    url = "{}communities".format(BASE_URL)
    responses.add(
        responses.GET,
        url,
        body=json.dumps(example_community),
        status=200,
        content_type="application/json",
    )
    twitch = TwitchClient("client id")
    result = twitch.communities.get_by_name("spongebob")
    assert len(responses.calls) == 1
    assert isinstance(result, Community)
    assert result.id == example_community["_id"]
    assert result.name == example_community["name"]
@responses.activate
def test_get_by_id():
    """get_by_id fetches /communities/<id> and parses a Community."""
    cid = "abcd"
    url = "{}communities/{}".format(BASE_URL, cid)
    responses.add(
        responses.GET,
        url,
        body=json.dumps(example_community),
        status=200,
        content_type="application/json",
    )
    twitch = TwitchClient("client id")
    result = twitch.communities.get_by_id(cid)
    assert len(responses.calls) == 1
    assert isinstance(result, Community)
    assert result.id == example_community["_id"]
    assert result.name == example_community["name"]
@responses.activate
def test_update():
    """update issues exactly one PUT to the community endpoint."""
    cid = "abcd"
    url = "{}communities/{}".format(BASE_URL, cid)
    responses.add(
        responses.PUT,
        url,
        body=json.dumps(example_community),
        status=204,
        content_type="application/json",
    )
    twitch = TwitchClient("client id")
    twitch.communities.update(cid)
    assert len(responses.calls) == 1
@responses.activate
def test_get_top():
    """get_top unwraps the paginated payload into Community objects."""
    payload = {"_cursor": "MTA=", "_total": 100, "communities": [example_community]}
    responses.add(
        responses.GET,
        "{}communities/top".format(BASE_URL),
        body=json.dumps(payload),
        status=200,
        content_type="application/json",
    )
    twitch = TwitchClient("client id")
    top = twitch.communities.get_top()
    assert len(responses.calls) == 1
    assert len(top) == 1
    first = top[0]
    assert isinstance(first, Community)
    assert first.id == example_community["_id"]
    assert first.name == example_community["name"]
@responses.activate
@pytest.mark.parametrize("param,value", [("limit", 101)])
def test_get_top_raises_if_wrong_params_are_passed_in(param, value):
    """Out-of-range pagination params are rejected before any HTTP call."""
    twitch = TwitchClient("client id")
    with pytest.raises(TwitchAttributeException):
        twitch.communities.get_top(**{param: value})
@responses.activate
def test_get_banned_users():
    """get_banned_users parses the bans payload into User objects."""
    cid = "abcd"
    payload = {"_cursor": "", "banned_users": [example_user]}
    responses.add(
        responses.GET,
        "{}communities/{}/bans".format(BASE_URL, cid),
        body=json.dumps(payload),
        status=200,
        content_type="application/json",
    )
    twitch = TwitchClient("client id", "oauth token")
    banned = twitch.communities.get_banned_users(cid)
    assert len(responses.calls) == 1
    assert len(banned) == 1
    first = banned[0]
    assert isinstance(first, User)
    assert first.id == example_user["_id"]
    assert first.name == example_user["name"]
@responses.activate
@pytest.mark.parametrize("param,value", [("limit", 101)])
def test_get_banned_users_raises_if_wrong_params_are_passed_in(param, value):
    """Invalid pagination params raise before any request is made."""
    twitch = TwitchClient("client id", "oauth token")
    with pytest.raises(TwitchAttributeException):
        twitch.communities.get_banned_users("1234", **{param: value})
@responses.activate
def test_ban_user():
    """ban_user issues exactly one PUT to the community bans endpoint."""
    cid, uid = "abcd", 1234
    url = "{}communities/{}/bans/{}".format(BASE_URL, cid, uid)
    responses.add(responses.PUT, url, status=204, content_type="application/json")
    twitch = TwitchClient("client id", "oauth token")
    twitch.communities.ban_user(cid, uid)
    assert len(responses.calls) == 1
@responses.activate
def test_unban_user():
    """unban_user issues exactly one DELETE to the community bans endpoint."""
    cid, uid = "abcd", 1234
    url = "{}communities/{}/bans/{}".format(BASE_URL, cid, uid)
    responses.add(responses.DELETE, url, status=204, content_type="application/json")
    twitch = TwitchClient("client id", "oauth token")
    twitch.communities.unban_user(cid, uid)
    assert len(responses.calls) == 1
@responses.activate
def test_create_avatar_image():
    """create_avatar_image POSTs the image to the avatar endpoint once."""
    cid = "abcd"
    url = "{}communities/{}/images/avatar".format(BASE_URL, cid)
    responses.add(responses.POST, url, status=204, content_type="application/json")
    twitch = TwitchClient("client id", "oauth token")
    twitch.communities.create_avatar_image(cid, "imagecontent")
    assert len(responses.calls) == 1
@responses.activate
def test_delete_avatar_image():
    """delete_avatar_image issues one DELETE to the avatar endpoint."""
    cid = "abcd"
    url = "{}communities/{}/images/avatar".format(BASE_URL, cid)
    responses.add(responses.DELETE, url, status=204, content_type="application/json")
    twitch = TwitchClient("client id", "oauth token")
    twitch.communities.delete_avatar_image(cid)
    assert len(responses.calls) == 1
@responses.activate
def test_create_cover_image():
    """create_cover_image POSTs the image to the cover endpoint once."""
    cid = "abcd"
    url = "{}communities/{}/images/cover".format(BASE_URL, cid)
    responses.add(responses.POST, url, status=204, content_type="application/json")
    twitch = TwitchClient("client id", "oauth token")
    twitch.communities.create_cover_image(cid, "imagecontent")
    assert len(responses.calls) == 1
@responses.activate
def test_delete_cover_image():
    """delete_cover_image issues one DELETE to the cover endpoint."""
    cid = "abcd"
    url = "{}communities/{}/images/cover".format(BASE_URL, cid)
    responses.add(responses.DELETE, url, status=204, content_type="application/json")
    twitch = TwitchClient("client id", "oauth token")
    twitch.communities.delete_cover_image(cid)
    assert len(responses.calls) == 1
@responses.activate
def test_get_moderators():
    """get_moderators parses the moderators payload into User objects."""
    cid = "abcd"
    payload = {"moderators": [example_user]}
    responses.add(
        responses.GET,
        "{}communities/{}/moderators".format(BASE_URL, cid),
        body=json.dumps(payload),
        status=200,
        content_type="application/json",
    )
    twitch = TwitchClient("client id", "oauth token")
    mods = twitch.communities.get_moderators(cid)
    assert len(responses.calls) == 1
    assert len(mods) == 1
    first = mods[0]
    assert isinstance(first, User)
    assert first.id == example_user["_id"]
    assert first.name == example_user["name"]
@responses.activate
def test_add_moderator():
    """add_moderator issues one PUT to the moderators endpoint."""
    cid, uid = "abcd", 12345
    url = "{}communities/{}/moderators/{}".format(BASE_URL, cid, uid)
    responses.add(responses.PUT, url, status=204, content_type="application/json")
    twitch = TwitchClient("client id", "oauth token")
    twitch.communities.add_moderator(cid, uid)
    assert len(responses.calls) == 1
@responses.activate
def test_delete_moderator():
    """delete_moderator issues one DELETE to the moderators endpoint."""
    cid, uid = "abcd", 12345
    url = "{}communities/{}/moderators/{}".format(BASE_URL, cid, uid)
    responses.add(responses.DELETE, url, status=204, content_type="application/json")
    twitch = TwitchClient("client id", "oauth token")
    twitch.communities.delete_moderator(cid, uid)
    assert len(responses.calls) == 1
@responses.activate
def test_get_permissions():
    """get_permissions returns the raw permission flags as a dict."""
    cid = "abcd"
    payload = {"ban": True, "timeout": True, "edit": True}
    responses.add(
        responses.GET,
        "{}communities/{}/permissions".format(BASE_URL, cid),
        body=json.dumps(payload),
        status=200,
        content_type="application/json",
    )
    twitch = TwitchClient("client id", "oauth token")
    permissions = twitch.communities.get_permissions(cid)
    assert len(responses.calls) == 1
    assert isinstance(permissions, dict)
    assert permissions["ban"] is True
@responses.activate
def test_report_violation():
    """Reporting a channel issues a single POST to report_channel."""
    cid = "abcd"
    url = "{}communities/{}/report_channel".format(BASE_URL, cid)
    responses.add(
        responses.POST,
        url,
        status=204,
        content_type="application/json",
    )
    api = TwitchClient("client id", "oauth token")
    api.communities.report_violation(cid, 12345)
    assert len(responses.calls) == 1
@responses.activate
def test_get_timed_out_users():
    """Fetching timed-out users returns deserialized User objects."""
    cid = "abcd"
    payload = {"_cursor": "", "timed_out_users": [example_user]}
    url = "{}communities/{}/timeouts".format(BASE_URL, cid)
    responses.add(
        responses.GET,
        url,
        body=json.dumps(payload),
        status=200,
        content_type="application/json",
    )
    api = TwitchClient("client id", "oauth token")
    timed_out = api.communities.get_timed_out_users(cid)
    # One HTTP call and a single decoded user expected.
    assert len(responses.calls) == 1
    assert len(timed_out) == 1
    first = timed_out[0]
    assert isinstance(first, User)
    assert first.id == example_user["_id"]
    assert first.name == example_user["name"]
@responses.activate
@pytest.mark.parametrize("param,value", [("limit", 101)])
def test_get_timed_out_users_raises_if_wrong_params_are_passed_in(param, value):
    """Out-of-range pagination arguments must raise TwitchAttributeException."""
    api = TwitchClient("client id", "oauth token")
    bad_kwargs = {param: value}
    with pytest.raises(TwitchAttributeException):
        api.communities.get_timed_out_users("1234", **bad_kwargs)
@responses.activate
def test_add_timed_out_user():
    """Timing out a user issues a single PUT to the timeouts endpoint."""
    cid = "abcd"
    uid = 12345
    url = "{}communities/{}/timeouts/{}".format(BASE_URL, cid, uid)
    responses.add(
        responses.PUT,
        url,
        status=204,
        content_type="application/json",
    )
    api = TwitchClient("client id", "oauth token")
    # 5 is the timeout duration forwarded to the API.
    api.communities.add_timed_out_user(cid, uid, 5)
    assert len(responses.calls) == 1
@responses.activate
def test_delete_timed_out_user():
    """Clearing a timeout issues a single DELETE to the timeouts endpoint."""
    cid = "abcd"
    uid = 12345
    url = "{}communities/{}/timeouts/{}".format(BASE_URL, cid, uid)
    responses.add(
        responses.DELETE,
        url,
        status=204,
        content_type="application/json",
    )
    api = TwitchClient("client id", "oauth token")
    api.communities.delete_timed_out_user(cid, uid)
    assert len(responses.calls) == 1
| 26.083333
| 85
| 0.672478
| 1,245
| 10,955
| 5.706827
| 0.085141
| 0.089796
| 0.074314
| 0.080507
| 0.880366
| 0.855735
| 0.827445
| 0.797044
| 0.76228
| 0.747361
| 0
| 0.018825
| 0.199909
| 10,955
| 419
| 86
| 26.145585
| 0.791786
| 0
| 0
| 0.68932
| 0
| 0
| 0.141031
| 0.042994
| 0
| 0
| 0
| 0
| 0.139159
| 1
| 0.071197
| false
| 0.009709
| 0.022654
| 0
| 0.093851
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
8c6672e903fbd2db5155939f1b1597017c2c194e
| 40
|
py
|
Python
|
pyhcl/lib/__init__.py
|
raybdzhou/PyChip-py-hcl
|
08edc6ad4d2978eb417482f6f92678f8f9a1e3c7
|
[
"MIT"
] | 1
|
2021-12-10T14:02:54.000Z
|
2021-12-10T14:02:54.000Z
|
pyhcl/lib/__init__.py
|
raybdzhou/PyChip-py-hcl
|
08edc6ad4d2978eb417482f6f92678f8f9a1e3c7
|
[
"MIT"
] | null | null | null |
pyhcl/lib/__init__.py
|
raybdzhou/PyChip-py-hcl
|
08edc6ad4d2978eb417482f6f92678f8f9a1e3c7
|
[
"MIT"
] | 1
|
2022-03-04T03:36:01.000Z
|
2022-03-04T03:36:01.000Z
|
from .fifo.fifo import BubbleFifoFactory
| 40
| 40
| 0.875
| 5
| 40
| 7
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.075
| 40
| 1
| 40
| 40
| 0.945946
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
8cef041bcd91a43c1443fa376c88bd6bfb52ff6a
| 23
|
py
|
Python
|
pixel_table/modes/rain/__init__.py
|
Spooner/pixel-table
|
87ac04adbb74702bee3dcaa5c6bded7786cf73e7
|
[
"MIT"
] | null | null | null |
pixel_table/modes/rain/__init__.py
|
Spooner/pixel-table
|
87ac04adbb74702bee3dcaa5c6bded7786cf73e7
|
[
"MIT"
] | null | null | null |
pixel_table/modes/rain/__init__.py
|
Spooner/pixel-table
|
87ac04adbb74702bee3dcaa5c6bded7786cf73e7
|
[
"MIT"
] | null | null | null |
from .rain import Rain
| 11.5
| 22
| 0.782609
| 4
| 23
| 4.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.173913
| 23
| 1
| 23
| 23
| 0.947368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
50f2010022ed389ce74c9ce90b8243696999822e
| 17,354
|
py
|
Python
|
tests/gold_tests/pluginTest/stek_share/stek_share.test.py
|
cmcfarlen/trafficserver
|
2aa1d3106398eb082e5a454212b0273c63d5f69d
|
[
"Apache-2.0"
] | null | null | null |
tests/gold_tests/pluginTest/stek_share/stek_share.test.py
|
cmcfarlen/trafficserver
|
2aa1d3106398eb082e5a454212b0273c63d5f69d
|
[
"Apache-2.0"
] | null | null | null |
tests/gold_tests/pluginTest/stek_share/stek_share.test.py
|
cmcfarlen/trafficserver
|
2aa1d3106398eb082e5a454212b0273c63d5f69d
|
[
"Apache-2.0"
] | null | null | null |
'''
'''
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
Test.Summary = 'Test the STEK Share plugin'
Test.testName = "stek_share"
Test.SkipUnless(Condition.PluginExists('stek_share.so'))
server = Test.MakeOriginServer('server')
ts1 = Test.MakeATSProcess("ts1", select_ports=True, enable_tls=True)
ts2 = Test.MakeATSProcess("ts2", select_ports=True, enable_tls=True)
ts3 = Test.MakeATSProcess("ts3", select_ports=True, enable_tls=True)
ts4 = Test.MakeATSProcess("ts4", select_ports=True, enable_tls=True)
ts5 = Test.MakeATSProcess("ts5", select_ports=True, enable_tls=True)
Test.Setup.Copy('ssl/self_signed.crt')
Test.Setup.Copy('ssl/self_signed.key')
Test.Setup.Copy('server_list.yaml')
cert_path = os.path.join(Test.RunDirectory, 'self_signed.crt')
key_path = os.path.join(Test.RunDirectory, 'self_signed.key')
server_list_path = os.path.join(Test.RunDirectory, 'server_list.yaml')
request_header1 = {
'headers': 'GET / HTTP/1.1\r\nHost: www.example.com\r\n\r\n',
'timestamp': '1469733493.993',
'body': ''
}
response_header1 = {
'headers': 'HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n',
'timestamp': '1469733493.993',
'body': 'curl test'
}
server.addResponse('sessionlog.json', request_header1, response_header1)
stek_share_conf_path_1 = os.path.join(ts1.Variables.CONFIGDIR, 'stek_share_conf.yaml')
stek_share_conf_path_2 = os.path.join(ts2.Variables.CONFIGDIR, 'stek_share_conf.yaml')
stek_share_conf_path_3 = os.path.join(ts3.Variables.CONFIGDIR, 'stek_share_conf.yaml')
stek_share_conf_path_4 = os.path.join(ts4.Variables.CONFIGDIR, 'stek_share_conf.yaml')
stek_share_conf_path_5 = os.path.join(ts5.Variables.CONFIGDIR, 'stek_share_conf.yaml')
ts1.Disk.File(stek_share_conf_path_1, id="stek_share_conf_1", typename="ats:config")
ts2.Disk.File(stek_share_conf_path_2, id="stek_share_conf_2", typename="ats:config")
ts3.Disk.File(stek_share_conf_path_3, id="stek_share_conf_3", typename="ats:config")
ts4.Disk.File(stek_share_conf_path_4, id="stek_share_conf_4", typename="ats:config")
ts5.Disk.File(stek_share_conf_path_5, id="stek_share_conf_5", typename="ats:config")
ts1.Disk.stek_share_conf_1.AddLines([
'server_id: 1',
'address: 127.0.0.1',
'port: 10001',
'asio_thread_pool_size: 4',
'heart_beat_interval: 100',
'election_timeout_lower_bound: 200',
'election_timeout_upper_bound: 400',
'reserved_log_items: 5',
'snapshot_distance: 5',
'client_req_timeout: 3000', # this is in milliseconds
'key_update_interval: 3600', # this is in seconds
'server_list_file: {0}'.format(server_list_path),
'root_cert_file: {0}'.format(cert_path),
'server_cert_file: {0}'.format(cert_path),
'server_key_file: {0}'.format(key_path),
'cert_verify_str: /C=US/ST=IL/O=Yahoo/OU=Edge/CN=stek-share'
])
ts2.Disk.stek_share_conf_2.AddLines([
'server_id: 2',
'address: 127.0.0.1',
'port: 10002',
'asio_thread_pool_size: 4',
'heart_beat_interval: 100',
'election_timeout_lower_bound: 200',
'election_timeout_upper_bound: 400',
'reserved_log_items: 5',
'snapshot_distance: 5',
'client_req_timeout: 3000', # this is in milliseconds
'key_update_interval: 3600', # this is in seconds
'server_list_file: {0}'.format(server_list_path),
'root_cert_file: {0}'.format(cert_path),
'server_cert_file: {0}'.format(cert_path),
'server_key_file: {0}'.format(key_path),
'cert_verify_str: /C=US/ST=IL/O=Yahoo/OU=Edge/CN=stek-share'
])
ts3.Disk.stek_share_conf_3.AddLines([
'server_id: 3',
'address: 127.0.0.1',
'port: 10003',
'asio_thread_pool_size: 4',
'heart_beat_interval: 100',
'election_timeout_lower_bound: 200',
'election_timeout_upper_bound: 400',
'reserved_log_items: 5',
'snapshot_distance: 5',
'client_req_timeout: 3000', # this is in milliseconds
'key_update_interval: 3600', # this is in seconds
'server_list_file: {0}'.format(server_list_path),
'root_cert_file: {0}'.format(cert_path),
'server_cert_file: {0}'.format(cert_path),
'server_key_file: {0}'.format(key_path),
'cert_verify_str: /C=US/ST=IL/O=Yahoo/OU=Edge/CN=stek-share'
])
ts4.Disk.stek_share_conf_4.AddLines([
'server_id: 4',
'address: 127.0.0.1',
'port: 10004',
'asio_thread_pool_size: 4',
'heart_beat_interval: 100',
'election_timeout_lower_bound: 200',
'election_timeout_upper_bound: 400',
'reserved_log_items: 5',
'snapshot_distance: 5',
'client_req_timeout: 3000', # this is in milliseconds
'key_update_interval: 3600', # this is in seconds
'server_list_file: {0}'.format(server_list_path),
'root_cert_file: {0}'.format(cert_path),
'server_cert_file: {0}'.format(cert_path),
'server_key_file: {0}'.format(key_path),
'cert_verify_str: /C=US/ST=IL/O=Yahoo/OU=Edge/CN=stek-share'
])
ts5.Disk.stek_share_conf_5.AddLines([
'server_id: 5',
'address: 127.0.0.1',
'port: 10005',
'asio_thread_pool_size: 4',
'heart_beat_interval: 100',
'election_timeout_lower_bound: 200',
'election_timeout_upper_bound: 400',
'reserved_log_items: 5',
'snapshot_distance: 5',
'client_req_timeout: 3000', # this is in milliseconds
'key_update_interval: 3600', # this is in seconds
'server_list_file: {0}'.format(server_list_path),
'root_cert_file: {0}'.format(cert_path),
'server_cert_file: {0}'.format(cert_path),
'server_key_file: {0}'.format(key_path),
'cert_verify_str: /C=US/ST=IL/O=Yahoo/OU=Edge/CN=stek-share'
])
ts1.Disk.records_config.update({'proxy.config.diags.debug.enabled': 1, 'proxy.config.diags.debug.tags': 'stek_share', 'proxy.config.exec_thread.autoconfig': 0, 'proxy.config.exec_thread.limit': 4, 'proxy.config.ssl.server.cert.path': '{0}'.format(Test.RunDirectory), 'proxy.config.ssl.server.private_key.path': '{0}'.format(Test.RunDirectory), 'proxy.config.ssl.session_cache': 2, 'proxy.config.ssl.session_cache.size': 1024, 'proxy.config.ssl.session_cache.timeout': 7200, 'proxy.config.ssl.session_cache.num_buckets': 16, 'proxy.config.ssl.server.session_ticket.enable': 1, 'proxy.config.ssl.server.cipher_suite':
'ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-DSS-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA256:DHE-RSA-AES128-SHA256:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA:DHE-DSS-AES256-SHA:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA:!aNULL:!eNULL:!EXPORT:!DES:!RC4:!MD5:!PSK:!aECDH:!EDH-DSS-DES-CBC3-SHA:!EDH-RSA-DES-CBC3-SHA:!KRB5-DES-CBC3-SHA'})
ts1.Disk.plugin_config.AddLine('stek_share.so {0}'.format(stek_share_conf_path_1))
ts1.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=self_signed.crt ssl_key_name=self_signed.key')
ts1.Disk.remap_config.AddLine('map / http://127.0.0.1:{0}'.format(server.Variables.Port))
ts2.Disk.records_config.update({'proxy.config.diags.debug.enabled': 1, 'proxy.config.diags.debug.tags': 'stek_share', 'proxy.config.exec_thread.autoconfig': 0, 'proxy.config.exec_thread.limit': 4, 'proxy.config.ssl.server.cert.path': '{0}'.format(Test.RunDirectory), 'proxy.config.ssl.server.private_key.path': '{0}'.format(Test.RunDirectory), 'proxy.config.ssl.session_cache': 2, 'proxy.config.ssl.session_cache.size': 1024, 'proxy.config.ssl.session_cache.timeout': 7200, 'proxy.config.ssl.session_cache.num_buckets': 16, 'proxy.config.ssl.server.session_ticket.enable': 1, 'proxy.config.ssl.server.cipher_suite':
'ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-DSS-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA256:DHE-RSA-AES128-SHA256:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA:DHE-DSS-AES256-SHA:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA:!aNULL:!eNULL:!EXPORT:!DES:!RC4:!MD5:!PSK:!aECDH:!EDH-DSS-DES-CBC3-SHA:!EDH-RSA-DES-CBC3-SHA:!KRB5-DES-CBC3-SHA'})
ts2.Disk.plugin_config.AddLine('stek_share.so {0}'.format(stek_share_conf_path_2))
ts2.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=self_signed.crt ssl_key_name=self_signed.key')
ts2.Disk.remap_config.AddLine('map / http://127.0.0.1:{0}'.format(server.Variables.Port))
ts3.Disk.records_config.update({'proxy.config.diags.debug.enabled': 1, 'proxy.config.diags.debug.tags': 'stek_share', 'proxy.config.exec_thread.autoconfig': 0, 'proxy.config.exec_thread.limit': 4, 'proxy.config.ssl.server.cert.path': '{0}'.format(Test.RunDirectory), 'proxy.config.ssl.server.private_key.path': '{0}'.format(Test.RunDirectory), 'proxy.config.ssl.session_cache': 2, 'proxy.config.ssl.session_cache.size': 1024, 'proxy.config.ssl.session_cache.timeout': 7200, 'proxy.config.ssl.session_cache.num_buckets': 16, 'proxy.config.ssl.server.session_ticket.enable': 1, 'proxy.config.ssl.server.cipher_suite':
'ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-DSS-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA256:DHE-RSA-AES128-SHA256:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA:DHE-DSS-AES256-SHA:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA:!aNULL:!eNULL:!EXPORT:!DES:!RC4:!MD5:!PSK:!aECDH:!EDH-DSS-DES-CBC3-SHA:!EDH-RSA-DES-CBC3-SHA:!KRB5-DES-CBC3-SHA'})
ts3.Disk.plugin_config.AddLine('stek_share.so {0}'.format(stek_share_conf_path_3))
ts3.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=self_signed.crt ssl_key_name=self_signed.key')
ts3.Disk.remap_config.AddLine('map / http://127.0.0.1:{0}'.format(server.Variables.Port))
ts4.Disk.records_config.update({'proxy.config.diags.debug.enabled': 1, 'proxy.config.diags.debug.tags': 'stek_share', 'proxy.config.exec_thread.autoconfig': 0, 'proxy.config.exec_thread.limit': 4, 'proxy.config.ssl.server.cert.path': '{0}'.format(Test.RunDirectory), 'proxy.config.ssl.server.private_key.path': '{0}'.format(Test.RunDirectory), 'proxy.config.ssl.session_cache': 2, 'proxy.config.ssl.session_cache.size': 1024, 'proxy.config.ssl.session_cache.timeout': 7200, 'proxy.config.ssl.session_cache.num_buckets': 16, 'proxy.config.ssl.server.session_ticket.enable': 1, 'proxy.config.ssl.server.cipher_suite':
'ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-DSS-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA256:DHE-RSA-AES128-SHA256:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA:DHE-DSS-AES256-SHA:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA:!aNULL:!eNULL:!EXPORT:!DES:!RC4:!MD5:!PSK:!aECDH:!EDH-DSS-DES-CBC3-SHA:!EDH-RSA-DES-CBC3-SHA:!KRB5-DES-CBC3-SHA'})
ts4.Disk.plugin_config.AddLine('stek_share.so {0}'.format(stek_share_conf_path_4))
ts4.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=self_signed.crt ssl_key_name=self_signed.key')
ts4.Disk.remap_config.AddLine('map / http://127.0.0.1:{0}'.format(server.Variables.Port))
ts5.Disk.records_config.update({'proxy.config.diags.debug.enabled': 1, 'proxy.config.diags.debug.tags': 'stek_share', 'proxy.config.exec_thread.autoconfig': 0, 'proxy.config.exec_thread.limit': 4, 'proxy.config.ssl.server.cert.path': '{0}'.format(Test.RunDirectory), 'proxy.config.ssl.server.private_key.path': '{0}'.format(Test.RunDirectory), 'proxy.config.ssl.session_cache': 2, 'proxy.config.ssl.session_cache.size': 1024, 'proxy.config.ssl.session_cache.timeout': 7200, 'proxy.config.ssl.session_cache.num_buckets': 16, 'proxy.config.ssl.server.session_ticket.enable': 1, 'proxy.config.ssl.server.cipher_suite':
'ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-DSS-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA256:DHE-RSA-AES128-SHA256:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA:DHE-DSS-AES256-SHA:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA:!aNULL:!eNULL:!EXPORT:!DES:!RC4:!MD5:!PSK:!aECDH:!EDH-DSS-DES-CBC3-SHA:!EDH-RSA-DES-CBC3-SHA:!KRB5-DES-CBC3-SHA'})
ts5.Disk.plugin_config.AddLine('stek_share.so {0}'.format(stek_share_conf_path_5))
ts5.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=self_signed.crt ssl_key_name=self_signed.key')
ts5.Disk.remap_config.AddLine('map / http://127.0.0.1:{0}'.format(server.Variables.Port))
def check_session(ev, test):
    """Verify that all TLS connections in the captured output reused one session.

    Scans the openssl s_client output for ``Session-ID:`` lines and succeeds
    when every id is identical, i.e. the first session was resumed by each
    subsequent connection.

    Parameters:
        ev: event object passed by the autest framework.
        test: tester object; ``test.GetContent(ev)`` yields the path of the
            file holding the openssl output.

    Returns:
        A ``(passed, description, message)`` tuple as expected by
        ``Testers.Lambda``.
    """
    retval = False
    path = test.GetContent(ev)
    # Bug fix: the original's error branch referenced an undefined name
    # (openssl_output), its `if not f:` check was unreachable (open() raises
    # instead of returning a falsy object), and the handle was never closed.
    try:
        with open(path, 'r') as f:
            content = f.read()
    except OSError:
        err = "Failed to open {0}".format(path)
        return (retval, "Check that session ids match", err)
    err = "Session ids match"
    match = re.findall('Session-ID: ([0-9A-F]+)', content)
    if match:
        # All ids equal <=> each consecutive pair is equal.
        if all(i == j for i, j in zip(match, match[1:])):
            err = "{0} reused successfully {1} times".format(match[0], len(match) - 1)
            retval = True
        else:
            err = "Session is not being reused as expected"
    else:
        err = "Didn't find session id"
    return (retval, "Check that session ids match", err)
tr1 = Test.AddTestRun('Basic Curl test, and give it enough time for all ATS to start up and sync STEK')
tr1.Processes.Default.Command = 'sleep 10 && curl https://127.0.0.1:{0} -k'.format(ts1.Variables.ssl_port)
tr1.Processes.Default.ReturnCode = 0
tr1.Processes.Default.StartBefore(server)
tr1.Processes.Default.StartBefore(ts1)
tr1.Processes.Default.StartBefore(ts2)
tr1.Processes.Default.StartBefore(ts3)
tr1.Processes.Default.StartBefore(ts4)
tr1.Processes.Default.StartBefore(ts5)
tr1.Processes.Default.Streams.All = Testers.ContainsExpression('curl test', 'Making sure the basics still work')
ts1.Streams.All = Testers.ContainsExpression('Generate initial STEK succeeded', 'should succeed')
ts2.Streams.All = Testers.ContainsExpression('Generate initial STEK succeeded', 'should succeed')
ts3.Streams.All = Testers.ContainsExpression('Generate initial STEK succeeded', 'should succeed')
ts4.Streams.All = Testers.ContainsExpression('Generate initial STEK succeeded', 'should succeed')
ts5.Streams.All = Testers.ContainsExpression('Generate initial STEK succeeded', 'should succeed')
tr1.StillRunningAfter = server
tr1.StillRunningAfter += ts1
tr1.StillRunningAfter += ts2
tr1.StillRunningAfter += ts3
tr1.StillRunningAfter += ts4
tr1.StillRunningAfter += ts5
tr2 = Test.AddTestRun("TLSv1.2 Session Ticket")
tr2.Command = \
'echo -e "GET / HTTP/1.1\r\n" | openssl s_client -tls1_2 -connect 127.0.0.1:{0} -sess_out {5} && ' \
'echo -e "GET / HTTP/1.1\r\n" | openssl s_client -tls1_2 -connect 127.0.0.1:{0} -sess_in {5} && ' \
'echo -e "GET / HTTP/1.1\r\n" | openssl s_client -tls1_2 -connect 127.0.0.1:{1} -sess_in {5} && ' \
'echo -e "GET / HTTP/1.1\r\n" | openssl s_client -tls1_2 -connect 127.0.0.1:{2} -sess_in {5} && ' \
'echo -e "GET / HTTP/1.1\r\n" | openssl s_client -tls1_2 -connect 127.0.0.1:{3} -sess_in {5} && ' \
'echo -e "GET / HTTP/1.1\r\n" | openssl s_client -tls1_2 -connect 127.0.0.1:{4} -sess_in {5}' \
.format(
ts1.Variables.ssl_port,
ts2.Variables.ssl_port,
ts3.Variables.ssl_port,
ts4.Variables.ssl_port,
ts5.Variables.ssl_port,
os.path.join(Test.RunDirectory, 'sess.dat')
)
tr2.ReturnCode = 0
tr2.Processes.Default.Streams.All.Content = Testers.Lambda(check_session)
| 68.054902
| 719
| 0.737006
| 2,755
| 17,354
| 4.48167
| 0.115789
| 0.053454
| 0.045355
| 0.032397
| 0.776788
| 0.766664
| 0.728841
| 0.728841
| 0.711185
| 0.711185
| 0
| 0.075361
| 0.09773
| 17,354
| 254
| 720
| 68.322835
| 0.713182
| 0.056471
| 0
| 0.409524
| 0
| 0.085714
| 0.604345
| 0.39492
| 0
| 0
| 0
| 0
| 0
| 1
| 0.004762
| false
| 0
| 0.009524
| 0
| 0.02381
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0f9903717282ace68a922c0e9fff62361a606a72
| 5,637
|
py
|
Python
|
graphtheory/algorithms/topsort.py
|
gitter-badger/graphs-dict
|
2be1a5b140feb050eec799d6cadf6de5eef01745
|
[
"BSD-3-Clause"
] | 36
|
2015-09-20T20:55:39.000Z
|
2021-09-20T05:49:03.000Z
|
graphtheory/algorithms/topsort.py
|
gitter-badger/graphs-dict
|
2be1a5b140feb050eec799d6cadf6de5eef01745
|
[
"BSD-3-Clause"
] | 6
|
2016-03-25T21:41:46.000Z
|
2020-02-12T03:18:59.000Z
|
graphtheory/algorithms/topsort.py
|
gitter-badger/graphs-dict
|
2be1a5b140feb050eec799d6cadf6de5eef01745
|
[
"BSD-3-Clause"
] | 9
|
2016-09-12T07:57:27.000Z
|
2022-03-21T16:15:39.000Z
|
#!/usr/bin/python
try:
from Queue import Queue
except ImportError: # Python 3
from queue import Queue
xrange = range
#from graphtheory.traversing.dfs import DFSWithRecursion as SimpleDFS
from graphtheory.traversing.dfs import SimpleDFS
class TopologicalSortDFS:
    """Topological sorting of nodes from a dag using DFS.

    Attributes
    ----------
    graph : input directed acyclic graph
    sorted_nodes : list of sorted nodes

    Notes
    -----
    Based on:
    Cormen, T. H., Leiserson, C. E., Rivest, R. L., and Stein, C., 2009,
    Introduction to Algorithms, third edition, The MIT Press,
    Cambridge, London.
    https://en.wikipedia.org/wiki/Topological_sorting
    """

    def __init__(self, graph):
        """The algorithm initialization."""
        if not graph.is_directed():
            raise ValueError("the graph is not directed")
        self.graph = graph
        self.sorted_nodes = []

    def run(self):
        """Executable pseudocode."""
        dfs = SimpleDFS(self.graph)
        # Record each node as its DFS subtree finishes, then reverse:
        # reverse post-order is a topological order.
        dfs.run(post_action=self.sorted_nodes.append)
        self.sorted_nodes.reverse()
class TopologicalSortQueue:
    """Topological sorting of nodes from a dag (Kahn's algorithm).

    Attributes
    ----------
    graph : input directed acyclic graph
    sorted_nodes : list of sorted nodes

    Notes
    -----
    Based on:
    Cormen, T. H., Leiserson, C. E., Rivest, R. L., and Stein, C., 2009,
    Introduction to Algorithms, third edition, The MIT Press,
    Cambridge, London.
    https://en.wikipedia.org/wiki/Topological_sorting
    """

    def __init__(self, graph):
        """The algorithm initialization."""
        if not graph.is_directed():
            raise ValueError("the graph is not directed")
        self.graph = graph
        self.sorted_nodes = []

    def run(self):
        """Executable pseudocode."""
        ready = Queue()  # FIFO of nodes whose indegree has reached zero
        # Count incoming edges for every node.
        indegree = dict((v, 0) for v in self.graph.iternodes())
        for edge in self.graph.iteredges():
            indegree[edge.target] += 1
        for v in self.graph.iternodes():
            if indegree[v] == 0:
                ready.put(v)
        while not ready.empty():
            v = ready.get()
            self.sorted_nodes.append(v)
            # Deleting v's outedges may free up its successors.
            for edge in self.graph.iteroutedges(v):
                indegree[edge.target] -= 1
                if indegree[edge.target] == 0:
                    ready.put(edge.target)
class TopologicalSortSet:
    """Topological sorting of nodes from a dag (Kahn's algorithm).

    Attributes
    ----------
    graph : input directed acyclic graph
    sorted_nodes : list of sorted nodes

    Notes
    -----
    Based on:
    Cormen, T. H., Leiserson, C. E., Rivest, R. L., and Stein, C., 2009,
    Introduction to Algorithms, third edition, The MIT Press,
    Cambridge, London.
    https://en.wikipedia.org/wiki/Topological_sorting
    """

    def __init__(self, graph):
        """The algorithm initialization."""
        if not graph.is_directed():
            raise ValueError("the graph is not directed")
        self.graph = graph
        self.sorted_nodes = []

    def run(self):
        """Executable pseudocode."""
        ready = set()  # unordered pool of zero-indegree nodes
        # Count incoming edges for every node.
        indegree = dict((v, 0) for v in self.graph.iternodes())
        for edge in self.graph.iteredges():
            indegree[edge.target] += 1
        for v in self.graph.iternodes():
            if indegree[v] == 0:
                ready.add(v)
        while ready:
            v = ready.pop()
            self.sorted_nodes.append(v)
            # Deleting v's outedges may free up its successors.
            for edge in self.graph.iteroutedges(v):
                indegree[edge.target] -= 1
                if indegree[edge.target] == 0:
                    ready.add(edge.target)
class TopologicalSortList:
    """Topological sorting of nodes from a dag (Kahn's algorithm).

    Attributes
    ----------
    graph : input directed acyclic graph
    sorted_nodes : list of sorted nodes

    Notes
    -----
    Based on:
    Cormen, T. H., Leiserson, C. E., Rivest, R. L., and Stein, C., 2009,
    Introduction to Algorithms, third edition, The MIT Press,
    Cambridge, London.
    https://en.wikipedia.org/wiki/Topological_sorting
    """

    def __init__(self, graph):
        """The algorithm initialization."""
        if not graph.is_directed():
            raise ValueError("the graph is not directed")
        self.graph = graph
        # Preallocated result list doubles as the work queue.
        self.sorted_nodes = [None] * self.graph.v()

    def run(self):
        """Executable pseudocode."""
        # Count incoming edges for every node.
        indegree = dict((v, 0) for v in self.graph.iternodes())
        for edge in self.graph.iteredges():
            indegree[edge.target] += 1
        head = 0  # next position to read from the queue
        tail = 0  # next free slot to write into
        for v in self.graph.iternodes():
            if indegree[v] == 0:
                self.sorted_nodes[tail] = v
                tail += 1
        for _ in range(self.graph.v()):
            source = self.sorted_nodes[head]
            head += 1
            # Deleting source's outedges may free up its successors.
            for edge in self.graph.iteroutedges(source):
                indegree[edge.target] -= 1
                if indegree[edge.target] == 0:
                    self.sorted_nodes[tail] = edge.target
                    tail += 1
# EOF
| 30.144385
| 78
| 0.570694
| 652
| 5,637
| 4.872699
| 0.190184
| 0.065156
| 0.041549
| 0.024551
| 0.830028
| 0.779352
| 0.769279
| 0.758892
| 0.758892
| 0.732137
| 0
| 0.00963
| 0.318432
| 5,637
| 186
| 79
| 30.306452
| 0.817283
| 0.367571
| 0
| 0.597561
| 0
| 0
| 0.03095
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.097561
| false
| 0
| 0.04878
| 0
| 0.195122
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ba01e35a3224231d896e89ab978be9af3db5660b
| 25
|
py
|
Python
|
S1E1/Sample Code/my_first_program.py
|
SMPParthaS/Python-For-Beginners
|
d3d8d08c405a9358d64ae23d00d574654532b96a
|
[
"MIT"
] | null | null | null |
S1E1/Sample Code/my_first_program.py
|
SMPParthaS/Python-For-Beginners
|
d3d8d08c405a9358d64ae23d00d574654532b96a
|
[
"MIT"
] | null | null | null |
S1E1/Sample Code/my_first_program.py
|
SMPParthaS/Python-For-Beginners
|
d3d8d08c405a9358d64ae23d00d574654532b96a
|
[
"MIT"
] | 2
|
2020-11-09T19:02:47.000Z
|
2020-12-09T19:48:05.000Z
|
print("This is the way!")
| 25
| 25
| 0.68
| 5
| 25
| 3.4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12
| 25
| 1
| 25
| 25
| 0.772727
| 0
| 0
| 0
| 0
| 0
| 0.615385
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
e849c439b1a082baae91c909db599ebf6e20023c
| 72,480
|
py
|
Python
|
tests/test_elmo_ner.py
|
K-Mike/deep_ner
|
ffe1bcd64f7e38066866daa0cdd943300ba9ed4e
|
[
"Apache-2.0"
] | null | null | null |
tests/test_elmo_ner.py
|
K-Mike/deep_ner
|
ffe1bcd64f7e38066866daa0cdd943300ba9ed4e
|
[
"Apache-2.0"
] | null | null | null |
tests/test_elmo_ner.py
|
K-Mike/deep_ner
|
ffe1bcd64f7e38066866daa0cdd943300ba9ed4e
|
[
"Apache-2.0"
] | null | null | null |
import copy
import gc
import os
import pickle
import re
import sys
import tempfile
import unittest
import numpy as np
from sklearn.exceptions import NotFittedError
try:
from deep_ner.elmo_ner import ELMo_NER
from deep_ner.utils import load_dataset
from deep_ner.quality import calculate_prediction_quality
except:
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from deep_ner.elmo_ner import ELMo_NER
from deep_ner.utils import load_dataset
from deep_ner.quality import calculate_prediction_quality
class TestELMoNER(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.ELMO_HUB_MODULE = 'http://files.deeppavlov.ai/deeppavlov_data/elmo_ru-news_wmt11-16_1.5M_steps.tar.gz'
def tearDown(self):
if hasattr(self, 'ner'):
del self.ner
if hasattr(self, 'another_ner'):
del self.another_ner
if hasattr(self, 'temp_file_name'):
if os.path.isfile(self.temp_file_name):
os.remove(self.temp_file_name)
def test_creation(self):
self.ner = ELMo_NER(elmo_hub_module_handle=self.ELMO_HUB_MODULE)
self.assertIsInstance(self.ner, ELMo_NER)
self.assertTrue(hasattr(self.ner, 'batch_size'))
self.assertTrue(hasattr(self.ner, 'lr'))
self.assertTrue(hasattr(self.ner, 'l2_reg'))
self.assertTrue(hasattr(self.ner, 'elmo_hub_module_handle'))
self.assertTrue(hasattr(self.ner, 'finetune_elmo'))
self.assertTrue(hasattr(self.ner, 'max_epochs'))
self.assertTrue(hasattr(self.ner, 'patience'))
self.assertTrue(hasattr(self.ner, 'random_seed'))
self.assertTrue(hasattr(self.ner, 'gpu_memory_frac'))
self.assertTrue(hasattr(self.ner, 'max_seq_length'))
self.assertTrue(hasattr(self.ner, 'validation_fraction'))
self.assertTrue(hasattr(self.ner, 'verbose'))
self.assertIsInstance(self.ner.batch_size, int)
self.assertIsInstance(self.ner.lr, float)
self.assertIsInstance(self.ner.l2_reg, float)
self.assertIsInstance(self.ner.finetune_elmo, bool)
self.assertIsInstance(self.ner.max_epochs, int)
self.assertIsInstance(self.ner.patience, int)
self.assertIsNone(self.ner.random_seed)
self.assertIsInstance(self.ner.gpu_memory_frac, float)
self.assertIsInstance(self.ner.max_seq_length, int)
self.assertIsInstance(self.ner.validation_fraction, float)
self.assertIsInstance(self.ner.verbose, bool)
def test_check_params_positive(self):
ELMo_NER.check_params(
elmo_hub_module_handle=self.ELMO_HUB_MODULE, finetune_elmo=True, batch_size=32, max_seq_length=512, lr=1e-3,
l2_reg=1e-4, validation_fraction=0.1, max_epochs=10, patience=3, gpu_memory_frac=1.0, verbose=False,
random_seed=42
)
self.assertTrue(True)
def test_check_params_negative001(self):
true_err_msg = re.escape('`elmo_hub_module_handle` is not specified!')
with self.assertRaisesRegex(ValueError, true_err_msg):
ELMo_NER.check_params(
finetune_elmo=True, batch_size=32, max_seq_length=512, lr=1e-3, l2_reg=1e-4, validation_fraction=0.1,
max_epochs=10, patience=3, gpu_memory_frac=1.0, verbose=False, random_seed=42
)
def test_check_params_negative002(self):
true_err_msg = re.escape('`elmo_hub_module_handle` is wrong! Expected `{0}`, got `{1}`.'.format(
type('abc'), type(123)))
with self.assertRaisesRegex(ValueError, true_err_msg):
ELMo_NER.check_params(
elmo_hub_module_handle=1, finetune_elmo=True, batch_size=32, max_seq_length=512, lr=1e-3, l2_reg=1e-4,
validation_fraction=0.1, max_epochs=10, patience=3, gpu_memory_frac=1.0, verbose=False, random_seed=42
)
def test_check_params_negative003(self):
true_err_msg = re.escape('`batch_size` is not specified!')
with self.assertRaisesRegex(ValueError, true_err_msg):
ELMo_NER.check_params(
elmo_hub_module_handle=self.ELMO_HUB_MODULE, finetune_elmo=True, max_seq_length=512, lr=1e-3,
l2_reg=1e-4, validation_fraction=0.1, max_epochs=10, patience=3, gpu_memory_frac=1.0, verbose=False,
random_seed=42
)
def test_check_params_negative004(self):
true_err_msg = re.escape('`batch_size` is wrong! Expected `{0}`, got `{1}`.'.format(
type(3), type('3')))
with self.assertRaisesRegex(ValueError, true_err_msg):
ELMo_NER.check_params(
elmo_hub_module_handle=self.ELMO_HUB_MODULE, finetune_elmo=True, batch_size='32', max_seq_length=512,
lr=1e-3, l2_reg=1e-4, validation_fraction=0.1, max_epochs=10, patience=3, gpu_memory_frac=1.0,
verbose=False, random_seed=42
)
def test_check_params_negative005(self):
true_err_msg = re.escape('`batch_size` is wrong! Expected a positive integer value, but -3 is not positive.')
with self.assertRaisesRegex(ValueError, true_err_msg):
ELMo_NER.check_params(
elmo_hub_module_handle=self.ELMO_HUB_MODULE, finetune_elmo=True, batch_size=-3, max_seq_length=512,
lr=1e-3, l2_reg=1e-4, validation_fraction=0.1, max_epochs=10, patience=3, gpu_memory_frac=1.0,
verbose=False, random_seed=42
)
def test_check_params_negative006(self):
true_err_msg = re.escape('`max_epochs` is not specified!')
with self.assertRaisesRegex(ValueError, true_err_msg):
ELMo_NER.check_params(
elmo_hub_module_handle=self.ELMO_HUB_MODULE, finetune_elmo=True, batch_size=32, max_seq_length=512,
lr=1e-3, l2_reg=1e-4, validation_fraction=0.1, patience=3, gpu_memory_frac=1.0, verbose=False,
random_seed=42
)
def test_check_params_negative007(self):
true_err_msg = re.escape('`max_epochs` is wrong! Expected `{0}`, got `{1}`.'.format(
type(3), type('3')))
with self.assertRaisesRegex(ValueError, true_err_msg):
ELMo_NER.check_params(
elmo_hub_module_handle=self.ELMO_HUB_MODULE, finetune_elmo=True, batch_size=32, max_seq_length=512,
lr=1e-3, l2_reg=1e-4, validation_fraction=0.1, max_epochs='10', patience=3,
gpu_memory_frac=1.0, verbose=False, random_seed=42
)
def test_check_params_negative008(self):
true_err_msg = re.escape('`max_epochs` is wrong! Expected a positive integer value, but -3 is not positive.')
with self.assertRaisesRegex(ValueError, true_err_msg):
ELMo_NER.check_params(
elmo_hub_module_handle=self.ELMO_HUB_MODULE, finetune_elmo=True, batch_size=32, max_seq_length=512,
lr=1e-3, l2_reg=1e-4, validation_fraction=0.1, max_epochs=-3, patience=3, gpu_memory_frac=1.0,
verbose=False, random_seed=42
)
def test_check_params_negative009(self):
true_err_msg = re.escape('`patience` is not specified!')
with self.assertRaisesRegex(ValueError, true_err_msg):
ELMo_NER.check_params(
elmo_hub_module_handle=self.ELMO_HUB_MODULE, finetune_elmo=True, batch_size=32, max_seq_length=512,
lr=1e-3, l2_reg=1e-4, validation_fraction=0.1, max_epochs=10, gpu_memory_frac=1.0, verbose=False,
random_seed=42
)
def test_check_params_negative010(self):
true_err_msg = re.escape('`patience` is wrong! Expected `{0}`, got `{1}`.'.format(
type(3), type('3')))
with self.assertRaisesRegex(ValueError, true_err_msg):
ELMo_NER.check_params(
elmo_hub_module_handle=self.ELMO_HUB_MODULE, finetune_elmo=True, batch_size=32, max_seq_length=512,
lr=1e-3, l2_reg=1e-4, validation_fraction=0.1, max_epochs=10, patience='3', gpu_memory_frac=1.0,
verbose=False, random_seed=42
)
def test_check_params_negative011(self):
true_err_msg = re.escape('`patience` is wrong! Expected a positive integer value, but -3 is not positive.')
with self.assertRaisesRegex(ValueError, true_err_msg):
ELMo_NER.check_params(
elmo_hub_module_handle=self.ELMO_HUB_MODULE, finetune_elmo=True, batch_size=32, max_seq_length=512,
lr=1e-3, l2_reg=1e-4, validation_fraction=0.1, max_epochs=10, patience=-3, gpu_memory_frac=1.0,
verbose=False, random_seed=42
)
def test_check_params_negative012(self):
true_err_msg = re.escape('`max_seq_length` is not specified!')
with self.assertRaisesRegex(ValueError, true_err_msg):
ELMo_NER.check_params(
elmo_hub_module_handle=self.ELMO_HUB_MODULE,
finetune_elmo=True, batch_size=32, lr=1e-3, l2_reg=1e-4, validation_fraction=0.1, max_epochs=10,
patience=3, gpu_memory_frac=1.0, verbose=False, random_seed=42
)
def test_check_params_negative013(self):
true_err_msg = re.escape('`max_seq_length` is wrong! Expected `{0}`, got `{1}`.'.format(
type(3), type('3')))
with self.assertRaisesRegex(ValueError, true_err_msg):
ELMo_NER.check_params(
elmo_hub_module_handle=self.ELMO_HUB_MODULE, finetune_elmo=True, batch_size=32, max_seq_length='512',
lr=1e-3, l2_reg=1e-4, validation_fraction=0.1, max_epochs=10, patience=3, gpu_memory_frac=1.0,
verbose=False, random_seed=42
)
def test_check_params_negative014(self):
true_err_msg = re.escape('`max_seq_length` is wrong! Expected a positive integer value, but -3 is not '
'positive.')
with self.assertRaisesRegex(ValueError, true_err_msg):
ELMo_NER.check_params(
elmo_hub_module_handle=self.ELMO_HUB_MODULE, finetune_elmo=True, batch_size=32, max_seq_length=-3,
lr=1e-3, l2_reg=1e-4, validation_fraction=0.1, max_epochs=10, patience=3, gpu_memory_frac=1.0,
verbose=False, random_seed=42
)
def test_check_params_negative015(self):
true_err_msg = re.escape('`validation_fraction` is not specified!')
with self.assertRaisesRegex(ValueError, true_err_msg):
ELMo_NER.check_params(
elmo_hub_module_handle=self.ELMO_HUB_MODULE, finetune_elmo=True, batch_size=32, max_seq_length=512,
lr=1e-3, l2_reg=1e-4, max_epochs=10, patience=3, gpu_memory_frac=1.0, verbose=False, random_seed=42
)
def test_check_params_negative016(self):
true_err_msg = re.escape('`validation_fraction` is wrong! Expected `{0}`, got `{1}`.'.format(
type(3.5), type('3')))
with self.assertRaisesRegex(ValueError, true_err_msg):
ELMo_NER.check_params(
elmo_hub_module_handle=self.ELMO_HUB_MODULE, finetune_elmo=True, batch_size=32, max_seq_length=512,
lr=1e-3, l2_reg=1e-4, validation_fraction='0.1', max_epochs=10, patience=3, gpu_memory_frac=1.0,
verbose=False, random_seed=42
)
def test_check_params_negative017(self):
true_err_msg = '`validation_fraction` is wrong! Expected a positive floating-point value less than 1.0, but ' \
'{0} is not positive.'.format(-0.1)
with self.assertRaisesRegex(ValueError, true_err_msg):
ELMo_NER.check_params(
elmo_hub_module_handle=self.ELMO_HUB_MODULE, finetune_elmo=True, batch_size=32, max_seq_length=512,
lr=1e-3, l2_reg=1e-4, validation_fraction=-0.1, max_epochs=10, patience=3, gpu_memory_frac=1.0,
verbose=False, random_seed=42
)
def test_check_params_negative018(self):
true_err_msg = '`validation_fraction` is wrong! Expected a positive floating-point value less than 1.0, but ' \
'{0} is not less than 1.0.'.format(1.1)
with self.assertRaisesRegex(ValueError, true_err_msg):
ELMo_NER.check_params(
elmo_hub_module_handle=self.ELMO_HUB_MODULE, finetune_elmo=True, batch_size=32, max_seq_length=512,
lr=1e-3, l2_reg=1e-4, validation_fraction=1.1, max_epochs=10, patience=3, gpu_memory_frac=1.0,
verbose=False, random_seed=42
)
def test_check_params_negative019(self):
true_err_msg = re.escape('`gpu_memory_frac` is not specified!')
with self.assertRaisesRegex(ValueError, true_err_msg):
ELMo_NER.check_params(
elmo_hub_module_handle=self.ELMO_HUB_MODULE, finetune_elmo=True, batch_size=32, max_seq_length=512,
lr=1e-3, l2_reg=1e-4, validation_fraction=0.1, max_epochs=10, patience=3, verbose=False, random_seed=42
)
def test_check_params_negative020(self):
true_err_msg = re.escape('`gpu_memory_frac` is wrong! Expected `{0}`, got `{1}`.'.format(
type(3.5), type('3')))
with self.assertRaisesRegex(ValueError, true_err_msg):
ELMo_NER.check_params(
elmo_hub_module_handle=self.ELMO_HUB_MODULE, finetune_elmo=True, batch_size=32, max_seq_length=512,
lr=1e-3, l2_reg=1e-4, validation_fraction=0.1, max_epochs=10, patience=3, gpu_memory_frac='1.0',
verbose=False, random_seed=42
)
def test_check_params_negative021(self):
true_err_msg = re.escape('`gpu_memory_frac` is wrong! Expected a floating-point value in the (0.0, 1.0], '
'but {0} is not proper.'.format(-1.0))
with self.assertRaisesRegex(ValueError, true_err_msg):
ELMo_NER.check_params(
elmo_hub_module_handle=self.ELMO_HUB_MODULE, finetune_elmo=True, batch_size=32, max_seq_length=512,
lr=1e-3, l2_reg=1e-4, validation_fraction=0.1, max_epochs=10, patience=3, gpu_memory_frac=-1.0,
verbose=False, random_seed=42
)
def test_check_params_negative022(self):
true_err_msg = re.escape('`gpu_memory_frac` is wrong! Expected a floating-point value in the (0.0, 1.0], '
'but {0} is not proper.'.format(1.3))
with self.assertRaisesRegex(ValueError, true_err_msg):
ELMo_NER.check_params(
elmo_hub_module_handle=self.ELMO_HUB_MODULE, finetune_elmo=True, batch_size=32, max_seq_length=512,
lr=1e-3, l2_reg=1e-4, validation_fraction=0.1, max_epochs=10, patience=3, gpu_memory_frac=1.3,
verbose=False, random_seed=42
)
def test_check_params_negative023(self):
true_err_msg = re.escape('`lr` is not specified!')
with self.assertRaisesRegex(ValueError, true_err_msg):
ELMo_NER.check_params(
elmo_hub_module_handle=self.ELMO_HUB_MODULE, finetune_elmo=True, batch_size=32, max_seq_length=512,
l2_reg=1e-4, validation_fraction=0.1, max_epochs=10, patience=3, gpu_memory_frac=1.0, verbose=False,
random_seed=42
)
def test_check_params_negative024(self):
true_err_msg = re.escape('`lr` is wrong! Expected `{0}`, got `{1}`.'.format(
type(3.5), type('3')))
with self.assertRaisesRegex(ValueError, true_err_msg):
ELMo_NER.check_params(
elmo_hub_module_handle=self.ELMO_HUB_MODULE, finetune_elmo=True, batch_size=32, max_seq_length=512,
lr='1e-3', l2_reg=1e-4, validation_fraction=0.1, max_epochs=10, patience=3, gpu_memory_frac=1.0,
verbose=False, random_seed=42
)
def test_check_params_negative025(self):
true_err_msg = re.escape('`lr` is wrong! Expected a positive floating-point value, but {0} is not '
'positive.'.format(0.0))
with self.assertRaisesRegex(ValueError, true_err_msg):
ELMo_NER.check_params(
elmo_hub_module_handle=self.ELMO_HUB_MODULE, finetune_elmo=True, batch_size=32, max_seq_length=512,
lr=0.0, l2_reg=1e-4, validation_fraction=0.1, max_epochs=10, patience=3, gpu_memory_frac=1.0,
verbose=False, random_seed=42
)
def test_check_params_negative026(self):
true_err_msg = re.escape('`lr` is not specified!')
with self.assertRaisesRegex(ValueError, true_err_msg):
ELMo_NER.check_params(
elmo_hub_module_handle=self.ELMO_HUB_MODULE, finetune_elmo=True, batch_size=32, max_seq_length=512,
l2_reg=1e-4, validation_fraction=0.1, max_epochs=10, patience=3, gpu_memory_frac=1.0, verbose=False,
random_seed=42
)
def test_check_params_negative027(self):
true_err_msg = re.escape('`lr` is wrong! Expected `{0}`, got `{1}`.'.format(
type(3.5), type('3')))
with self.assertRaisesRegex(ValueError, true_err_msg):
ELMo_NER.check_params(
elmo_hub_module_handle=self.ELMO_HUB_MODULE, finetune_elmo=True, batch_size=32, max_seq_length=512,
lr='1e-3', l2_reg=1e-4, validation_fraction=0.1, max_epochs=10, patience=3, gpu_memory_frac=1.0,
verbose=False, random_seed=42
)
def test_check_params_negative028(self):
true_err_msg = re.escape('`lr` is wrong! Expected a positive floating-point value, but {0} is not '
'positive.'.format(0.0))
with self.assertRaisesRegex(ValueError, true_err_msg):
ELMo_NER.check_params(
elmo_hub_module_handle=self.ELMO_HUB_MODULE, finetune_elmo=True, batch_size=32, max_seq_length=512,
lr=0.0, l2_reg=1e-4, validation_fraction=0.1, max_epochs=10, patience=3, gpu_memory_frac=1.0,
verbose=False, random_seed=42
)
def test_check_params_negative029(self):
true_err_msg = re.escape('`l2_reg` is not specified!')
with self.assertRaisesRegex(ValueError, true_err_msg):
ELMo_NER.check_params(
elmo_hub_module_handle=self.ELMO_HUB_MODULE, finetune_elmo=True, batch_size=32, max_seq_length=512,
lr=1e-3, validation_fraction=0.1, max_epochs=10, patience=3, gpu_memory_frac=1.0, verbose=False,
random_seed=42
)
def test_check_params_negative030(self):
true_err_msg = re.escape('`l2_reg` is wrong! Expected `{0}`, got `{1}`.'.format(
type(3.5), type('3')))
with self.assertRaisesRegex(ValueError, true_err_msg):
ELMo_NER.check_params(
elmo_hub_module_handle=self.ELMO_HUB_MODULE, finetune_elmo=True, batch_size=32, max_seq_length=512,
lr=1e-3, l2_reg='1e-4', validation_fraction=0.1, max_epochs=10, patience=3, gpu_memory_frac=1.0,
verbose=False, random_seed=42
)
def test_check_params_negative031(self):
true_err_msg = re.escape('`l2_reg` is wrong! Expected a non-negative floating-point value, but {0} is '
'negative.'.format(-2.0))
with self.assertRaisesRegex(ValueError, true_err_msg):
ELMo_NER.check_params(
elmo_hub_module_handle=self.ELMO_HUB_MODULE, finetune_elmo=True, batch_size=32, max_seq_length=512,
lr=1e-3, l2_reg=-2.0, validation_fraction=0.1, max_epochs=10, patience=3, gpu_memory_frac=1.0,
verbose=False, random_seed=42
)
def test_check_params_negative032(self):
true_err_msg = re.escape('`finetune_elmo` is not specified!')
with self.assertRaisesRegex(ValueError, true_err_msg):
ELMo_NER.check_params(
elmo_hub_module_handle=self.ELMO_HUB_MODULE, batch_size=32, max_seq_length=512, lr=1e-3, l2_reg=1e-4,
validation_fraction=0.1, max_epochs=10, patience=3, gpu_memory_frac=1.0, verbose=False, random_seed=42
)
def test_check_params_negative033(self):
true_err_msg = re.escape('`finetune_elmo` is wrong! Expected `{0}`, got `{1}`.'.format(
type(True), type('3')))
with self.assertRaisesRegex(ValueError, true_err_msg):
ELMo_NER.check_params(
elmo_hub_module_handle=self.ELMO_HUB_MODULE, finetune_elmo='True', batch_size=32, max_seq_length=512,
lr=1e-3, l2_reg=1e-4, validation_fraction=0.1, max_epochs=10, patience=3, gpu_memory_frac=1.0,
verbose=False, random_seed=42
)
def test_check_params_negative034(self):
true_err_msg = re.escape('`verbose` is not specified!')
with self.assertRaisesRegex(ValueError, true_err_msg):
ELMo_NER.check_params(
elmo_hub_module_handle=self.ELMO_HUB_MODULE, finetune_elmo=True, batch_size=32, max_seq_length=512,
lr=1e-3, l2_reg=1e-4, validation_fraction=0.1, max_epochs=10, patience=3, gpu_memory_frac=1.0,
random_seed=42
)
def test_check_params_negative035(self):
true_err_msg = re.escape('`verbose` is wrong! Expected `{0}`, got `{1}`.'.format(
type(True), type('3')))
with self.assertRaisesRegex(ValueError, true_err_msg):
ELMo_NER.check_params(
elmo_hub_module_handle=self.ELMO_HUB_MODULE, finetune_elmo=True, batch_size=32, max_seq_length=512,
lr=1e-3, l2_reg=1e-4, validation_fraction=0.1, max_epochs=10, patience=3, gpu_memory_frac=1.0,
verbose='False', random_seed=42
)
def test_check_X_positive(self):
X = ['abc', 'defgh', '4wdffg']
ELMo_NER.check_X(X, 'X_train')
self.assertTrue(True)
def test_check_X_negative01(self):
X = {'abc', 'defgh', '4wdffg'}
true_err_msg = re.escape('`X_train` is wrong, because it is not list-like object!')
with self.assertRaisesRegex(ValueError, true_err_msg):
ELMo_NER.check_X(X, 'X_train')
def test_check_X_negative02(self):
X = np.random.uniform(-1.0, 1.0, (10, 2))
true_err_msg = re.escape('`X_train` is wrong, because it is not 1-D list!')
with self.assertRaisesRegex(ValueError, true_err_msg):
ELMo_NER.check_X(X, 'X_train')
def test_check_X_negative03(self):
X = ['abc', 23, '4wdffg']
true_err_msg = re.escape('Item 1 of `X_train` is wrong, because it is not string-like object!')
with self.assertRaisesRegex(ValueError, true_err_msg):
ELMo_NER.check_X(X, 'X_train')
def text_check_Xy_positive(self):
X = [
'Встреча с послом Италии в миде Грузии. По инициативе итальянской стороны чрезвычайный и полномочный посол '
'Италии в Грузии Виторио Сандали встретился с заместителем министра иностранных дел Грузии Александром '
'Налбандовым.',
'Барак Обама принимает в Белом доме своего французского коллегу Николя Саркози. Как было объявлено, '
'президент Франции прибыл в Вашингтон, чтобы обсудить с главой администрации США ряд насущных проблем, '
'главное место среди которых занимает состояние мировой экономики и безопасность.'
]
y = [
{
'ORG': [(26, 37)],
'PER': [(122, 137), (196, 219)]
},
{
'ORG': [(126, 135)],
'PER': [(0, 11), (63, 77)],
'LOC': [(24, 34), (161, 178)]
}
]
true_classes_list = ('LOC', 'ORG', 'PER')
self.assertEqual(true_classes_list, ELMo_NER.check_Xy(X, 'X_train', y, 'y_train'))
def text_check_Xy_negative01(self):
X = {
'Встреча с послом Италии в миде Грузии. По инициативе итальянской стороны чрезвычайный и полномочный посол '
'Италии в Грузии Виторио Сандали встретился с заместителем министра иностранных дел Грузии Александром '
'Налбандовым.',
'Барак Обама принимает в Белом доме своего французского коллегу Николя Саркози. Как было объявлено, '
'президент Франции прибыл в Вашингтон, чтобы обсудить с главой администрации США ряд насущных проблем, '
'главное место среди которых занимает состояние мировой экономики и безопасность.'
}
y = [
{
'ORG': [(26, 37)],
'PER': [(122, 137), (196, 219)]
},
{
'ORG': [(126, 135)],
'PER': [(0, 11), (63, 77)],
'LOC': [(24, 34), (161, 178)]
}
]
true_err_msg = re.escape('`X_train` is wrong, because it is not list-like object!')
with self.assertRaisesRegex(ValueError, true_err_msg):
ELMo_NER.check_Xy(X, 'X_train', y, 'y_train')
def text_check_Xy_negative02(self):
X = [
'Встреча с послом Италии в миде Грузии. По инициативе итальянской стороны чрезвычайный и полномочный посол '
'Италии в Грузии Виторио Сандали встретился с заместителем министра иностранных дел Грузии Александром '
'Налбандовым.',
'Барак Обама принимает в Белом доме своего французского коллегу Николя Саркози. Как было объявлено, '
'президент Франции прибыл в Вашингтон, чтобы обсудить с главой администрации США ряд насущных проблем, '
'главное место среди которых занимает состояние мировой экономики и безопасность.'
]
y = {
'1': {
'ORG': [(26, 37)],
'PER': [(122, 137), (196, 219)]
},
'2': {
'ORG': [(126, 135)],
'PER': [(0, 11), (63, 77)],
'LOC': [(24, 34), (161, 178)]
}
}
true_err_msg = re.escape('`y_train` is wrong, because it is not a list-like object!')
with self.assertRaisesRegex(ValueError, true_err_msg):
ELMo_NER.check_Xy(X, 'X_train', y, 'y_train')
def text_check_Xy_negative03(self):
X = [
'Встреча с послом Италии в миде Грузии. По инициативе итальянской стороны чрезвычайный и полномочный посол '
'Италии в Грузии Виторио Сандали встретился с заместителем министра иностранных дел Грузии Александром '
'Налбандовым.',
'Барак Обама принимает в Белом доме своего французского коллегу Николя Саркози. Как было объявлено, '
'президент Франции прибыл в Вашингтон, чтобы обсудить с главой администрации США ряд насущных проблем, '
'главное место среди которых занимает состояние мировой экономики и безопасность.'
]
y = np.random.uniform(-1.0, 1.0, (10, 2))
true_err_msg = re.escape('`y_train` is wrong, because it is not 1-D list!')
with self.assertRaisesRegex(ValueError, true_err_msg):
ELMo_NER.check_Xy(X, 'X_train', y, 'y_train')
def text_check_Xy_negative04(self):
X = [
'Встреча с послом Италии в миде Грузии. По инициативе итальянской стороны чрезвычайный и полномочный посол '
'Италии в Грузии Виторио Сандали встретился с заместителем министра иностранных дел Грузии Александром '
'Налбандовым.',
'Барак Обама принимает в Белом доме своего французского коллегу Николя Саркози. Как было объявлено, '
'президент Франции прибыл в Вашингтон, чтобы обсудить с главой администрации США ряд насущных проблем, '
'главное место среди которых занимает состояние мировой экономики и безопасность.'
]
y = [
{
'ORG': [(26, 37)],
'PER': [(122, 137), (196, 219)]
},
{
'ORG': [(126, 135)],
'PER': [(0, 11), (63, 77)],
'LOC': [(24, 34), (161, 178)]
},
{
'LOC': [(17, 24), (117, 130)]
}
]
true_err_msg = re.escape('Length of `X_train` does not correspond to length of `y_train`! 2 != 3')
with self.assertRaisesRegex(ValueError, true_err_msg):
ELMo_NER.check_Xy(X, 'X_train', y, 'y_train')
def text_check_Xy_negative05(self):
X = [
'Встреча с послом Италии в миде Грузии. По инициативе итальянской стороны чрезвычайный и полномочный посол '
'Италии в Грузии Виторио Сандали встретился с заместителем министра иностранных дел Грузии Александром '
'Налбандовым.',
'Барак Обама принимает в Белом доме своего французского коллегу Николя Саркози. Как было объявлено, '
'президент Франции прибыл в Вашингтон, чтобы обсудить с главой администрации США ряд насущных проблем, '
'главное место среди которых занимает состояние мировой экономики и безопасность.'
]
y = [
{
'ORG': [(26, 37)],
'PER': [(122, 137), (196, 219)]
},
4
]
true_err_msg = re.escape('Item 1 of `y_train` is wrong, because it is not a dictionary-like object!')
with self.assertRaisesRegex(ValueError, true_err_msg):
ELMo_NER.check_Xy(X, 'X_train', y, 'y_train')
def text_check_Xy_negative06(self):
X = [
'Встреча с послом Италии в миде Грузии. По инициативе итальянской стороны чрезвычайный и полномочный посол '
'Италии в Грузии Виторио Сандали встретился с заместителем министра иностранных дел Грузии Александром '
'Налбандовым.',
'Барак Обама принимает в Белом доме своего французского коллегу Николя Саркози. Как было объявлено, '
'президент Франции прибыл в Вашингтон, чтобы обсудить с главой администрации США ряд насущных проблем, '
'главное место среди которых занимает состояние мировой экономики и безопасность.'
]
y = [
{
1: [(26, 37)],
'PER': [(122, 137), (196, 219)]
},
{
'ORG': [(126, 135)],
'PER': [(0, 11), (63, 77)],
'LOC': [(24, 34), (161, 178)]
}
]
true_err_msg = re.escape('Item 0 of `y_train` is wrong, because its key `1` is not a string-like object!')
with self.assertRaisesRegex(ValueError, true_err_msg):
ELMo_NER.check_Xy(X, 'X_train', y, 'y_train')
def text_check_Xy_negative07(self):
X = [
'Встреча с послом Италии в миде Грузии. По инициативе итальянской стороны чрезвычайный и полномочный посол '
'Италии в Грузии Виторио Сандали встретился с заместителем министра иностранных дел Грузии Александром '
'Налбандовым.',
'Барак Обама принимает в Белом доме своего французского коллегу Николя Саркози. Как было объявлено, '
'президент Франции прибыл в Вашингтон, чтобы обсудить с главой администрации США ряд насущных проблем, '
'главное место среди которых занимает состояние мировой экономики и безопасность.'
]
y = [
{
'ORG': [(26, 37)],
'PER': [(122, 137), (196, 219)]
},
{
'ORG': [(126, 135)],
'PER': [(0, 11), (63, 77)],
'O': [(24, 34), (161, 178)]
}
]
true_err_msg = re.escape('Item 1 of `y_train` is wrong, because its key `O` incorrectly specifies a named '
'entity!')
with self.assertRaisesRegex(ValueError, true_err_msg):
ELMo_NER.check_Xy(X, 'X_train', y, 'y_train')
def text_check_Xy_negative08(self):
X = [
'Встреча с послом Италии в миде Грузии. По инициативе итальянской стороны чрезвычайный и полномочный посол '
'Италии в Грузии Виторио Сандали встретился с заместителем министра иностранных дел Грузии Александром '
'Налбандовым.',
'Барак Обама принимает в Белом доме своего французского коллегу Николя Саркози. Как было объявлено, '
'президент Франции прибыл в Вашингтон, чтобы обсудить с главой администрации США ряд насущных проблем, '
'главное место среди которых занимает состояние мировой экономики и безопасность.'
]
y = [
{
'ORG': [(26, 37)],
'PER': [(122, 137), (196, 219)]
},
{
'ORG': [(126, 135)],
'PER': [(0, 11), (63, 77)],
'123': [(24, 34), (161, 178)]
}
]
true_err_msg = re.escape('Item 1 of `y_train` is wrong, because its key `123` incorrectly specifies a named '
'entity!')
with self.assertRaisesRegex(ValueError, true_err_msg):
ELMo_NER.check_Xy(X, 'X_train', y, 'y_train')
def text_check_Xy_negative09(self):
X = [
'Встреча с послом Италии в миде Грузии. По инициативе итальянской стороны чрезвычайный и полномочный посол '
'Италии в Грузии Виторио Сандали встретился с заместителем министра иностранных дел Грузии Александром '
'Налбандовым.',
'Барак Обама принимает в Белом доме своего французского коллегу Николя Саркози. Как было объявлено, '
'президент Франции прибыл в Вашингтон, чтобы обсудить с главой администрации США ряд насущных проблем, '
'главное место среди которых занимает состояние мировой экономики и безопасность.'
]
y = [
{
'ORG': [(26, 37)],
'PER': [(122, 137), (196, 219)]
},
{
'ORG': [(126, 135)],
'PER': [(0, 11), (63, 77)],
'loc': [(24, 34), (161, 178)]
}
]
true_err_msg = re.escape('Item 1 of `y_train` is wrong, because its key `loc` incorrectly specifies a named '
'entity!')
with self.assertRaisesRegex(ValueError, true_err_msg):
ELMo_NER.check_Xy(X, 'X_train', y, 'y_train')
def text_check_Xy_negative10(self):
X = [
'Встреча с послом Италии в миде Грузии. По инициативе итальянской стороны чрезвычайный и полномочный посол '
'Италии в Грузии Виторио Сандали встретился с заместителем министра иностранных дел Грузии Александром '
'Налбандовым.',
'Барак Обама принимает в Белом доме своего французского коллегу Николя Саркози. Как было объявлено, '
'президент Франции прибыл в Вашингтон, чтобы обсудить с главой администрации США ряд насущных проблем, '
'главное место среди которых занимает состояние мировой экономики и безопасность.'
]
y = [
{
'ORG': [(26, 37)],
'PER': {1, 2}
},
{
'ORG': [(126, 135)],
'PER': [(0, 11), (63, 77)],
'LOC': [(24, 34), (161, 178)]
}
]
true_err_msg = re.escape('Item 0 of `y_train` is wrong, because its value `{0}` is not a list-like '
'object!'.format(y[0]['PER']))
with self.assertRaisesRegex(ValueError, true_err_msg):
ELMo_NER.check_Xy(X, 'X_train', y, 'y_train')
def text_check_Xy_negative11(self):
X = [
'Встреча с послом Италии в миде Грузии. По инициативе итальянской стороны чрезвычайный и полномочный посол '
'Италии в Грузии Виторио Сандали встретился с заместителем министра иностранных дел Грузии Александром '
'Налбандовым.',
'Барак Обама принимает в Белом доме своего французского коллегу Николя Саркози. Как было объявлено, '
'президент Франции прибыл в Вашингтон, чтобы обсудить с главой администрации США ряд насущных проблем, '
'главное место среди которых занимает состояние мировой экономики и безопасность.'
]
y = [
{
'ORG': [(26, 37)],
'PER': [(122, 137), (196, 219)]
},
{
'ORG': [(126, 135)],
'PER': [(0, 11), 63],
'LOC': [(24, 34), (161, 178)]
}
]
true_err_msg = re.escape('Item 1 of `y_train` is wrong, because named entity bounds `63` are not specified as '
'list-like object!')
with self.assertRaisesRegex(ValueError, true_err_msg):
ELMo_NER.check_Xy(X, 'X_train', y, 'y_train')
def text_check_Xy_negative12(self):
X = [
'Встреча с послом Италии в миде Грузии. По инициативе итальянской стороны чрезвычайный и полномочный посол '
'Италии в Грузии Виторио Сандали встретился с заместителем министра иностранных дел Грузии Александром '
'Налбандовым.',
'Барак Обама принимает в Белом доме своего французского коллегу Николя Саркози. Как было объявлено, '
'президент Франции прибыл в Вашингтон, чтобы обсудить с главой администрации США ряд насущных проблем, '
'главное место среди которых занимает состояние мировой экономики и безопасность.'
]
y = [
{
'ORG': [(26, 37)],
'PER': [(122, 137), (196, 219)]
},
{
'ORG': [(126, 135)],
'PER': [(0, 11), (63, 77, 81)],
'LOC': [(24, 34), (161, 178)]
}
]
true_err_msg = re.escape('Item 1 of `y_train` is wrong, because named entity bounds `{0}` are not specified as '
'2-D list!'.format((63, 77, 81)))
with self.assertRaisesRegex(ValueError, true_err_msg):
ELMo_NER.check_Xy(X, 'X_train', y, 'y_train')
def text_check_Xy_negative13(self):
X = [
'Встреча с послом Италии в миде Грузии. По инициативе итальянской стороны чрезвычайный и полномочный посол '
'Италии в Грузии Виторио Сандали встретился с заместителем министра иностранных дел Грузии Александром '
'Налбандовым.',
'Барак Обама принимает в Белом доме своего французского коллегу Николя Саркози. Как было объявлено, '
'президент Франции прибыл в Вашингтон, чтобы обсудить с главой администрации США ряд насущных проблем, '
'главное место среди которых занимает состояние мировой экономики и безопасность.'
]
y = [
{
'ORG': [(26, 37)],
'PER': [(122, 137), (219, 196)]
},
{
'ORG': [(126, 135)],
'PER': [(0, 11), (63, 77)],
'LOC': [(24, 34), (161, 178)]
}
]
true_err_msg = re.escape('Item 0 of `y_train` is wrong, because named entity bounds `{0}` are '
'incorrect!'.format((219, 196)))
with self.assertRaisesRegex(ValueError, true_err_msg):
ELMo_NER.check_Xy(X, 'X_train', y, 'y_train')
def text_check_Xy_negative14(self):
X = [
'Встреча с послом Италии в миде Грузии. По инициативе итальянской стороны чрезвычайный и полномочный посол '
'Италии в Грузии Виторио Сандали встретился с заместителем министра иностранных дел Грузии Александром '
'Налбандовым.',
'Барак Обама принимает в Белом доме своего французского коллегу Николя Саркози. Как было объявлено, '
'президент Франции прибыл в Вашингтон, чтобы обсудить с главой администрации США ряд насущных проблем, '
'главное место среди которых занимает состояние мировой экономики и безопасность.'
]
y = [
{
'ORG': [(26, 37)],
'PER': [(122, 137), (196, 519)]
},
{
'ORG': [(126, 135)],
'PER': [(0, 11), (63, 77)],
'LOC': [(24, 34), (161, 178)]
}
]
true_err_msg = re.escape('Item 0 of `y_train` is wrong, because named entity bounds `{0}` are '
'incorrect!'.format((196, 519)))
with self.assertRaisesRegex(ValueError, true_err_msg):
ELMo_NER.check_Xy(X, 'X_train', y, 'y_train')
def text_check_Xy_negative15(self):
X = [
'Встреча с послом Италии в миде Грузии. По инициативе итальянской стороны чрезвычайный и полномочный посол '
'Италии в Грузии Виторио Сандали встретился с заместителем министра иностранных дел Грузии Александром '
'Налбандовым.',
'Барак Обама принимает в Белом доме своего французского коллегу Николя Саркози. Как было объявлено, '
'президент Франции прибыл в Вашингтон, чтобы обсудить с главой администрации США ряд насущных проблем, '
'главное место среди которых занимает состояние мировой экономики и безопасность.'
]
y = [
{
'ORG': [(26, 37)],
'PER': [(-1, 137), (196, 219)]
},
{
'ORG': [(126, 135)],
'PER': [(0, 11), (63, 77)],
'LOC': [(24, 34), (161, 178)]
}
]
true_err_msg = re.escape('Item 0 of `y_train` is wrong, because named entity bounds `{0}` are '
'incorrect!'.format((-1, 137)))
with self.assertRaisesRegex(ValueError, true_err_msg):
ELMo_NER.check_Xy(X, 'X_train', y, 'y_train')
def test_calculate_bounds_of_tokens_positive01(self):
source_text = 'Совершенно новую технологию перекачки российской водки за рубеж начали использовать ' \
'контрабандисты.'
tokenized_text = ['Совершенно', 'новую', 'технологию', 'перекачки', 'российской', 'водки', 'за', 'рубеж',
'начали', 'использовать', 'контрабандисты', '.']
true_bounds = [(0, 10), (11, 16), (17, 27), (28, 37), (38, 48), (49, 54), (55, 57), (58, 63), (64, 70),
(71, 83), (84, 98), (98, 99)]
self.assertEqual(true_bounds, ELMo_NER.calculate_bounds_of_tokens(source_text, tokenized_text))
def test_calculate_bounds_of_tokens_positive02(self):
source_text = 'Один из последних представителей клады, тираннозавр (Tyrannosaurus rex), живший 66–67 ' \
'миллионов лет назад, был одним из крупнейших когда-либо живших сухопутных хищников'
tokenized_text = ['Один', 'из', 'последних', 'представителей', 'клады', ',', 'тираннозавр', '(',
'Tyrannosaurus', 'rex', ')', ',', 'живший', '66', '–', '67', 'миллионов', 'лет', 'назад', ',',
'был', 'одним', 'из', 'крупнейших', 'когда', '-', 'либо', 'живших', 'сухопутных', 'хищников']
true_bounds = [(0, 4), (5, 7), (8, 17), (18, 32), (33, 38), (38, 39), (40, 51), (52, 53), (53, 66), (67, 70),
(70, 71), (71, 72), (73, 79), (80, 82), (82, 83), (83, 85), (86, 95), (96, 99), (100, 105),
(105, 106), (107, 110), (111, 116), (117, 119), (120, 130), (131, 136), (136, 137), (137, 141),
(142, 148), (149, 159), (160, 168)]
self.assertEqual(true_bounds, ELMo_NER.calculate_bounds_of_tokens(source_text, tokenized_text))
def test_detect_token_labels_positive01(self):
    """Character-level entity indices must be converted into per-token class IDs (padded to length 16)."""
    text = 'Барак Обама принимает в Белом доме своего французского коллегу Николя Саркози.'
    tokens = ['Барак', 'Обама', 'принимает', 'в', 'Белом', 'доме', 'своего',
              'французского', 'коллегу', 'Николя', 'Саркози', '.']
    bounds = ELMo_NER.calculate_bounds_of_tokens(text, tokens)
    # One integer per character of the source text: 0 = outside, >0 = index of a named entity.
    char_level_indices = np.array(
        [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 0,
         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3, 3, 3,
         3, 3, 3, 3, 3, 3, 0],
        dtype=np.int32
    )
    entity_to_class = {1: 1, 2: 2, 3: 1}
    expected_labels = np.array([2, 1, 0, 0, 4, 3, 0, 0, 0, 2, 1, 0, 0, 0, 0, 0], dtype=np.int32)
    calculated_labels = ELMo_NER.detect_token_labels(bounds, char_level_indices, entity_to_class, 16)
    self.assertIsInstance(calculated_labels, np.ndarray)
    self.assertEqual(expected_labels.shape, calculated_labels.shape)
    self.assertEqual(expected_labels.tolist(), calculated_labels.tolist())
def test_detect_token_labels_positive02(self):
    """Per-token labels must be detected correctly for several entity types and a longer padding (32)."""
    text = 'С 1876 г Павлов ассистирует профессору К. Н. Устимовичу в Медико-хирургической академии и ' \
           'параллельно изучает физиологию кровообращения.'
    tokens = ['С', '1876', 'г', 'Павлов', 'ассистирует', 'профессору', 'К', '.', 'Н', '.', 'Устимовичу',
              'в', 'Медико', '-', 'хирургической', 'академии', 'и', 'параллельно', 'изучает', 'физиологию',
              'кровообращения', '.']
    bounds = ELMo_NER.calculate_bounds_of_tokens(text, tokens)
    # One integer per character: 0 = outside, >0 = index of a named entity occurrence.
    char_level_indices = np.array(
        [0, 0, 1, 1, 1, 1, 1, 1, 0, 2, 2, 2, 2, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3, 3, 3,
         3, 3, 0, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0, 0, 0, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
         5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        dtype=np.int32
    )
    entity_to_class = {1: 1, 2: 2, 3: 3, 4: 2, 5: 4}
    expected_labels = np.array(
        [0, 2, 1, 4, 0, 6, 4, 3, 3, 3, 3, 0, 8, 7, 7, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        dtype=np.int32
    )
    calculated_labels = ELMo_NER.detect_token_labels(bounds, char_level_indices, entity_to_class, 32)
    self.assertIsInstance(calculated_labels, np.ndarray)
    self.assertEqual(expected_labels.shape, calculated_labels.shape)
    self.assertEqual(expected_labels.tolist(), calculated_labels.tolist())
def test_calculate_indices_of_named_entities(self):
    """Entity span annotations must be expanded into a character-level index array plus a label-to-class map."""
    text = 'Барак Обама принимает в Белом доме своего французского коллегу Николя Саркози.'
    classes_list = ('LOCATION', 'ORG', 'PERSON')
    annotated_entities = {'PERSON': [(0, 11), (63, 77)], 'LOCATION': [(24, 34)]}
    expected_indices = np.array(
        [2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0,
         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3, 3, 3, 3,
         3, 3, 3, 3, 3, 0],
        dtype=np.int32
    )
    expected_mapping = {1: 1, 2: 3, 3: 3}
    indices, labels_to_classes = ELMo_NER.calculate_indices_of_named_entities(text, classes_list,
                                                                             annotated_entities)
    self.assertIsInstance(indices, np.ndarray)
    self.assertIsInstance(labels_to_classes, dict)
    self.assertEqual(expected_indices.shape, indices.shape)
    self.assertEqual(expected_indices.tolist(), indices.tolist())
    self.assertEqual(set(expected_mapping.keys()), set(labels_to_classes.keys()))
    for label_ID in expected_mapping:
        self.assertEqual(expected_mapping[label_ID], labels_to_classes[label_ID])
def test_fit_positive01(self):
    """fit() without ELMo fine-tuning must return a trained estimator carrying all hyper-parameters."""
    base_dir = os.path.join(os.path.dirname(__file__), 'testdata')
    self.ner = ELMo_NER(finetune_elmo=False, max_epochs=3, batch_size=4, max_seq_length=64, gpu_memory_frac=0.9,
                        validation_fraction=0.3, random_seed=None, elmo_hub_module_handle=self.ELMO_HUB_MODULE)
    X_train, y_train = load_dataset(os.path.join(base_dir, 'true_named_entities.json'))
    res = self.ner.fit(X_train, y_train)
    self.assertIsInstance(res, ELMo_NER)
    # (hyper-parameter name, expected type) — checked for presence first, then for type.
    hyperparams = [
        ('batch_size', int), ('lr', float), ('l2_reg', float), ('elmo_hub_module_handle', str),
        ('finetune_elmo', bool), ('max_epochs', int), ('patience', int), ('random_seed', int),
        ('gpu_memory_frac', float), ('max_seq_length', int), ('validation_fraction', float), ('verbose', bool)
    ]
    for param_name, _ in hyperparams:
        self.assertTrue(hasattr(res, param_name))
    for param_name, expected_type in hyperparams:
        self.assertIsInstance(getattr(res, param_name), expected_type)
    # Attributes created by fit() use the scikit-learn trailing-underscore convention.
    for fitted_attr in ('classes_list_', 'logits_', 'transition_params_', 'input_tokens_',
                        'sequence_lengths_', 'additional_features_', 'y_ph_', 'sess_'):
        self.assertTrue(hasattr(res, fitted_attr))
    self.assertEqual(res.classes_list_, ('LOCATION', 'ORG', 'PERSON'))
def test_fit_positive02(self):
    """fit() with ELMo fine-tuning and a fixed random seed must train and preserve the seed value."""
    base_dir = os.path.join(os.path.dirname(__file__), 'testdata')
    self.ner = ELMo_NER(finetune_elmo=True, max_epochs=3, batch_size=2, max_seq_length=64, gpu_memory_frac=0.9,
                        validation_fraction=0.3, random_seed=42, elmo_hub_module_handle=self.ELMO_HUB_MODULE)
    X_train, y_train = load_dataset(os.path.join(base_dir, 'true_named_entities.json'))
    res = self.ner.fit(X_train, y_train)
    self.assertIsInstance(res, ELMo_NER)
    # (hyper-parameter name, expected type) — checked for presence first, then for type.
    hyperparams = [
        ('batch_size', int), ('lr', float), ('l2_reg', float), ('elmo_hub_module_handle', str),
        ('finetune_elmo', bool), ('max_epochs', int), ('patience', int), ('random_seed', int),
        ('gpu_memory_frac', float), ('max_seq_length', int), ('validation_fraction', float), ('verbose', bool)
    ]
    for param_name, _ in hyperparams:
        self.assertTrue(hasattr(res, param_name))
    for param_name, expected_type in hyperparams:
        self.assertIsInstance(getattr(res, param_name), expected_type)
    # The explicitly supplied seed must survive fitting unchanged.
    self.assertEqual(res.random_seed, 42)
    for fitted_attr in ('classes_list_', 'shapes_list_', 'logits_', 'transition_params_', 'input_tokens_',
                        'sequence_lengths_', 'additional_features_', 'y_ph_', 'sess_'):
        self.assertTrue(hasattr(res, fitted_attr))
    self.assertEqual(res.classes_list_, ('LOCATION', 'ORG', 'PERSON'))
    self.assertIsInstance(res.shapes_list_, tuple)
    self.assertGreater(len(res.shapes_list_), 0)
def test_fit_positive03(self):
    """fit() without fine-tuning must also populate the shape vocabulary (`shapes_list_`)."""
    base_dir = os.path.join(os.path.dirname(__file__), 'testdata')
    self.ner = ELMo_NER(finetune_elmo=False, max_epochs=3, batch_size=4, max_seq_length=64, gpu_memory_frac=0.9,
                        validation_fraction=0.3, random_seed=None, elmo_hub_module_handle=self.ELMO_HUB_MODULE)
    X_train, y_train = load_dataset(os.path.join(base_dir, 'true_named_entities.json'))
    res = self.ner.fit(X_train, y_train)
    self.assertIsInstance(res, ELMo_NER)
    # (hyper-parameter name, expected type) — checked for presence first, then for type.
    hyperparams = [
        ('batch_size', int), ('lr', float), ('l2_reg', float), ('elmo_hub_module_handle', str),
        ('finetune_elmo', bool), ('max_epochs', int), ('patience', int), ('random_seed', int),
        ('gpu_memory_frac', float), ('max_seq_length', int), ('validation_fraction', float), ('verbose', bool)
    ]
    for param_name, _ in hyperparams:
        self.assertTrue(hasattr(res, param_name))
    for param_name, expected_type in hyperparams:
        self.assertIsInstance(getattr(res, param_name), expected_type)
    for fitted_attr in ('classes_list_', 'shapes_list_', 'logits_', 'transition_params_', 'input_tokens_',
                        'sequence_lengths_', 'additional_features_', 'y_ph_', 'sess_'):
        self.assertTrue(hasattr(res, fitted_attr))
    self.assertEqual(res.classes_list_, ('LOCATION', 'ORG', 'PERSON'))
    self.assertIsInstance(res.shapes_list_, tuple)
    self.assertGreater(len(res.shapes_list_), 0)
def test_fit_predict(self):
    """After fitting, predict() must return one entity dictionary per sample with non-zero quality scores."""
    base_dir = os.path.join(os.path.dirname(__file__), 'testdata')
    self.ner = ELMo_NER(finetune_elmo=False, max_epochs=3, batch_size=4, max_seq_length=64, gpu_memory_frac=0.9,
                        validation_fraction=0.3, random_seed=None, elmo_hub_module_handle=self.ELMO_HUB_MODULE)
    X_train, y_train = load_dataset(os.path.join(base_dir, 'true_named_entities.json'))
    res = self.ner.fit(X_train, y_train)
    self.assertIsInstance(res, ELMo_NER)
    # (hyper-parameter name, expected type) — checked for presence first, then for type.
    hyperparams = [
        ('batch_size', int), ('lr', float), ('l2_reg', float), ('elmo_hub_module_handle', str),
        ('finetune_elmo', bool), ('max_epochs', int), ('patience', int), ('random_seed', int),
        ('gpu_memory_frac', float), ('max_seq_length', int), ('validation_fraction', float), ('verbose', bool)
    ]
    for param_name, _ in hyperparams:
        self.assertTrue(hasattr(res, param_name))
    for param_name, expected_type in hyperparams:
        self.assertIsInstance(getattr(res, param_name), expected_type)
    for fitted_attr in ('classes_list_', 'shapes_list_', 'logits_', 'transition_params_', 'input_tokens_',
                        'sequence_lengths_', 'additional_features_', 'y_ph_', 'sess_'):
        self.assertTrue(hasattr(res, fitted_attr))
    self.assertEqual(res.classes_list_, ('LOCATION', 'ORG', 'PERSON'))
    self.assertIsInstance(res.shapes_list_, tuple)
    self.assertGreater(len(res.shapes_list_), 0)
    y_pred = res.predict(X_train)
    self.assertIsInstance(y_pred, list)
    self.assertEqual(len(X_train), len(y_pred))
    for sample_idx in range(len(y_pred)):
        self.assertIsInstance(y_pred[sample_idx], dict)
    # Quality on the training data itself: the model must be better than all-zero predictions.
    f1, precision, recall, _ = calculate_prediction_quality(y_train, y_pred, res.classes_list_)
    self.assertGreater(f1, 0.0)
    self.assertGreater(precision, 0.0)
    self.assertGreater(recall, 0.0)
def test_predict_negative(self):
    """predict() on an estimator that was never fitted must raise NotFittedError."""
    base_dir = os.path.join(os.path.dirname(__file__), 'testdata')
    X_train, _ = load_dataset(os.path.join(base_dir, 'true_named_entities.json'))
    self.ner = ELMo_NER(finetune_elmo=False, max_epochs=3, batch_size=4, random_seed=None,
                        elmo_hub_module_handle=self.ELMO_HUB_MODULE)
    with self.assertRaises(NotFittedError):
        _ = self.ner.predict(X_train)
def test_serialize_positive01(self):
    """A fitted model must survive a pickle round-trip: the reloaded estimator has to yield
    exactly the same predictions as the original one.

    Bug fix: the post-reload length check used to be ``assertEqual(len(y_pred2), len(y_pred2))``,
    a tautology that could never fail; it now compares predictions before and after serialization.
    """
    base_dir = os.path.join(os.path.dirname(__file__), 'testdata')
    self.ner = ELMo_NER(finetune_elmo=False, max_epochs=3, batch_size=4, max_seq_length=64, gpu_memory_frac=0.9,
                        validation_fraction=0.3, random_seed=None, elmo_hub_module_handle=self.ELMO_HUB_MODULE)
    X_train, y_train = load_dataset(os.path.join(base_dir, 'true_named_entities.json'))
    res = self.ner.fit(X_train, y_train)
    self.assertIsInstance(res, ELMo_NER)
    # Every hyper-parameter must be present and have the expected type after fitting.
    hyperparams = [
        ('batch_size', int), ('lr', float), ('l2_reg', float), ('elmo_hub_module_handle', str),
        ('finetune_elmo', bool), ('max_epochs', int), ('patience', int), ('random_seed', int),
        ('gpu_memory_frac', float), ('max_seq_length', int), ('validation_fraction', float), ('verbose', bool)
    ]
    for param_name, _ in hyperparams:
        self.assertTrue(hasattr(res, param_name))
    for param_name, expected_type in hyperparams:
        self.assertIsInstance(getattr(res, param_name), expected_type)
    # Attributes created by fit() (trailing underscore) must exist as well.
    for fitted_attr in ('classes_list_', 'shapes_list_', 'logits_', 'transition_params_', 'input_tokens_',
                        'sequence_lengths_', 'additional_features_', 'y_ph_', 'sess_'):
        self.assertTrue(hasattr(res, fitted_attr))
    self.assertEqual(res.classes_list_, ('LOCATION', 'ORG', 'PERSON'))
    self.assertIsInstance(res.shapes_list_, tuple)
    self.assertGreater(len(res.shapes_list_), 0)
    y_pred1 = res.predict(X_train)
    self.assertIsInstance(y_pred1, list)
    self.assertEqual(len(X_train), len(y_pred1))
    for sample_idx in range(len(y_pred1)):
        self.assertIsInstance(y_pred1[sample_idx], dict)
    f1, precision, recall, _ = calculate_prediction_quality(y_train, y_pred1, res.classes_list_)
    self.assertGreater(f1, 0.0)
    self.assertGreater(precision, 0.0)
    self.assertGreater(recall, 0.0)
    self.temp_file_name = tempfile.NamedTemporaryFile(mode='w').name
    with open(self.temp_file_name, mode='wb') as fp:
        pickle.dump(res, fp)
    # Drop every live reference so the reloaded object cannot silently share state with the old one.
    del res, self.ner
    gc.collect()
    with open(self.temp_file_name, mode='rb') as fp:
        self.ner = pickle.load(fp)
    y_pred2 = self.ner.predict(X_train)
    self.assertIsInstance(y_pred2, list)
    # Was `len(y_pred2)` vs itself — a check that could never fail.
    self.assertEqual(len(y_pred1), len(y_pred2))
    for sample_idx in range(len(y_pred2)):
        self.assertIsInstance(y_pred2[sample_idx], dict)
        self.assertEqual(set(y_pred1[sample_idx]), set(y_pred2[sample_idx]))
        for ne_type in y_pred1[sample_idx]:
            self.assertEqual(y_pred1[sample_idx][ne_type], y_pred2[sample_idx][ne_type])
def test_serialize_positive02(self):
    """An unfitted estimator must survive pickling with every hyper-parameter intact."""
    self.ner = ELMo_NER(random_seed=31, elmo_hub_module_handle=self.ELMO_HUB_MODULE)
    param_names = ('batch_size', 'lr', 'l2_reg', 'elmo_hub_module_handle', 'finetune_elmo', 'max_epochs',
                   'patience', 'random_seed', 'gpu_memory_frac', 'max_seq_length', 'validation_fraction',
                   'verbose')
    # Snapshot all hyper-parameters before the pickle round-trip.
    old_params = {param_name: getattr(self.ner, param_name) for param_name in param_names}
    self.temp_file_name = tempfile.NamedTemporaryFile().name
    with open(self.temp_file_name, mode='wb') as fp:
        pickle.dump(self.ner, fp)
    del self.ner
    gc.collect()
    with open(self.temp_file_name, mode='rb') as fp:
        self.ner = pickle.load(fp)
    self.assertIsInstance(self.ner, ELMo_NER)
    for param_name in param_names:
        self.assertTrue(hasattr(self.ner, param_name))
    # Floats are compared approximately, everything else exactly.
    float_valued = {'lr', 'l2_reg', 'gpu_memory_frac', 'validation_fraction'}
    for param_name in param_names:
        if param_name in float_valued:
            self.assertAlmostEqual(getattr(self.ner, param_name), old_params[param_name])
        else:
            self.assertEqual(getattr(self.ner, param_name), old_params[param_name])
def test_copy_positive01(self):
    """copy.copy() of an unfitted estimator must give a distinct object with equal hyper-parameters."""
    self.ner = ELMo_NER(random_seed=0, elmo_hub_module_handle=self.ELMO_HUB_MODULE)
    self.another_ner = copy.copy(self.ner)
    self.assertIsInstance(self.another_ner, ELMo_NER)
    self.assertIsNot(self.ner, self.another_ner)
    param_names = ('batch_size', 'lr', 'l2_reg', 'elmo_hub_module_handle', 'finetune_elmo', 'max_epochs',
                   'patience', 'random_seed', 'gpu_memory_frac', 'max_seq_length', 'validation_fraction',
                   'verbose')
    for param_name in param_names:
        self.assertTrue(hasattr(self.another_ner, param_name))
    # Floats are compared approximately, everything else exactly.
    float_valued = {'lr', 'l2_reg', 'gpu_memory_frac', 'validation_fraction'}
    for param_name in param_names:
        if param_name in float_valued:
            self.assertAlmostEqual(getattr(self.ner, param_name), getattr(self.another_ner, param_name))
        else:
            self.assertEqual(getattr(self.ner, param_name), getattr(self.another_ner, param_name))
def test_copy_positive02(self):
    """A shallow copy of a fitted estimator must share (not duplicate) every fitted attribute."""
    base_dir = os.path.join(os.path.dirname(__file__), 'testdata')
    self.ner = ELMo_NER(finetune_elmo=False, max_epochs=3, batch_size=4, max_seq_length=64, gpu_memory_frac=0.9,
                        validation_fraction=0.3, random_seed=None, elmo_hub_module_handle=self.ELMO_HUB_MODULE)
    X_train, y_train = load_dataset(os.path.join(base_dir, 'true_named_entities.json'))
    self.ner.fit(X_train, y_train)
    self.another_ner = copy.copy(self.ner)
    self.assertIsInstance(self.another_ner, ELMo_NER)
    self.assertIsNot(self.ner, self.another_ner)
    param_names = ('batch_size', 'lr', 'l2_reg', 'elmo_hub_module_handle', 'finetune_elmo', 'max_epochs',
                   'patience', 'random_seed', 'gpu_memory_frac', 'max_seq_length', 'validation_fraction',
                   'verbose')
    fitted_attrs = ('classes_list_', 'shapes_list_', 'logits_', 'transition_params_', 'input_tokens_',
                    'sequence_lengths_', 'additional_features_', 'y_ph_', 'sess_')
    for attr_name in param_names + fitted_attrs:
        self.assertTrue(hasattr(self.another_ner, attr_name))
    # Hyper-parameters are compared by value (floats approximately).
    float_valued = {'lr', 'l2_reg', 'gpu_memory_frac', 'validation_fraction'}
    for param_name in param_names:
        if param_name in float_valued:
            self.assertAlmostEqual(getattr(self.ner, param_name), getattr(self.another_ner, param_name))
        else:
            self.assertEqual(getattr(self.ner, param_name), getattr(self.another_ner, param_name))
    # A shallow copy must reference the very same fitted objects (TF graph nodes, session, ...).
    for fitted_attr in fitted_attrs:
        self.assertIs(getattr(self.ner, fitted_attr), getattr(self.another_ner, fitted_attr))
def test_calculate_bounds_of_named_entities(self):
    """Per-token class labels must be merged back into character-level entity spans, grouped by type."""
    token_spans = [(0, 2), (2, 5), (5, 8), (8, 10), (11, 16), (17, 20), (20, 22), (22, 26), (26, 27), (28, 31),
                   (31, 34), (34, 37), (38, 48), (49, 52), (52, 54), (55, 57), (58, 59), (59, 61), (61, 63),
                   (64, 70), (71, 83), (84, 87), (87, 90), (90, 93), (93, 95), (95, 98), (98, 99)]
    classes_list = ('LOCATION', 'ORG', 'PERSON')
    token_labels = [0, 0, 2, 1, 1, 2, 1, 0, 0, 0, 4, 3, 0, 6, 5, 5, 5, 0, 5, 5, 0, 2, 2, 3, 3, 6, 5]
    expected_entities = {
        'LOCATION': [(5, 16), (17, 22), (84, 87), (87, 90)],
        'ORG': [(31, 37), (90, 95)],
        'PERSON': [(49, 59), (61, 70), (95, 99)]
    }
    calculated_entities = ELMo_NER.calculate_bounds_of_named_entities(token_spans, classes_list, token_labels)
    self.assertIsInstance(calculated_entities, dict)
    self.assertEqual(set(expected_entities.keys()), set(calculated_entities.keys()))
    for entity_type in expected_entities:
        self.assertEqual(expected_entities[entity_type], calculated_entities[entity_type])
def test_get_shape_of_string_positive01(self):
    """A fully lowercase word collapses to the single shape symbol 'a'."""
    self.assertEqual('a', ELMo_NER.get_shape_of_string('уже'))
def test_get_shape_of_string_positive02(self):
    """A single uppercase letter collapses to the shape symbol 'A'."""
    self.assertEqual('A', ELMo_NER.get_shape_of_string('К'))
def test_get_shape_of_string_positive03(self):
    """A capitalized word collapses to the shape 'Aa' (one uppercase run, one lowercase run)."""
    self.assertEqual('Aa', ELMo_NER.get_shape_of_string('Однако'))
def test_get_shape_of_string_positive04(self):
    """Digit runs map to 'D' and the dash-like character to '-', giving 'D-D' for a numeric range."""
    self.assertEqual('D-D', ELMo_NER.get_shape_of_string('66–67'))
def test_get_shape_of_string_positive05(self):
    """An uncategorized character (here: the ellipsis) maps to the shape symbol 'U'."""
    self.assertEqual('U', ELMo_NER.get_shape_of_string('…'))
def test_get_shape_of_string_negative(self):
    """An empty input string yields an empty shape string."""
    self.assertEqual('', ELMo_NER.get_shape_of_string(''))
if __name__ == '__main__':
    # Run the whole suite with per-test progress output when this file is executed as a script.
    unittest.main(verbosity=2)
| 54.537246
| 120
| 0.633126
| 9,447
| 72,480
| 4.622737
| 0.052821
| 0.008243
| 0.07742
| 0.013098
| 0.913673
| 0.892171
| 0.868357
| 0.854549
| 0.84555
| 0.83694
| 0
| 0.045023
| 0.250442
| 72,480
| 1,328
| 121
| 54.578313
| 0.758706
| 0
| 0
| 0.59292
| 0
| 0.004827
| 0.21061
| 0.006871
| 0
| 0
| 0
| 0
| 0.320998
| 1
| 0.06436
| false
| 0
| 0.012872
| 0
| 0.078037
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e84d23fb4b720cca566d2742904cd909b7187d70
| 34
|
py
|
Python
|
31/00/list.remove.5.py
|
pylangstudy/201705
|
c69de524faa67fa2d96267d5a51ed9794208f0e4
|
[
"CC0-1.0"
] | null | null | null |
31/00/list.remove.5.py
|
pylangstudy/201705
|
c69de524faa67fa2d96267d5a51ed9794208f0e4
|
[
"CC0-1.0"
] | 38
|
2017-05-25T07:08:48.000Z
|
2017-05-31T01:42:41.000Z
|
31/00/list.remove.5.py
|
pylangstudy/201705
|
c69de524faa67fa2d96267d5a51ed9794208f0e4
|
[
"CC0-1.0"
] | null | null | null |
l = [1,2,3,4]
del l[100]
print(l)
| 8.5
| 13
| 0.529412
| 10
| 34
| 1.8
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25
| 0.176471
| 34
| 3
| 14
| 11.333333
| 0.392857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e8c9dd477284f0273b3698331381a0b72005ecde
| 1,423
|
py
|
Python
|
util/data/gen/textinputframework.dll.py
|
56kyle/bloons_auto
|
419d55b51d1cddc49099593970adf1c67985b389
|
[
"MIT"
] | null | null | null |
util/data/gen/textinputframework.dll.py
|
56kyle/bloons_auto
|
419d55b51d1cddc49099593970adf1c67985b389
|
[
"MIT"
] | null | null | null |
util/data/gen/textinputframework.dll.py
|
56kyle/bloons_auto
|
419d55b51d1cddc49099593970adf1c67985b389
|
[
"MIT"
] | null | null | null |
symbols = []
exports = [{'type': 'function', 'name': 'DllCanUnloadNow', 'address': '0x7ffb110e4e00'}, {'type': 'function', 'name': 'DllGetActivationFactory', 'address': '0x7ffb110e4e30'}, {'type': 'function', 'name': 'DllGetClassObject', 'address': '0x7ffb110e4e70'}, {'type': 'function', 'name': 'InputFocusChanged', 'address': '0x7ffb110b7a50'}, {'type': 'function', 'name': 'NavigateFocusInfoCreate', 'address': '0x7ffb1108b790'}, {'type': 'function', 'name': 'TextInputClientCreate', 'address': '0x7ffb110ae3f0'}, {'type': 'function', 'name': 'TextInputClientCreate2', 'address': '0x7ffb110ae420'}, {'type': 'function', 'name': 'TextInputHostCreate', 'address': '0x7ffb110a1a90'}, {'type': 'function', 'name': 'TextInputHostCreate2', 'address': '0x7ffb110e91c0'}, {'type': 'function', 'name': 'TextInputHostCreateEx', 'address': '0x7ffb110a1ba0'}, {'type': 'function', 'name': 'TextInputHostGetCurrent', 'address': '0x7ffb110e9270'}, {'type': 'function', 'name': 'TextInputHostSiteCreate', 'address': '0x7ffb110846f0'}, {'type': 'function', 'name': 'TextInputServerCreate', 'address': '0x7ffb110b1290'}, {'type': 'function', 'name': 'TsfOneCreate', 'address': '0x7ffb110a0a40'}, {'type': 'function', 'name': 'tsfGetAsyncKeyState', 'address': '0x7ffb110e92f0'}, {'type': 'function', 'name': 'tsfGetKeyState', 'address': '0x7ffb110a2020'}, {'type': 'function', 'name': 'tsfGetKeyboardState', 'address': '0x7ffb110e9380'}]
| 711.5
| 1,410
| 0.683064
| 104
| 1,423
| 9.346154
| 0.384615
| 0.209877
| 0.279835
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.107821
| 0.074491
| 1,423
| 2
| 1,410
| 711.5
| 0.63022
| 0
| 0
| 0
| 0
| 0
| 0.672753
| 0.124298
| 0
| 0
| 0.167135
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e8d6e06c6af45d7061e8784a4a529ba02df0350a
| 16,985
|
py
|
Python
|
sdk/python/pulumi_kubernetes/core/v1/ServiceAccount.py
|
csssuf/pulumi-kubernetes
|
8d007166d0e8968fcabaeecd0cee13f9c08d97f1
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_kubernetes/core/v1/ServiceAccount.py
|
csssuf/pulumi-kubernetes
|
8d007166d0e8968fcabaeecd0cee13f9c08d97f1
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_kubernetes/core/v1/ServiceAccount.py
|
csssuf/pulumi-kubernetes
|
8d007166d0e8968fcabaeecd0cee13f9c08d97f1
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by pulumigen. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ... import meta as _meta
from ._inputs import *
__all__ = ['ServiceAccountArgs', 'ServiceAccount']
@pulumi.input_type
class ServiceAccountArgs:
def __init__(__self__, *,
api_version: Optional[pulumi.Input[str]] = None,
automount_service_account_token: Optional[pulumi.Input[bool]] = None,
image_pull_secrets: Optional[pulumi.Input[Sequence[pulumi.Input['LocalObjectReferenceArgs']]]] = None,
kind: Optional[pulumi.Input[str]] = None,
metadata: Optional[pulumi.Input['_meta.v1.ObjectMetaArgs']] = None,
secrets: Optional[pulumi.Input[Sequence[pulumi.Input['ObjectReferenceArgs']]]] = None):
"""
The set of arguments for constructing a ServiceAccount resource.
:param pulumi.Input[str] api_version: APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
:param pulumi.Input[bool] automount_service_account_token: AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted. Can be overridden at the pod level.
:param pulumi.Input[Sequence[pulumi.Input['LocalObjectReferenceArgs']]] image_pull_secrets: ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. More info: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod
:param pulumi.Input[str] kind: Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
:param pulumi.Input['_meta.v1.ObjectMetaArgs'] metadata: Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
:param pulumi.Input[Sequence[pulumi.Input['ObjectReferenceArgs']]] secrets: Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount. More info: https://kubernetes.io/docs/concepts/configuration/secret
"""
if api_version is not None:
pulumi.set(__self__, "api_version", 'v1')
if automount_service_account_token is not None:
pulumi.set(__self__, "automount_service_account_token", automount_service_account_token)
if image_pull_secrets is not None:
pulumi.set(__self__, "image_pull_secrets", image_pull_secrets)
if kind is not None:
pulumi.set(__self__, "kind", 'ServiceAccount')
if metadata is not None:
pulumi.set(__self__, "metadata", metadata)
if secrets is not None:
pulumi.set(__self__, "secrets", secrets)
@property
@pulumi.getter(name="apiVersion")
def api_version(self) -> Optional[pulumi.Input[str]]:
"""
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
"""
return pulumi.get(self, "api_version")
@api_version.setter
def api_version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "api_version", value)
@property
@pulumi.getter(name="automountServiceAccountToken")
def automount_service_account_token(self) -> Optional[pulumi.Input[bool]]:
"""
AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted. Can be overridden at the pod level.
"""
return pulumi.get(self, "automount_service_account_token")
@automount_service_account_token.setter
def automount_service_account_token(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "automount_service_account_token", value)
@property
@pulumi.getter(name="imagePullSecrets")
def image_pull_secrets(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['LocalObjectReferenceArgs']]]]:
"""
ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. More info: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod
"""
return pulumi.get(self, "image_pull_secrets")
@image_pull_secrets.setter
def image_pull_secrets(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['LocalObjectReferenceArgs']]]]):
pulumi.set(self, "image_pull_secrets", value)
@property
@pulumi.getter
def kind(self) -> Optional[pulumi.Input[str]]:
"""
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
"""
return pulumi.get(self, "kind")
@kind.setter
def kind(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "kind", value)
@property
@pulumi.getter
def metadata(self) -> Optional[pulumi.Input['_meta.v1.ObjectMetaArgs']]:
"""
Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
"""
return pulumi.get(self, "metadata")
@metadata.setter
def metadata(self, value: Optional[pulumi.Input['_meta.v1.ObjectMetaArgs']]):
pulumi.set(self, "metadata", value)
@property
@pulumi.getter
def secrets(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ObjectReferenceArgs']]]]:
"""
Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount. More info: https://kubernetes.io/docs/concepts/configuration/secret
"""
return pulumi.get(self, "secrets")
@secrets.setter
def secrets(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ObjectReferenceArgs']]]]):
pulumi.set(self, "secrets", value)
class ServiceAccount(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
api_version: Optional[pulumi.Input[str]] = None,
automount_service_account_token: Optional[pulumi.Input[bool]] = None,
image_pull_secrets: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['LocalObjectReferenceArgs']]]]] = None,
kind: Optional[pulumi.Input[str]] = None,
metadata: Optional[pulumi.Input[pulumi.InputType['_meta.v1.ObjectMetaArgs']]] = None,
secrets: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ObjectReferenceArgs']]]]] = None,
__props__=None):
"""
ServiceAccount binds together: * a name, understood by users, and perhaps by peripheral systems, for an identity * a principal that can be authenticated and authorized * a set of secrets
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] api_version: APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
:param pulumi.Input[bool] automount_service_account_token: AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted. Can be overridden at the pod level.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['LocalObjectReferenceArgs']]]] image_pull_secrets: ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. More info: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod
:param pulumi.Input[str] kind: Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
:param pulumi.Input[pulumi.InputType['_meta.v1.ObjectMetaArgs']] metadata: Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ObjectReferenceArgs']]]] secrets: Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount. More info: https://kubernetes.io/docs/concepts/configuration/secret
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: Optional[ServiceAccountArgs] = None,
opts: Optional[pulumi.ResourceOptions] = None):
"""
ServiceAccount binds together: * a name, understood by users, and perhaps by peripheral systems, for an identity * a principal that can be authenticated and authorized * a set of secrets
:param str resource_name: The name of the resource.
:param ServiceAccountArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ServiceAccountArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
api_version: Optional[pulumi.Input[str]] = None,
automount_service_account_token: Optional[pulumi.Input[bool]] = None,
image_pull_secrets: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['LocalObjectReferenceArgs']]]]] = None,
kind: Optional[pulumi.Input[str]] = None,
metadata: Optional[pulumi.Input[pulumi.InputType['_meta.v1.ObjectMetaArgs']]] = None,
secrets: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ObjectReferenceArgs']]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ServiceAccountArgs.__new__(ServiceAccountArgs)
__props__.__dict__["api_version"] = 'v1'
__props__.__dict__["automount_service_account_token"] = automount_service_account_token
__props__.__dict__["image_pull_secrets"] = image_pull_secrets
__props__.__dict__["kind"] = 'ServiceAccount'
__props__.__dict__["metadata"] = metadata
__props__.__dict__["secrets"] = secrets
super(ServiceAccount, __self__).__init__(
'kubernetes:core/v1:ServiceAccount',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'ServiceAccount':
"""
Get an existing ServiceAccount resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = ServiceAccountArgs.__new__(ServiceAccountArgs)
__props__.__dict__["api_version"] = None
__props__.__dict__["automount_service_account_token"] = None
__props__.__dict__["image_pull_secrets"] = None
__props__.__dict__["kind"] = None
__props__.__dict__["metadata"] = None
__props__.__dict__["secrets"] = None
return ServiceAccount(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="apiVersion")
def api_version(self) -> pulumi.Output[Optional[str]]:
"""
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
"""
return pulumi.get(self, "api_version")
@property
@pulumi.getter(name="automountServiceAccountToken")
def automount_service_account_token(self) -> pulumi.Output[Optional[bool]]:
"""
AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted. Can be overridden at the pod level.
"""
return pulumi.get(self, "automount_service_account_token")
@property
@pulumi.getter(name="imagePullSecrets")
def image_pull_secrets(self) -> pulumi.Output[Optional[Sequence['outputs.LocalObjectReference']]]:
"""
ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. More info: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod
"""
return pulumi.get(self, "image_pull_secrets")
@property
@pulumi.getter
def kind(self) -> pulumi.Output[Optional[str]]:
"""
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def metadata(self) -> pulumi.Output[Optional['_meta.v1.outputs.ObjectMeta']]:
"""
Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
"""
return pulumi.get(self, "metadata")
@property
@pulumi.getter
def secrets(self) -> pulumi.Output[Optional[Sequence['outputs.ObjectReference']]]:
"""
Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount. More info: https://kubernetes.io/docs/concepts/configuration/secret
"""
return pulumi.get(self, "secrets")
| 62.907407
| 509
| 0.710863
| 2,033
| 16,985
| 5.757501
| 0.11215
| 0.055446
| 0.048697
| 0.043059
| 0.831269
| 0.804955
| 0.757112
| 0.719863
| 0.682358
| 0.649381
| 0
| 0.001756
| 0.195467
| 16,985
| 269
| 510
| 63.141264
| 0.854812
| 0.456108
| 0
| 0.4
| 1
| 0
| 0.144631
| 0.067602
| 0
| 0
| 0
| 0
| 0
| 1
| 0.145455
| false
| 0.006061
| 0.048485
| 0
| 0.284848
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
2cdbdc43a6f9fe1e2fbc0e42b941e72a584fc3cc
| 6,572
|
py
|
Python
|
tests/unit/commands/status_test.py
|
gamechanger/dusty
|
dd9778e3a4f0c623209e53e98aa9dc1fe76fc309
|
[
"MIT"
] | 421
|
2015-06-02T16:29:59.000Z
|
2021-06-03T18:44:42.000Z
|
tests/unit/commands/status_test.py
|
gamechanger/dusty
|
dd9778e3a4f0c623209e53e98aa9dc1fe76fc309
|
[
"MIT"
] | 404
|
2015-06-02T20:23:42.000Z
|
2019-08-21T16:59:41.000Z
|
tests/unit/commands/status_test.py
|
gamechanger/dusty
|
dd9778e3a4f0c623209e53e98aa9dc1fe76fc309
|
[
"MIT"
] | 16
|
2015-06-16T17:21:02.000Z
|
2020-03-27T02:27:09.000Z
|
from mock import patch, Mock, call
from ...testcases import DustyTestCase
from dusty.commands.status import _has_active_container, get_dusty_status
from dusty.schemas.base_schema_class import DustySchema
from ..utils import get_app_dusty_schema, get_bundle_dusty_schema, get_lib_dusty_schema
class TestStatusCommands(DustyTestCase):
@patch('dusty.commands.status.get_dusty_containers')
def test_has_active_container_lib_active(self, fake_get_dusty_containers):
fake_get_dusty_containers.return_value = ['some_container']
self.assertEquals(False, _has_active_container('lib', 'lib-a'))
@patch('dusty.commands.status.get_dusty_containers')
def test_has_active_container_lib_inactive(self, fake_get_dusty_containers):
fake_get_dusty_containers.return_value = []
self.assertEquals(False, _has_active_container('lib', 'lib-a'))
@patch('dusty.commands.status.get_dusty_containers')
def test_has_active_container_app_active(self, fake_get_dusty_containers):
fake_get_dusty_containers.return_value = ['some_container']
self.assertEquals(True, _has_active_container('app', 'app-a'))
@patch('dusty.commands.status.get_dusty_containers')
def test_has_active_container_app_inactive(self, fake_get_dusty_containers):
fake_get_dusty_containers.return_value = []
self.assertEquals(False, _has_active_container('app', 'app-a'))
@patch('dusty.commands.status.get_dusty_containers')
def test_has_active_container_service_active(self, fake_get_dusty_containers):
fake_get_dusty_containers.return_value = ['some_container']
self.assertEquals(True, _has_active_container('service', 'service-a'))
@patch('dusty.commands.status.get_dusty_containers')
def test_has_active_container_service_inactive(self, fake_get_dusty_containers):
fake_get_dusty_containers.return_value = []
self.assertEquals(False, _has_active_container('service', 'service-a'))
@patch('dusty.commands.status.docker_vm_is_running')
@patch('dusty.systems.docker.get_docker_client')
@patch('dusty.commands.status.PrettyTable')
@patch('dusty.commands.status.get_dusty_containers')
@patch('dusty.schemas.base_schema_class.get_specs_from_path')
@patch('dusty.compiler.spec_assembler._get_referenced_apps')
@patch('dusty.compiler.spec_assembler._get_referenced_libs')
@patch('dusty.compiler.spec_assembler._get_referenced_services')
def test_get_dusty_status_active_1(self, fake_get_services, fake_get_libs, fake_get_apps, fake_get_specs,
fake_get_dusty_containers, fake_pretty_table, fake_get_docker_client, fake_vm_is_running):
fake_get_services.return_value = set(['ser1', 'ser2', 'ser3'])
fake_get_libs.return_value = set(['lib1'])
fake_get_apps.return_value = set(['app1', 'app2'])
fake_table = Mock()
fake_pretty_table.return_value = fake_table
fake_get_dusty_containers.return_value = ['some_container']
fake_get_specs.return_value = {'apps': {'app1': get_app_dusty_schema({}, 'app1'), 'app2':get_app_dusty_schema({}, 'app2')},
'libs': {'lib1': get_lib_dusty_schema({}, 'lib1')},
'services': {'ser1': DustySchema(None, {}, 'ser1', 'services'), 'ser2': DustySchema(None, {}, 'ser2', 'services'), 'ser3': DustySchema(None, {}, 'ser3', 'services')},
'bundles': get_lib_dusty_schema({}, 'bundle')}
fake_get_docker_client.return_value = None
fake_vm_is_running.return_value = True
get_dusty_status()
call_args_list = fake_table.add_row.call_args_list
self.assertTrue(call(['app1', 'app', 'X']) in call_args_list)
self.assertTrue(call(['app2', 'app', 'X']) in call_args_list)
self.assertTrue(call(['lib1', 'lib', '']) in call_args_list)
self.assertTrue(call(['ser1', 'service', 'X']) in call_args_list)
self.assertTrue(call(['ser2', 'service', 'X']) in call_args_list)
self.assertTrue(call(['ser3', 'service', 'X']) in call_args_list)
self.assertTrue(call(['dustyInternalNginx', '', 'X']) in call_args_list)
self.assertEquals(len(call_args_list), 7)
@patch('dusty.commands.status.docker_vm_is_running')
@patch('dusty.systems.docker.get_docker_client')
@patch('dusty.commands.status.PrettyTable')
@patch('dusty.commands.status.get_dusty_containers')
@patch('dusty.schemas.base_schema_class.get_specs_from_path')
@patch('dusty.compiler.spec_assembler._get_referenced_apps')
@patch('dusty.compiler.spec_assembler._get_referenced_libs')
@patch('dusty.compiler.spec_assembler._get_referenced_services')
def test_get_dusty_status_active_2(self, fake_get_services, fake_get_libs, fake_get_apps, fake_get_specs,
fake_get_dusty_containers, fake_pretty_table, fake_get_docker_client, fake_vm_is_running):
fake_get_services.return_value = set(['ser1', 'ser2', 'ser3'])
fake_get_libs.return_value = set(['lib1'])
fake_get_apps.return_value = set(['app1', 'app2'])
fake_table = Mock()
fake_pretty_table.return_value = fake_table
fake_get_dusty_containers.return_value = []
fake_get_specs.return_value = {'apps': {'app1': get_app_dusty_schema({}, 'app1'), 'app2':get_app_dusty_schema({}, 'app2')},
'libs': {'lib1': get_lib_dusty_schema({}, 'lib1')},
'services': {'ser1': DustySchema(None, {}, 'ser1', 'services'), 'ser2': DustySchema(None, {}, 'ser2', 'services'), 'ser3': DustySchema(None, {}, 'ser3', 'services')},
'bundles': get_lib_dusty_schema({}, 'bundle')}
fake_get_docker_client.return_value = None
fake_vm_is_running.return_value = True
get_dusty_status()
call_args_list = fake_table.add_row.call_args_list
self.assertTrue(call(['app1', 'app', '']) in call_args_list)
self.assertTrue(call(['app2', 'app', '']) in call_args_list)
self.assertTrue(call(['lib1', 'lib', '']) in call_args_list)
self.assertTrue(call(['ser1', 'service', '']) in call_args_list)
self.assertTrue(call(['ser2', 'service', '']) in call_args_list)
self.assertTrue(call(['ser3', 'service', '']) in call_args_list)
self.assertTrue(call(['dustyInternalNginx', '', '']) in call_args_list)
self.assertEquals(len(call_args_list), 7)
| 63.192308
| 205
| 0.689744
| 823
| 6,572
| 5.085055
| 0.09356
| 0.060215
| 0.103226
| 0.08411
| 0.940741
| 0.934289
| 0.93405
| 0.93405
| 0.914695
| 0.846117
| 0
| 0.009605
| 0.176202
| 6,572
| 103
| 206
| 63.805825
| 0.763391
| 0
| 0
| 0.680851
| 0
| 0
| 0.227024
| 0.1479
| 0
| 0
| 0
| 0
| 0.234043
| 1
| 0.085106
| false
| 0
| 0.053191
| 0
| 0.148936
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
2ced6ac8779a2621bf68d4b0f580e7b3aeab83a4
| 58
|
py
|
Python
|
prt_mail_messages/__init__.py
|
sagar-organiztion/custom_repo2
|
30687143b3b6a7820305075d15fd15fbd61141d1
|
[
"Apache-2.0"
] | null | null | null |
prt_mail_messages/__init__.py
|
sagar-organiztion/custom_repo2
|
30687143b3b6a7820305075d15fd15fbd61141d1
|
[
"Apache-2.0"
] | null | null | null |
prt_mail_messages/__init__.py
|
sagar-organiztion/custom_repo2
|
30687143b3b6a7820305075d15fd15fbd61141d1
|
[
"Apache-2.0"
] | null | null | null |
from . import models # noqa
from . import wizard # noqa
| 19.333333
| 28
| 0.689655
| 8
| 58
| 5
| 0.625
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.241379
| 58
| 2
| 29
| 29
| 0.909091
| 0.155172
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
fa201b797e417adad9763e6671010a321eaecba1
| 725
|
py
|
Python
|
test/test_cluster_plugin.py
|
RyanSiu1995/argocd-python-client
|
2e8f097fe09f247a46ac70692241a93d1acd076a
|
[
"MIT"
] | 1
|
2021-11-20T13:37:43.000Z
|
2021-11-20T13:37:43.000Z
|
test/test_cluster_plugin.py
|
RyanSiu1995/argocd-python-client
|
2e8f097fe09f247a46ac70692241a93d1acd076a
|
[
"MIT"
] | null | null | null |
test/test_cluster_plugin.py
|
RyanSiu1995/argocd-python-client
|
2e8f097fe09f247a46ac70692241a93d1acd076a
|
[
"MIT"
] | null | null | null |
"""
Consolidate Services
Description of all APIs # noqa: E501
The version of the OpenAPI document: version not set
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import argocd_python_client
from argocd_python_client.model.cluster_plugin import ClusterPlugin
class TestClusterPlugin(unittest.TestCase):
"""ClusterPlugin unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testClusterPlugin(self):
"""Test ClusterPlugin"""
# FIXME: construct object with mandatory attributes with example values
# model = ClusterPlugin() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 20.138889
| 79
| 0.685517
| 80
| 725
| 6.05
| 0.65
| 0.033058
| 0.07438
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010811
| 0.234483
| 725
| 35
| 80
| 20.714286
| 0.861261
| 0.434483
| 0
| 0.230769
| 1
| 0
| 0.02139
| 0
| 0
| 0
| 0
| 0.028571
| 0
| 1
| 0.230769
| false
| 0.230769
| 0.307692
| 0
| 0.615385
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
fa575a9ef17332478ba773f79636bab15ead7d4d
| 69
|
py
|
Python
|
connectedcars/__init__.py
|
heslegrave/connectedcars-python
|
29dca7bb33d549dbb4803688032ae3a13b932eba
|
[
"MIT"
] | 4
|
2019-11-11T00:21:33.000Z
|
2020-10-27T19:47:35.000Z
|
connectedcars/__init__.py
|
heslegrave/connectedcars-python
|
29dca7bb33d549dbb4803688032ae3a13b932eba
|
[
"MIT"
] | 2
|
2020-06-29T20:17:55.000Z
|
2020-10-25T19:17:38.000Z
|
connectedcars/__init__.py
|
heslegrave/connectedcars-python
|
29dca7bb33d549dbb4803688032ae3a13b932eba
|
[
"MIT"
] | 2
|
2020-07-20T16:08:48.000Z
|
2020-11-01T14:55:20.000Z
|
from .client import *
from .models import *
from .exceptions import *
| 23
| 25
| 0.753623
| 9
| 69
| 5.777778
| 0.555556
| 0.384615
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15942
| 69
| 3
| 25
| 23
| 0.896552
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d7087e9ee4cdb577e50b32daacbb521e85f4e480
| 492
|
py
|
Python
|
opendeep/optimization/loss/__init__.py
|
vitruvianscience/OpenDeep
|
e96efc449101094354b615cf15afe6d03644fc36
|
[
"Apache-2.0"
] | 252
|
2015-03-13T21:55:22.000Z
|
2021-09-06T21:37:38.000Z
|
opendeep/optimization/loss/__init__.py
|
afcarl/OpenDeep
|
e96efc449101094354b615cf15afe6d03644fc36
|
[
"Apache-2.0"
] | 16
|
2015-03-14T06:47:04.000Z
|
2016-09-23T19:13:35.000Z
|
opendeep/optimization/loss/__init__.py
|
afcarl/OpenDeep
|
e96efc449101094354b615cf15afe6d03644fc36
|
[
"Apache-2.0"
] | 68
|
2015-03-14T00:05:53.000Z
|
2020-06-04T13:36:13.000Z
|
from __future__ import division, absolute_import, print_function
from opendeep.optimization.loss.loss import *
from opendeep.optimization.loss.binary_crossentropy import *
from opendeep.optimization.loss.categorical_crossentropy import *
from opendeep.optimization.loss.isotropic_gaussian_LL import *
from opendeep.optimization.loss.mse import *
from opendeep.optimization.loss.neg_LL import *
from opendeep.optimization.loss.zero_one import *
from opendeep.optimization.loss import utils
| 37.846154
| 65
| 0.851626
| 62
| 492
| 6.564516
| 0.33871
| 0.235872
| 0.471744
| 0.550369
| 0.653563
| 0.402948
| 0
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 492
| 12
| 66
| 41
| 0.902439
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0.111111
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ad7ba4397c5c934384de131b1688608495888b8a
| 153,371
|
py
|
Python
|
lib/ult/ult.py
|
abreza/HOI-CL
|
c5be517bb26eac73ef88a39d6ec9e564c3379714
|
[
"MIT"
] | 40
|
2021-04-09T17:53:08.000Z
|
2022-03-30T02:38:10.000Z
|
lib/ult/ult.py
|
abreza/HOI-CL
|
c5be517bb26eac73ef88a39d6ec9e564c3379714
|
[
"MIT"
] | 21
|
2021-04-09T19:05:47.000Z
|
2022-01-31T23:17:16.000Z
|
lib/ult/ult.py
|
abreza/HOI-CL
|
c5be517bb26eac73ef88a39d6ec9e564c3379714
|
[
"MIT"
] | 8
|
2021-05-30T12:37:00.000Z
|
2022-03-14T03:13:57.000Z
|
# --------------------------------------------------------
# Tensorflow VCL
# Licensed under The MIT License [see LICENSE for details]
# Written by Zhi Hou
# --------------------------------------------------------
"""
Generating training instance
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from functools import partial
import numpy as np
import json
import pickle
import random
from random import randint
import tensorflow as tf
import cv2
from .config import cfg
# for merge COCO and HICO dataset
MAX_COCO_ID = 650000
MAX_HICO_ID = 40000
def bbox_trans(human_box_ori, object_box_ori, ratio, size=64):
human_box = human_box_ori.copy()
object_box = object_box_ori.copy()
InteractionPattern = [min(human_box[0], object_box[0]), min(human_box[1], object_box[1]),
max(human_box[2], object_box[2]), max(human_box[3], object_box[3])]
height = InteractionPattern[3] - InteractionPattern[1] + 1
width = InteractionPattern[2] - InteractionPattern[0] + 1
if height > width:
ratio = 'height'
else:
ratio = 'width'
# shift the top-left corner to (0,0)
human_box[0] -= InteractionPattern[0]
human_box[2] -= InteractionPattern[0]
human_box[1] -= InteractionPattern[1]
human_box[3] -= InteractionPattern[1]
object_box[0] -= InteractionPattern[0]
object_box[2] -= InteractionPattern[0]
object_box[1] -= InteractionPattern[1]
object_box[3] -= InteractionPattern[1]
if ratio == 'height': # height is larger than width
human_box[0] = 0 + size * human_box[0] / height
human_box[1] = 0 + size * human_box[1] / height
human_box[2] = (size * width / height - 1) - size * (width - 1 - human_box[2]) / height
human_box[3] = (size - 1) - size * (height - 1 - human_box[3]) / height
object_box[0] = 0 + size * object_box[0] / height
object_box[1] = 0 + size * object_box[1] / height
object_box[2] = (size * width / height - 1) - size * (width - 1 - object_box[2]) / height
object_box[3] = (size - 1) - size * (height - 1 - object_box[3]) / height
# Need to shift horizontally
InteractionPattern = [min(human_box[0], object_box[0]), min(human_box[1], object_box[1]),
max(human_box[2], object_box[2]), max(human_box[3], object_box[3])]
# assert (InteractionPattern[0] == 0) & (InteractionPattern[1] == 0) & (InteractionPattern[3] == 63) & (InteractionPattern[2] <= 63)
if human_box[3] > object_box[3]:
human_box[3] = size - 1
else:
object_box[3] = size - 1
shift = size / 2 - (InteractionPattern[2] + 1) / 2
human_box += [shift, 0, shift, 0]
object_box += [shift, 0, shift, 0]
else: # width is larger than height
human_box[0] = 0 + size * human_box[0] / width
human_box[1] = 0 + size * human_box[1] / width
human_box[2] = (size - 1) - size * (width - 1 - human_box[2]) / width
human_box[3] = (size * height / width - 1) - size * (height - 1 - human_box[3]) / width
object_box[0] = 0 + size * object_box[0] / width
object_box[1] = 0 + size * object_box[1] / width
object_box[2] = (size - 1) - size * (width - 1 - object_box[2]) / width
object_box[3] = (size * height / width - 1) - size * (height - 1 - object_box[3]) / width
# Need to shift vertically
InteractionPattern = [min(human_box[0], object_box[0]), min(human_box[1], object_box[1]),
max(human_box[2], object_box[2]), max(human_box[3], object_box[3])]
# assert (InteractionPattern[0] == 0) & (InteractionPattern[1] == 0) & (InteractionPattern[2] == 63) & (InteractionPattern[3] <= 63)
if human_box[2] > object_box[2]:
human_box[2] = size - 1
else:
object_box[2] = size - 1
shift = size / 2 - (InteractionPattern[3] + 1) / 2
human_box = human_box + [0, shift, 0, shift]
object_box = object_box + [0, shift, 0, shift]
return np.round(human_box), np.round(object_box)
def Get_next_sp(human_box, object_box):
InteractionPattern = [min(human_box[0], object_box[0]), min(human_box[1], object_box[1]),
max(human_box[2], object_box[2]), max(human_box[3], object_box[3])]
height = InteractionPattern[3] - InteractionPattern[1] + 1
width = InteractionPattern[2] - InteractionPattern[0] + 1
if height > width:
H, O = bbox_trans(human_box, object_box, 'height')
else:
H, O = bbox_trans(human_box, object_box, 'width')
Pattern = np.zeros((64, 64, 2))
Pattern[int(H[1]):int(H[3]) + 1, int(H[0]):int(H[2]) + 1, 0] = 1
Pattern[int(O[1]):int(O[3]) + 1, int(O[0]):int(O[2]) + 1, 1] = 1
return Pattern
#
# def Get_next_sp_with_pose(human_box, object_box, human_pose, num_joints=17):
# InteractionPattern = [min(human_box[0], object_box[0]), min(human_box[1], object_box[1]),
# max(human_box[2], object_box[2]), max(human_box[3], object_box[3])]
# height = InteractionPattern[3] - InteractionPattern[1] + 1
# width = InteractionPattern[2] - InteractionPattern[0] + 1
# if height > width:
# H, O = bbox_trans(human_box, object_box, 'height')
# else:
# H, O = bbox_trans(human_box, object_box, 'width')
#
# Pattern = np.zeros((64, 64, 2), dtype='float32')
# Pattern[int(H[1]):int(H[3]) + 1, int(H[0]):int(H[2]) + 1, 0] = 1
# Pattern[int(O[1]):int(O[3]) + 1, int(O[0]):int(O[2]) + 1, 1] = 1
#
# if human_pose != None and len(human_pose) == 51:
# skeleton = get_skeleton(human_box, human_pose, H, num_joints)
# else:
# skeleton = np.zeros((64, 64, 1), dtype='float32')
# skeleton[int(H[1]):int(H[3]) + 1, int(H[0]):int(H[2]) + 1, 0] = 0.05
#
# Pattern = np.concatenate((Pattern, skeleton), axis=2)
#
# return Pattern
def get_skeleton(human_box, human_pose, human_pattern, num_joints=17, size=64):
width = human_box[2] - human_box[0] + 1
height = human_box[3] - human_box[1] + 1
pattern_width = human_pattern[2] - human_pattern[0] + 1
pattern_height = human_pattern[3] - human_pattern[1] + 1
joints = np.zeros((num_joints + 1, 2), dtype='int32')
for i in range(num_joints):
joint_x, joint_y, joint_score = human_pose[3 * i: 3 * (i + 1)]
x_ratio = (joint_x - human_box[0]) / float(width)
y_ratio = (joint_y - human_box[1]) / float(height)
joints[i][0] = min(size - 1, int(round(x_ratio * pattern_width + human_pattern[0])))
joints[i][1] = min(size - 1, int(round(y_ratio * pattern_height + human_pattern[1])))
joints[num_joints] = (joints[5] + joints[6]) / 2
return draw_relation(human_pattern, joints)
def draw_relation(human_pattern, joints, size=64):
joint_relation = [[1, 3], [2, 4], [0, 1], [0, 2], [0, 17], [5, 17], [6, 17], [5, 7], [6, 8], [7, 9], [8, 10],
[11, 17], [12, 17], [11, 13], [12, 14], [13, 15], [14, 16]]
color = [0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95]
skeleton = np.zeros((size, size, 1), dtype="float32")
for i in range(len(joint_relation)):
cv2.line(skeleton, tuple(joints[joint_relation[i][0]]), tuple(joints[joint_relation[i][1]]), (color[i]))
# cv2.rectangle(skeleton, (int(human_pattern[0]), int(human_pattern[1])), (int(human_pattern[2]), int(human_pattern[3])), (255))
# cv2.imshow("Joints", skeleton)
# cv2.waitKey(0)
# print(skeleton[:,:,0])
return skeleton
def bb_IOU(boxA, boxB):
ixmin = np.maximum(boxA[0], boxB[0])
iymin = np.maximum(boxA[1], boxB[1])
ixmax = np.minimum(boxA[2], boxB[2])
iymax = np.minimum(boxA[3], boxB[3])
iw = np.maximum(ixmax - ixmin + 1., 0.)
ih = np.maximum(iymax - iymin + 1., 0.)
inters = iw * ih
# union
uni = ((boxB[2] - boxB[0] + 1.) * (boxB[3] - boxB[1] + 1.) +
(boxA[2] - boxA[0] + 1.) *
(boxA[3] - boxA[1] + 1.) - inters)
overlaps = inters / uni
return overlaps
def Augmented_box(bbox, shape, image_id, augment=15):
    """Jitter `bbox` into up to `augment` extra boxes with IOU > 0.7, clipped to the image.

    :param bbox: [x1, y1, x2, y2] ground-truth box.
    :param shape: image shape (H, W, ...) used to clip jittered boxes.
    :param image_id: unused; kept for interface compatibility with callers.
    :param augment: number of accepted jittered copies to try to produce.
    :return: (N, 5) float64 array; row 0 is the original box, column 0 is a
        batch-index placeholder (always 0). Gives up after 151 attempts, so
        N may be less than augment + 1.
    """
    thres_ = 0.7
    box = np.array([0, bbox[0], bbox[1], bbox[2], bbox[3]]).reshape(1, 5)
    box = box.astype(np.float64)
    if bbox[0] >= bbox[2] or bbox[1] >= bbox[3]:
        # Degenerate box: nothing sensible to jitter.
        return box
    count = 0
    time_count = 0
    while count < augment:
        time_count += 1
        height = bbox[3] - bbox[1]
        width = bbox[2] - bbox[0]
        height_cen = (bbox[3] + bbox[1]) / 2
        width_cen = (bbox[2] + bbox[0]) / 2
        # Random scale in [0.9, 1.1] and center shift up to 10% of the box size.
        ratio = 1 + randint(-10, 10) * 0.01
        # Fix: np.floor returns a float; random.randint rejects non-integer
        # arguments on Python >= 3.12, so convert to int explicitly.
        height_shift = randint(-int(np.floor(height)), int(np.floor(height))) * 0.1
        width_shift = randint(-int(np.floor(width)), int(np.floor(width))) * 0.1
        H_0 = max(0, width_cen + width_shift - ratio * width / 2)
        H_2 = min(shape[1] - 1, width_cen + width_shift + ratio * width / 2)
        H_1 = max(0, height_cen + height_shift - ratio * height / 2)
        H_3 = min(shape[0] - 1, height_cen + height_shift + ratio * height / 2)
        # Accept only jitters that still overlap the original box strongly.
        if bb_IOU(bbox, np.array([H_0, H_1, H_2, H_3])) > thres_:
            box_ = np.array([0, H_0, H_1, H_2, H_3]).reshape(1, 5)
            box = np.concatenate((box, box_), axis=0)
            count += 1
        if time_count > 150:
            return box
    return box
def Generate_action(action_list, nums=29):
    """Multi-hot encode a list of action indices into a (1, nums) float array."""
    encoded = np.zeros(nums)
    for idx in action_list:
        encoded[idx] = 1
    return encoded.reshape(1, nums)
def Get_Next_Instance_HO_Neg(trainval_GT, Trainval_Neg, iter, Pos_augment, Neg_select, Data_length):
    """Build one V-COCO training blob: mean-subtracted image plus augmented boxes/labels/masks."""
    GT = trainval_GT[iter % Data_length]
    image_id = GT[0]
    im_file = cfg.DATA_DIR + '/' + 'v-coco/coco/images/train2014/COCO_train2014_' + (str(image_id)).zfill(12) + '.jpg'
    import os
    if not os.path.exists(im_file):
        print("not existing:", im_file)
    raw = cv2.imread(im_file)
    image = raw.astype(np.float32, copy=True) - cfg.PIXEL_MEANS
    im_shape = image.shape  # (H, W, 3) before the batch axis is added
    image = image.reshape(1, im_shape[0], im_shape[1], 3)
    Pattern, Human_augmented, Human_augmented_solo, Object_augmented, action_HO, action_H, mask_HO, mask_H = \
        Augmented_HO_Neg(GT, Trainval_Neg, im_shape, Pos_augment, Neg_select)
    return {
        'image': image,
        'H_boxes_solo': Human_augmented_solo,
        'H_boxes': Human_augmented,
        'O_boxes': Object_augmented,
        'gt_class_HO': action_HO,
        'gt_class_H': action_H,
        'Mask_HO': mask_HO,
        'Mask_H': mask_H,
        'sp': Pattern,
        'H_num': len(action_H),
    }
def Augmented_HO_Neg(GT, Trainval_Neg, shape, Pos_augment, Neg_select):
    """Augment one positive V-COCO human-object pair and append sampled negatives.

    :param GT: ground-truth record; uses GT[0]=image_id, GT[1]=HO action indices,
        GT[2]=human box, GT[3]=object box, GT[4]=human-only action indices.
    :param Trainval_Neg: dict image_id -> list of negative pair records
        (Neg[2]=human box, Neg[3]=object box).
    :param shape: image shape (H, W, C) used to clip jittered boxes.
    :param Pos_augment: number of jittered copies requested per positive box.
    :param Neg_select: maximum number of negatives to take for this image.
    :return: (Pattern, Human_augmented, Human_augmented_solo, Object_augmented,
        action_HO, action_H, mask_HO, mask_H); the first num_pos rows of the
        pairwise outputs are positives, the rest negatives.
    """
    image_id = GT[0]
    Human = GT[2]
    Object = GT[3]
    action_HO_ = Generate_action(GT[1])
    action_H_ = Generate_action(GT[4])
    # Fixed 29-dim loss masks; zeros at indices 3, 17, 22, 27 — presumably
    # V-COCO actions without an object role (TODO confirm against label list).
    mask_HO_ = np.asarray(
        [1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1]).reshape(1, 29)
    mask_H_ = np.asarray(
        [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]).reshape(1, 29)
    Human_augmented = Augmented_box(Human, shape, image_id, Pos_augment)
    Object_augmented = Augmented_box(Object, shape, image_id, Pos_augment)
    Human_augmented_solo = Human_augmented.copy()
    # Truncate so human and object rows pair up one-to-one.
    Human_augmented = Human_augmented[:min(len(Human_augmented), len(Object_augmented))]
    Object_augmented = Object_augmented[:min(len(Human_augmented), len(Object_augmented))]
    num_pos = len(Human_augmented)
    if image_id in Trainval_Neg:
        if len(Trainval_Neg[image_id]) < Neg_select:
            # Fewer negatives available than requested: take them all.
            for Neg in Trainval_Neg[image_id]:
                Human_augmented = np.concatenate(
                    (Human_augmented, np.array([0, Neg[2][0], Neg[2][1], Neg[2][2], Neg[2][3]]).reshape(1, 5)), axis=0)
                Object_augmented = np.concatenate(
                    (Object_augmented, np.array([0, Neg[3][0], Neg[3][1], Neg[3][2], Neg[3][3]]).reshape(1, 5)), axis=0)
        else:
            # Random permutation of all negatives; keep the first Neg_select.
            List = random.sample(range(len(Trainval_Neg[image_id])), len(Trainval_Neg[image_id]))
            for i in range(Neg_select):
                Neg = Trainval_Neg[image_id][List[i]]
                Human_augmented = np.concatenate(
                    (Human_augmented, np.array([0, Neg[2][0], Neg[2][1], Neg[2][2], Neg[2][3]]).reshape(1, 5)), axis=0)
                Object_augmented = np.concatenate(
                    (Object_augmented, np.array([0, Neg[3][0], Neg[3][1], Neg[3][2], Neg[3][3]]).reshape(1, 5)), axis=0)
    num_pos_neg = len(Human_augmented)
    action_HO = action_HO_
    action_H = action_H_
    mask_HO = mask_HO_
    mask_H = mask_H_
    Pattern = np.empty((0, 64, 64, 2), dtype=np.float32)
    # Replicate the GT labels/masks once per jittered positive pair.
    for i in range(num_pos - 1):
        action_HO = np.concatenate((action_HO, action_HO_), axis=0)
        action_H = np.concatenate((action_H, action_H_), axis=0)
        mask_H = np.concatenate((mask_H, mask_H_), axis=0)
    for i in range(num_pos_neg - 1):
        mask_HO = np.concatenate((mask_HO, mask_HO_), axis=0)
    # Negative pairs carry all-zero HO labels.
    for i in range(num_pos_neg - num_pos):
        action_HO = np.concatenate((action_HO, np.zeros(29).reshape(1, 29)), axis=0)
    # Build the 64x64x2 spatial-configuration pattern for every pair
    # (column 0 of each box row is the batch-index placeholder, hence [1:]).
    for i in range(num_pos_neg):
        Pattern_ = Get_next_sp(Human_augmented[i][1:], Object_augmented[i][1:]).reshape(1, 64, 64, 2)
        Pattern = np.concatenate((Pattern, Pattern_), axis=0)
    Pattern = Pattern.reshape(num_pos_neg, 64, 64, 2)
    Human_augmented = Human_augmented.reshape(num_pos_neg, 5)
    # NOTE(review): Human_augmented_solo keeps its pre-truncation length; this
    # reshape assumes it equals num_pos — verify for the case where the object
    # produced fewer augmented boxes than the human.
    Human_augmented_solo = Human_augmented_solo.reshape(num_pos, 5)
    Object_augmented = Object_augmented.reshape(num_pos_neg, 5)
    action_HO = action_HO.reshape(num_pos_neg, 29)
    action_H = action_H.reshape(num_pos, 29)
    mask_HO = mask_HO.reshape(num_pos_neg, 29)
    mask_H = mask_H.reshape(num_pos, 29)
    return Pattern, Human_augmented, Human_augmented_solo, Object_augmented, action_HO, action_H, mask_HO, mask_H
def Augmented_HO_spNeg(GT, Trainval_Neg, shape, Pos_augment, Neg_select):
    """Augment one V-COCO pair with jittered positives and sampled negatives (29 verbs).

    :param GT: ground-truth record; uses GT[0]=image_id, GT[1]=verb indices,
        GT[2]=human box, GT[3]=object box, GT[4]=human-only verb indices,
        GT[-1]=object class (sequence; first element used).
    :param Trainval_Neg: dict image_id -> list of negative pair records.
    :param shape: image shape (H, W, C) for clipping jittered boxes.
    :param Pos_augment: number of jittered copies requested per positive box.
    :param Neg_select: maximum number of negatives to take for this image.
    :return: (Pattern, Human_augmented_sp, Human_augmented, Object_augmented,
        action_sp, action_HO, action_H, mask_sp, mask_HO, mask_H, action_compose).
        Spatial-branch outputs (*_sp, action_compose) have num_pos_neg rows
        (positives + negatives); instance-branch outputs have num_pos rows.
    """
    image_id = GT[0]
    Human = GT[2]
    Object = GT[3]
    # Valid (verb, object-class) compositions; an index into this list defines
    # the composed-action label space.
    set_list = [(0, 38), (1, 31), (1, 32), (2, 43), (2, 44), (2, 77), (4, 1), (4, 19),
                (4, 28), (4, 46), (4, 47), (4, 48), (4, 49), (4, 51), (4, 52), (4, 54),
                (4, 55), (4, 56), (5, 2), (5, 3), (5, 4), (5, 6), (5, 7), (5, 8), (5, 9),
                (5, 18), (5, 21), (6, 68), (7, 33), (8, 64), (9, 47), (9, 48), (9, 49),
                (9, 50), (9, 51), (9, 52), (9, 53), (9, 54), (9, 55), (9, 56), (10, 2),
                (10, 4), (10, 14), (10, 18), (10, 21), (10, 25), (10, 27), (10, 29),
                (10, 57), (10, 58), (10, 60), (10, 61), (10, 62), (10, 64), (11, 31),
                (11, 32), (11, 37), (11, 38), (12, 14), (12, 57), (12, 58), (12, 60),
                (12, 61), (13, 40), (13, 41), (13, 42), (13, 46), (14, 1), (14, 25),
                (14, 26), (14, 27), (14, 29), (14, 30), (14, 31), (14, 32), (14, 33),
                (14, 34), (14, 35), (14, 37), (14, 38), (14, 39), (14, 40), (14, 41),
                (14, 42), (14, 47), (14, 50), (14, 68), (14, 74), (14, 75), (14, 78),
                (15, 30), (15, 33), (16, 43), (16, 44), (16, 45), (18, 1), (18, 2),
                (18, 3), (18, 4), (18, 5), (18, 6), (18, 7), (18, 8), (18, 11),
                (18, 14), (18, 15), (18, 16), (18, 17), (18, 18), (18, 19), (18, 20),
                (18, 21), (18, 24), (18, 25), (18, 26), (18, 27), (18, 28), (18, 29),
                (18, 30), (18, 31), (18, 32), (18, 33), (18, 34), (18, 35), (18, 36),
                (18, 37), (18, 38), (18, 39), (18, 40), (18, 41), (18, 42), (18, 43),
                (18, 44), (18, 45), (18, 46), (18, 47), (18, 48), (18, 49), (18, 51),
                (18, 53), (18, 54), (18, 55), (18, 56), (18, 57), (18, 61), (18, 62),
                (18, 63), (18, 64), (18, 65), (18, 66), (18, 67), (18, 68), (18, 73),
                (18, 74), (18, 75), (18, 77), (19, 35), (19, 39), (20, 33), (21, 31),
                (21, 32), (23, 1), (23, 11), (23, 19), (23, 20), (23, 24), (23, 28),
                (23, 34), (23, 49), (23, 53), (23, 56), (23, 61), (23, 63), (23, 64),
                (23, 67), (23, 68), (23, 73), (24, 74), (25, 1), (25, 2), (25, 4),
                (25, 8), (25, 9), (25, 14), (25, 15), (25, 16), (25, 17), (25, 18),
                (25, 19), (25, 21), (25, 25), (25, 26), (25, 27), (25, 28), (25, 29),
                (25, 30), (25, 31), (25, 32), (25, 33), (25, 34), (25, 35), (25, 36),
                (25, 37), (25, 38), (25, 39), (25, 40), (25, 41), (25, 42), (25, 43),
                (25, 44), (25, 45), (25, 46), (25, 47), (25, 48), (25, 49), (25, 50),
                (25, 51), (25, 52), (25, 53), (25, 54), (25, 55), (25, 56), (25, 57),
                (25, 64), (25, 65), (25, 66), (25, 67), (25, 68), (25, 73), (25, 74),
                (25, 77), (25, 78), (25, 79), (25, 80), (26, 32), (26, 37), (28, 30),
                (28, 33)]
    action_sp_ = Generate_action(GT[1])
    action_HO_ = Generate_action(GT[1])
    obj_cls = GT[-1]
    # Composed labels: each GT verb paired with the GT object class.
    action_compose = [set_list.index(item) for item in [(ho, obj_cls[0]) for ho in GT[1]]]
    action_compose_ = Generate_action(action_compose, nums=len(set_list))
    action_H_ = Generate_action(GT[4])
    # Zeros at indices 3, 17, 22, 27 — presumably verbs without an object role
    # (TODO confirm against the V-COCO label list).
    mask_sp_ = np.asarray(
        [1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1]).reshape(1, 29)
    mask_HO_ = np.asarray(
        [1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1]).reshape(1, 29)
    mask_H_ = np.asarray(
        [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]).reshape(1, 29)
    Human_augmented = Augmented_box(Human, shape, image_id, Pos_augment)
    Object_augmented = Augmented_box(Object, shape, image_id, Pos_augment)
    if Human[0] == 0 and Human[1] == 0 and Human[2] == 0:
        # All-zero human box: Augmented_box returns a single row, so pad by
        # repeating trailing rows until Pos_augment + 1 boxes exist.
        while len(Human_augmented) < Pos_augment + 1:
            Human_augmented = np.concatenate(
                [Human_augmented, Human_augmented[-(Pos_augment + 1 - len(Human_augmented)):]], axis=0)
    # Truncate so human and object rows pair up one-to-one.
    Human_augmented = Human_augmented[:min(len(Human_augmented), len(Object_augmented))]
    Object_augmented = Object_augmented[:min(len(Human_augmented), len(Object_augmented))]
    num_pos = len(Human_augmented)
    if image_id in Trainval_Neg:
        if len(Trainval_Neg[image_id]) < Neg_select:
            # Fewer negatives available than requested: take them all.
            for Neg in Trainval_Neg[image_id]:
                Human_augmented = np.concatenate(
                    (Human_augmented, np.array([0, Neg[2][0], Neg[2][1], Neg[2][2], Neg[2][3]]).reshape(1, 5)), axis=0)
                Object_augmented = np.concatenate(
                    (Object_augmented, np.array([0, Neg[3][0], Neg[3][1], Neg[3][2], Neg[3][3]]).reshape(1, 5)), axis=0)
        else:
            # Random permutation; keep the first Neg_select negatives.
            List = random.sample(range(len(Trainval_Neg[image_id])), len(Trainval_Neg[image_id]))
            for i in range(Neg_select):
                Neg = Trainval_Neg[image_id][List[i]]
                Human_augmented = np.concatenate(
                    (Human_augmented, np.array([0, Neg[2][0], Neg[2][1], Neg[2][2], Neg[2][3]]).reshape(1, 5)), axis=0)
                Object_augmented = np.concatenate(
                    (Object_augmented, np.array([0, Neg[3][0], Neg[3][1], Neg[3][2], Neg[3][3]]).reshape(1, 5)), axis=0)
    num_pos_neg = len(Human_augmented)
    action_sp = action_sp_
    action_HO = action_HO_
    action_H = action_H_
    action_compose = action_compose_
    mask_sp = mask_sp_
    mask_HO = mask_HO_
    mask_H = mask_H_
    Pattern = np.empty((0, 64, 64, 2), dtype=np.float32)
    # Replicate the GT labels/masks once per jittered positive pair.
    for i in range(num_pos - 1):
        action_sp = np.concatenate((action_sp, action_sp_), axis=0)
        action_HO = np.concatenate((action_HO, action_HO_), axis=0)
        action_H = np.concatenate((action_H, action_H_), axis=0)
        action_compose = np.concatenate((action_compose, action_compose_), axis=0)
        mask_HO = np.concatenate((mask_HO, mask_HO_), axis=0)
        mask_H = np.concatenate((mask_H, mask_H_), axis=0)
    for i in range(num_pos_neg - 1):
        mask_sp = np.concatenate((mask_sp, mask_sp_), axis=0)
    # Negatives carry all-zero labels in the spatial and composed spaces.
    for i in range(num_pos_neg - num_pos):
        action_sp = np.concatenate((action_sp, np.zeros(29).reshape(1, 29)), axis=0)
        action_compose = np.concatenate((action_compose, np.zeros(len(set_list)).reshape(1, len(set_list))), axis=0)
    # 64x64x2 spatial-configuration pattern for every pair ([1:] drops the
    # batch-index placeholder column).
    for i in range(num_pos_neg):
        Pattern_ = Get_next_sp(Human_augmented[i][1:], Object_augmented[i][1:]).reshape(1, 64, 64, 2)
        Pattern = np.concatenate((Pattern, Pattern_), axis=0)
    Pattern = Pattern.reshape(num_pos_neg, 64, 64, 2)
    Human_augmented_sp = Human_augmented.reshape(num_pos_neg, 5)
    Object_augmented = Object_augmented[:num_pos].reshape(num_pos, 5)
    action_sp = action_sp.reshape(num_pos_neg, 29)
    action_HO = action_HO.reshape(num_pos, 29)
    action_H = action_H.reshape(num_pos, 29)
    # Bug fix: action_compose holds num_pos_neg rows at this point (zero rows
    # for negatives were appended above), so reshaping to num_pos raised
    # ValueError whenever negatives existed. Use num_pos_neg, consistent with
    # Augmented_HO_spNeg2 / Augmented_HO_spNeg3.
    action_compose = action_compose.reshape(num_pos_neg, len(set_list))
    mask_sp = mask_sp.reshape(num_pos_neg, 29)
    mask_HO = mask_HO.reshape(num_pos, 29)
    mask_H = mask_H.reshape(num_pos, 29)
    return Pattern, Human_augmented_sp, Human_augmented, Object_augmented, action_sp, action_HO, action_H, mask_sp, mask_HO, mask_H, action_compose
def Augmented_HO_spNeg2(GT, Trainval_Neg, shape, Pos_augment, Neg_select):
    """Variant of Augmented_HO_spNeg for a 24-verb label space.

    Same augmentation scheme: jitter the positive pair, append sampled
    negatives, replicate labels/masks, and build a 64x64x2 spatial pattern per
    pair. Spatial-branch outputs (*_sp, action_compose) have num_pos_neg rows;
    instance-branch outputs have num_pos rows.
    """
    image_id = GT[0]
    Human = GT[2]
    Object = GT[3]
    # Valid (verb, object-class) compositions for the 24-verb setting.
    set_list = [(0, 38), (1, 31), (1, 32), (2, 43), (2, 44), (2, 77), (3, 1), (3, 19), (3, 28), (3, 46), (3, 47),
                (3, 48), (3, 49), (3, 51), (3, 52), (3, 54), (3, 55), (3, 56), (4, 2), (4, 3), (4, 4), (4, 6), (4, 7),
                (4, 8), (4, 9), (4, 18), (4, 21), (5, 68), (6, 33), (7, 64), (8, 47), (8, 48), (8, 49), (8, 50),
                (8, 51), (8, 52), (8, 53), (8, 54), (8, 55), (8, 56), (9, 2), (9, 4), (9, 14), (9, 18), (9, 21),
                (9, 25), (9, 27), (9, 29), (9, 57), (9, 58), (9, 60), (9, 61), (9, 62), (9, 64), (10, 31), (10, 32),
                (10, 37), (10, 38), (11, 14), (11, 57), (11, 58), (11, 60), (11, 61), (12, 40), (12, 41), (12, 42),
                (12, 46), (13, 1), (13, 25), (13, 26), (13, 27), (13, 29), (13, 30), (13, 31), (13, 32), (13, 33),
                (13, 34), (13, 35), (13, 37), (13, 38), (13, 39), (13, 40), (13, 41), (13, 42), (13, 47), (13, 50),
                (13, 68), (13, 74), (13, 75), (13, 78), (14, 30), (14, 33), (15, 43), (15, 44), (15, 45), (16, 1),
                (16, 2), (16, 3), (16, 4), (16, 5), (16, 6), (16, 7), (16, 8), (16, 11), (16, 14), (16, 15), (16, 16),
                (16, 17), (16, 18), (16, 19), (16, 20), (16, 21), (16, 24), (16, 25), (16, 26), (16, 27), (16, 28),
                (16, 29), (16, 30), (16, 31), (16, 32), (16, 33), (16, 34), (16, 35), (16, 36), (16, 37), (16, 38),
                (16, 39), (16, 40), (16, 41), (16, 42), (16, 43), (16, 44), (16, 45), (16, 46), (16, 47), (16, 48),
                (16, 49), (16, 51), (16, 53), (16, 54), (16, 55), (16, 56), (16, 57), (16, 61), (16, 62), (16, 63),
                (16, 64), (16, 65), (16, 66), (16, 67), (16, 68), (16, 73), (16, 74), (16, 75), (16, 77), (17, 35),
                (17, 39), (18, 33), (19, 31), (19, 32), (20, 74), (21, 1), (21, 2), (21, 4), (21, 8), (21, 9), (21, 14),
                (21, 15), (21, 16), (21, 17), (21, 18), (21, 19), (21, 21), (21, 25), (21, 26), (21, 27), (21, 28),
                (21, 29), (21, 30), (21, 31), (21, 32), (21, 33), (21, 34), (21, 35), (21, 36), (21, 37), (21, 38),
                (21, 39), (21, 40), (21, 41), (21, 42), (21, 43), (21, 44), (21, 45), (21, 46), (21, 47), (21, 48),
                (21, 49), (21, 50), (21, 51), (21, 52), (21, 53), (21, 54), (21, 55), (21, 56), (21, 57), (21, 64),
                (21, 65), (21, 66), (21, 67), (21, 68), (21, 73), (21, 74), (21, 77), (21, 78), (21, 79), (21, 80),
                (22, 32), (22, 37), (23, 30), (23, 33)]
    action_sp_ = Generate_action(GT[1], nums=24)
    action_HO_ = Generate_action(GT[1], nums=24)
    obj_cls = GT[-1]
    # Composed labels: each GT verb paired with the GT object class.
    action_compose = [set_list.index(item) for item in [(ho, obj_cls[0]) for ho in GT[1]]]
    action_compose_ = Generate_action(action_compose, nums=len(set_list))
    action_H_ = Generate_action(GT[4], nums=24)
    # All-ones masks: every verb contributes to the loss in this setting.
    mask_sp_ = np.ones([1, 24], np.int32)
    mask_HO_ = np.ones([1, 24], np.int32)
    mask_H_ = np.ones([1, 24], np.int32)
    Human_augmented = Augmented_box(Human, shape, image_id, Pos_augment)
    Object_augmented = Augmented_box(Object, shape, image_id, Pos_augment)
    # Truncate so human and object rows pair up one-to-one.
    Human_augmented = Human_augmented[:min(len(Human_augmented), len(Object_augmented))]
    Object_augmented = Object_augmented[:min(len(Human_augmented), len(Object_augmented))]
    num_pos = len(Human_augmented)
    # pose_list = [GT[5]] * num_pos
    if image_id in Trainval_Neg:
        if len(Trainval_Neg[image_id]) < Neg_select:
            # Fewer negatives available than requested: take them all.
            for Neg in Trainval_Neg[image_id]:
                # pose_list.append(Neg[7])
                Human_augmented = np.concatenate(
                    (Human_augmented, np.array([0, Neg[2][0], Neg[2][1], Neg[2][2], Neg[2][3]]).reshape(1, 5)), axis=0)
                Object_augmented = np.concatenate(
                    (Object_augmented, np.array([0, Neg[3][0], Neg[3][1], Neg[3][2], Neg[3][3]]).reshape(1, 5)), axis=0)
        else:
            # Random permutation; keep the first Neg_select negatives.
            List = random.sample(range(len(Trainval_Neg[image_id])), len(Trainval_Neg[image_id]))
            for i in range(Neg_select):
                Neg = Trainval_Neg[image_id][List[i]]
                # pose_list.append(Neg[7])
                Human_augmented = np.concatenate(
                    (Human_augmented, np.array([0, Neg[2][0], Neg[2][1], Neg[2][2], Neg[2][3]]).reshape(1, 5)), axis=0)
                Object_augmented = np.concatenate(
                    (Object_augmented, np.array([0, Neg[3][0], Neg[3][1], Neg[3][2], Neg[3][3]]).reshape(1, 5)), axis=0)
    num_pos_neg = len(Human_augmented)
    action_sp = action_sp_
    action_HO = action_HO_
    action_H = action_H_
    action_compose = action_compose_
    mask_sp = mask_sp_
    mask_HO = mask_HO_
    mask_H = mask_H_
    Pattern = np.empty((0, 64, 64, 2), dtype=np.float32)
    pose_box = []
    # print('pose infor:', GT[5], pose_list)
    # pose_box = obtain_pose_box(Human_augmented, pose_list, shape)
    # NOTE(review): pose_box is populated (17 copies of each human box) but is
    # never used or returned from this function.
    for item in Human_augmented:
        pose_box.extend([item] * 17)
    # Replicate the GT labels/masks once per jittered positive pair.
    for i in range(num_pos - 1):
        action_sp = np.concatenate((action_sp, action_sp_), axis=0)
        action_HO = np.concatenate((action_HO, action_HO_), axis=0)
        action_H = np.concatenate((action_H, action_H_), axis=0)
        action_compose = np.concatenate((action_compose, action_compose_), axis=0)
        mask_HO = np.concatenate((mask_HO, mask_HO_), axis=0)
        mask_H = np.concatenate((mask_H, mask_H_), axis=0)
    for i in range(num_pos_neg - 1):
        mask_sp = np.concatenate((mask_sp, mask_sp_), axis=0)
    # Negatives carry all-zero labels in the spatial and composed spaces.
    for i in range(num_pos_neg - num_pos):
        action_sp = np.concatenate((action_sp, np.zeros(24).reshape(1, 24)), axis=0)
        action_compose = np.concatenate((action_compose, np.zeros(len(set_list)).reshape(1, len(set_list))), axis=0)
    for i in range(num_pos_neg):
        Pattern_ = Get_next_sp(Human_augmented[i][1:], Object_augmented[i][1:]).reshape(1, 64, 64, 2)
        Pattern = np.concatenate((Pattern, Pattern_), axis=0)
        # NOTE(review): this mask is re-created every iteration and never used;
        # the commented-out code below suggests an abandoned object-mask feature.
        mask = np.zeros(shape=(1, shape[0] // 16, shape[1] // 16, 1), dtype=np.float32)
        # obj_box = Object_augmented[i][1:].astype(np.int32)
        # print(obj_box)
        # mask[:, obj_box[0]:obj_box[2], obj_box[1]:obj_box[3]] = 1
        # from skimage import transform
        # mask = transform.resize(mask, [1, shape[0] // 16, shape[1] // 16, 1], order=0, preserve_range=True)
    Pattern = Pattern.reshape(num_pos_neg, 64, 64, 2)
    Human_augmented_sp = Human_augmented.reshape(num_pos_neg, 5)
    Object_augmented = Object_augmented[:num_pos].reshape(num_pos, 5)
    action_sp = action_sp.reshape(num_pos_neg, 24)
    action_HO = action_HO.reshape(num_pos, 24)
    action_H = action_H.reshape(num_pos, 24)
    action_compose = action_compose.reshape(num_pos_neg, len(set_list))
    mask_sp = mask_sp.reshape(num_pos_neg, 24)
    mask_HO = mask_HO.reshape(num_pos, 24)
    mask_H = mask_H.reshape(num_pos, 24)
    return Pattern, Human_augmented_sp, Human_augmented, Object_augmented, action_sp, action_HO, action_H, mask_sp, mask_HO, mask_H, action_compose
def Augmented_HO_spNeg3(GT, Trainval_Neg, shape, Pos_augment, Neg_select):
    """Variant of Augmented_HO_spNeg for a 21-verb label space.

    Same augmentation scheme: jitter the positive pair, append sampled
    negatives, replicate labels/masks, and build a 64x64x2 spatial pattern per
    pair. Spatial-branch outputs (*_sp, action_compose) have num_pos_neg rows;
    instance-branch outputs have num_pos rows.
    """
    image_id = GT[0]
    Human = GT[2]
    Object = GT[3]
    # Valid (verb, object-class) compositions for the 21-verb setting.
    set_list = [(0, 38), (1, 31), (1, 32), (2, 1), (2, 19), (2, 28), (2, 43), (2, 44), (2, 46), (2, 47), (2, 48),
                (2, 49),
                (2, 51), (2, 52), (2, 54), (2, 55), (2, 56), (2, 77), (3, 2), (3, 3), (3, 4), (3, 6), (3, 7), (3, 8),
                (3, 9), (3, 18), (3, 21), (4, 68), (5, 33), (6, 64), (7, 43), (7, 44), (7, 45), (7, 47), (7, 48),
                (7, 49),
                (7, 50), (7, 51), (7, 52), (7, 53), (7, 54), (7, 55), (7, 56), (8, 2), (8, 4), (8, 14), (8, 18),
                (8, 21),
                (8, 25), (8, 27), (8, 29), (8, 57), (8, 58), (8, 60), (8, 61), (8, 62), (8, 64), (9, 31), (9, 32),
                (9, 37),
                (9, 38), (10, 14), (10, 57), (10, 58), (10, 60), (10, 61), (11, 40), (11, 41), (11, 42), (11, 46),
                (12, 1),
                (12, 25), (12, 26), (12, 27), (12, 29), (12, 30), (12, 31), (12, 32), (12, 33), (12, 34), (12, 35),
                (12, 37), (12, 38), (12, 39), (12, 40), (12, 41), (12, 42), (12, 47), (12, 50), (12, 68), (12, 74),
                (12, 75), (12, 78), (13, 30), (13, 33), (14, 1), (14, 2), (14, 3), (14, 4), (14, 5), (14, 6), (14, 7),
                (14, 8), (14, 11), (14, 14), (14, 15), (14, 16), (14, 17), (14, 18), (14, 19), (14, 20), (14, 21),
                (14, 24),
                (14, 25), (14, 26), (14, 27), (14, 28), (14, 29), (14, 30), (14, 31), (14, 32), (14, 33), (14, 34),
                (14, 35), (14, 36), (14, 37), (14, 38), (14, 39), (14, 40), (14, 41), (14, 42), (14, 43), (14, 44),
                (14, 45), (14, 46), (14, 47), (14, 48), (14, 49), (14, 51), (14, 53), (14, 54), (14, 55), (14, 56),
                (14, 57), (14, 61), (14, 62), (14, 63), (14, 64), (14, 65), (14, 66), (14, 67), (14, 68), (14, 73),
                (14, 74), (14, 75), (14, 77), (15, 33), (15, 35), (15, 39), (16, 31), (16, 32), (17, 74), (18, 1),
                (18, 2),
                (18, 4), (18, 8), (18, 9), (18, 14), (18, 15), (18, 16), (18, 17), (18, 18), (18, 19), (18, 21),
                (18, 25),
                (18, 26), (18, 27), (18, 28), (18, 29), (18, 30), (18, 31), (18, 32), (18, 33), (18, 34), (18, 35),
                (18, 36), (18, 37), (18, 38), (18, 39), (18, 40), (18, 41), (18, 42), (18, 43), (18, 44), (18, 45),
                (18, 46), (18, 47), (18, 48), (18, 49), (18, 50), (18, 51), (18, 52), (18, 53), (18, 54), (18, 55),
                (18, 56), (18, 57), (18, 64), (18, 65), (18, 66), (18, 67), (18, 68), (18, 73), (18, 74), (18, 77),
                (18, 78), (18, 79), (18, 80), (19, 32), (19, 37), (20, 30), (20, 33)]
    action_sp_ = Generate_action(GT[1], nums=21)
    action_HO_ = Generate_action(GT[1], nums=21)
    obj_cls = GT[-1]
    # Composed labels: each GT verb paired with the GT object class.
    action_compose = [set_list.index(item) for item in [(ho, obj_cls[0]) for ho in GT[1]]]
    action_compose_ = Generate_action(action_compose, nums=len(set_list))
    action_H_ = Generate_action(GT[4], nums=21)
    # All-ones masks: every verb contributes to the loss in this setting.
    mask_sp_ = np.ones([1, 21], np.int32)
    mask_HO_ = np.ones([1, 21], np.int32)
    mask_H_ = np.ones([1, 21], np.int32)
    Human_augmented = Augmented_box(Human, shape, image_id, Pos_augment)
    Object_augmented = Augmented_box(Object, shape, image_id, Pos_augment)
    # Truncate so human and object rows pair up one-to-one.
    Human_augmented = Human_augmented[:min(len(Human_augmented), len(Object_augmented))]
    Object_augmented = Object_augmented[:min(len(Human_augmented), len(Object_augmented))]
    num_pos = len(Human_augmented)
    # pose_list = [GT[5]] * num_pos
    if image_id in Trainval_Neg:
        if len(Trainval_Neg[image_id]) < Neg_select:
            # Fewer negatives available than requested: take them all.
            for Neg in Trainval_Neg[image_id]:
                # pose_list.append(Neg[7])
                Human_augmented = np.concatenate(
                    (Human_augmented, np.array([0, Neg[2][0], Neg[2][1], Neg[2][2], Neg[2][3]]).reshape(1, 5)), axis=0)
                Object_augmented = np.concatenate(
                    (Object_augmented, np.array([0, Neg[3][0], Neg[3][1], Neg[3][2], Neg[3][3]]).reshape(1, 5)), axis=0)
        else:
            # Random permutation; keep the first Neg_select negatives.
            List = random.sample(range(len(Trainval_Neg[image_id])), len(Trainval_Neg[image_id]))
            for i in range(Neg_select):
                Neg = Trainval_Neg[image_id][List[i]]
                # pose_list.append(Neg[7])
                Human_augmented = np.concatenate(
                    (Human_augmented, np.array([0, Neg[2][0], Neg[2][1], Neg[2][2], Neg[2][3]]).reshape(1, 5)), axis=0)
                Object_augmented = np.concatenate(
                    (Object_augmented, np.array([0, Neg[3][0], Neg[3][1], Neg[3][2], Neg[3][3]]).reshape(1, 5)), axis=0)
    num_pos_neg = len(Human_augmented)
    action_sp = action_sp_
    action_HO = action_HO_
    action_H = action_H_
    action_compose = action_compose_
    mask_sp = mask_sp_
    mask_HO = mask_HO_
    mask_H = mask_H_
    Pattern = np.empty((0, 64, 64, 2), dtype=np.float32)
    pose_box = []
    # print('pose infor:', GT[5], pose_list)
    # pose_box = obtain_pose_box(Human_augmented, pose_list, shape)
    # NOTE(review): pose_box is populated (17 copies of each human box) but is
    # never used or returned from this function.
    for item in Human_augmented:
        pose_box.extend([item] * 17)
    # Replicate the GT labels/masks once per jittered positive pair.
    for i in range(num_pos - 1):
        action_sp = np.concatenate((action_sp, action_sp_), axis=0)
        action_HO = np.concatenate((action_HO, action_HO_), axis=0)
        action_H = np.concatenate((action_H, action_H_), axis=0)
        action_compose = np.concatenate((action_compose, action_compose_), axis=0)
        mask_HO = np.concatenate((mask_HO, mask_HO_), axis=0)
        mask_H = np.concatenate((mask_H, mask_H_), axis=0)
    for i in range(num_pos_neg - 1):
        mask_sp = np.concatenate((mask_sp, mask_sp_), axis=0)
    # Negatives carry all-zero labels in the spatial and composed spaces.
    for i in range(num_pos_neg - num_pos):
        action_sp = np.concatenate((action_sp, np.zeros(21).reshape(1, 21)), axis=0)
        action_compose = np.concatenate((action_compose, np.zeros(len(set_list)).reshape(1, len(set_list))), axis=0)
    for i in range(num_pos_neg):
        Pattern_ = Get_next_sp(Human_augmented[i][1:], Object_augmented[i][1:]).reshape([1, 64, 64, 2])
        # Pattern_ = np.concatenate([Pattern_, np.zeros([1, 64, 64, 1])], axis=-1)
        Pattern = np.concatenate((Pattern, Pattern_), axis=0)
        # NOTE(review): this mask is re-created every iteration and never used.
        mask = np.zeros(shape=(1, shape[0] // 16, shape[1] // 16, 1), dtype=np.float32)
    Pattern = Pattern.reshape(num_pos_neg, 64, 64, 2)
    Human_augmented_sp = Human_augmented.reshape(num_pos_neg, 5)
    Object_augmented = Object_augmented[:num_pos].reshape(num_pos, 5)
    action_sp = action_sp.reshape(num_pos_neg, 21)
    action_HO = action_HO.reshape(num_pos, 21)
    action_H = action_H.reshape(num_pos, 21)
    action_compose = action_compose.reshape(num_pos_neg, len(set_list))
    mask_sp = mask_sp.reshape(num_pos_neg, 21)
    mask_HO = mask_HO.reshape(num_pos, 21)
    mask_H = mask_H.reshape(num_pos, 21)
    return Pattern, Human_augmented_sp, Human_augmented, Object_augmented, action_sp, action_HO, action_H, mask_sp, mask_HO, mask_H, action_compose
def Generate_action_HICO(action_list):
    """Multi-hot encode HICO-DET HOI class indices into a (1, 600) float array."""
    labels = np.zeros(600)
    for hoi_idx in action_list:
        labels[hoi_idx] = 1
    return labels.reshape(1, 600)
def Get_Next_Instance_HO_Neg_HICO(trainval_GT, Trainval_Neg, iter, Pos_augment, Neg_select, Data_length):
    """Build one HICO-DET training blob: mean-subtracted image plus augmented pairs/labels."""
    GT = trainval_GT[iter % Data_length]
    image_id = GT[0]
    im_file = cfg.DATA_DIR + '/' + 'hico_20160224_det/images/train2015/HICO_train2015_' + (str(image_id)).zfill(
        8) + '.jpg'
    raw = cv2.imread(im_file)
    image = raw.astype(np.float32, copy=True) - cfg.PIXEL_MEANS
    im_shape = image.shape  # (H, W, 3) before the batch axis is added
    image = image.reshape(1, im_shape[0], im_shape[1], 3)
    Pattern, Human_augmented, Object_augmented, action_HO, num_pos = Augmented_HO_Neg_HICO(
        GT, Trainval_Neg, im_shape, Pos_augment, Neg_select)
    return {
        'image': image,
        'H_boxes': Human_augmented,
        'O_boxes': Object_augmented,
        'gt_class_HO': action_HO,
        'sp': Pattern,
        'H_num': num_pos,
    }
def Augmented_neg_box(bbox, shape, image_id, augment=15, bbox_list=[]):
thres_ = 0.25
# box = np.array([0, bbox[0], bbox[1], bbox[2], bbox[3]]).reshape(1, 5)
# box = box.astype(np.float64)
box = np.empty([1, 5], np.float64)
count = 0
time_count = 0
while count < augment:
time_count += 1
height = bbox[3] - bbox[1]
width = bbox[2] - bbox[0]
height_cen = (bbox[3] + bbox[1]) / 2
width_cen = (bbox[2] + bbox[0]) / 2
ratio = 1 + randint(-10, 10) * 0.01
height_shift = randint(-np.floor(height), np.floor(height))
height_shift = np.sign(height_shift) * 0.5 * height + height_shift
width_shift = randint(-np.floor(width), np.floor(width)) * 0.1
width_shift = np.sign(width_shift) * 0.5 * width + width_shift
H_0 = max(0, width_cen + width_shift - ratio * width / 2)
H_2 = min(shape[1] - 1, width_cen + width_shift + ratio * width / 2)
H_1 = max(0, height_cen + height_shift - ratio * height / 2)
H_3 = min(shape[0] - 1, height_cen + height_shift + ratio * height / 2)
valid_neg_box = True
for bbox1 in bbox_list:
if bb_IOU(bbox1, np.array([H_0, H_1, H_2, H_3])) > thres_:
valid_neg_box = False
break
if valid_neg_box:
box_ = np.array([0, H_0, H_1, H_2, H_3]).reshape(1, 5)
box = np.concatenate((box, box_), axis=0)
count += 1
if time_count > 150:
return box
return box
def obtain_data2_large(Pos_augment=15, Neg_select=60, augment_type=0, model_name='',
                       pattern_type=False, zero_shot_type=0, isalign=False, bnum=2, neg_type_ratio=0):
    """Create a tf.data input pipeline that merges `bnum` HICO samples into one padded batch.

    :param Pos_augment: jittered positive copies per GT pair (forwarded to generator2).
    :param Neg_select: negatives per image (forwarded).
    :param augment_type: augmentation mode flag (forwarded).
    :param model_name: if it contains 'x5new', an extra eighth of each image's
        negatives is counted as semi-positive in the split index.
    :param pattern_type: 1 selects the with-pose pickles and a 3-channel pattern.
    :param zero_shot_type: forwarded to generator2.
    :param isalign: forwarded to generator2.
    :param bnum: number of images merged per batch.
    :param neg_type_ratio: forwarded to generator2.
    :return: tensors (image, image_id, num_pos, Human_augmented, Object_augmented,
        action_HO, sp, split_idx) from a one-shot iterator.
    """
    if pattern_type == 1:
        Trainval_GT = pickle.load(open(cfg.DATA_DIR + '/' + 'Trainval_GT_HICO_with_pose.pkl', "rb"), encoding='latin1')
        Trainval_N = pickle.load(open(cfg.DATA_DIR + '/' + 'Trainval_Neg_HICO_with_pose.pkl', "rb"), encoding='latin1')
    else:
        Trainval_GT = pickle.load(open(cfg.DATA_DIR + '/' + 'Trainval_GT_HICO.pkl', "rb"), encoding='latin1')
        Trainval_N = pickle.load(open(cfg.DATA_DIR + '/' + 'Trainval_Neg_HICO.pkl', "rb"), encoding='latin1')
    g_func = generator2

    def generator3(Trainval_GT, Trainval_N, Pos_augment, Neg_select, augment_type):
        # Accumulate bnum per-image samples, then emit one spatially-padded batch.
        buffer = [[] for _ in range(7)]
        for im_orig, image_id, num_pos, Human_augmented, Object_augmented, \
                action_HO, Pattern in g_func(Trainval_GT, Trainval_N, Pos_augment, Neg_select,
                                             augment_type,
                                             pattern_type, zero_shot_type, isalign, 0, neg_type_ratio):
            buffer[0].append(im_orig)
            buffer[1].append(image_id)
            buffer[2].append(num_pos)
            buffer[3].append(Human_augmented)
            buffer[4].append(Object_augmented)
            buffer[5].append(action_HO)
            buffer[6].append(Pattern)
            # Stamp the in-batch image index into column 0 of both box arrays.
            buffer[3][-1][:, 0] = len(buffer[3]) - 1
            buffer[4][-1][:, 0] = len(buffer[3]) - 1
            if len(buffer[0]) >= bnum:
                pos_semi_list = []
                if model_name.__contains__('x5new'):
                    # Count an extra eighth of each image's negatives as semi-positive.
                    for b in range(bnum):
                        pos_semi_list.append(int(buffer[2][b] + (len(buffer[3][b]) - buffer[2][b]) // 8))
                else:
                    for b in range(bnum):
                        pos_semi_list.append(buffer[2][b])
                # Reorder boxes/labels/patterns so all positive rows precede all negatives.
                for ii in range(3, 7):
                    pos_h_boxes = np.concatenate([buffer[ii][pi][:pos2] for pi, pos2 in enumerate(pos_semi_list)],
                                                 axis=0)
                    neg_h_boxes = np.concatenate([buffer[ii][pi][pos2:] for pi, pos2 in enumerate(pos_semi_list)],
                                                 axis=0)
                    buffer[ii] = np.concatenate([pos_h_boxes, neg_h_boxes], axis=0)
                # Zero-pad every image to the largest spatial extent in the batch.
                width = max([buffer[0][b].shape[1] for b in range(bnum)])
                height = max([buffer[0][b].shape[2] for b in range(bnum)])
                im_list = []
                for b in range(bnum):
                    im_list.append(np.pad(buffer[0][b], [(0, 0), (0, max(0, width - buffer[0][b].shape[1])),
                                                         (0, max(0, height - buffer[0][b].shape[2])), (0, 0)],
                                          mode='constant'))
                # Bug fix: the original recomputed width/height here from
                # buffer[7], but the buffer has only 7 slots (indices 0-6), so
                # that always raised IndexError on the first full batch; the
                # recomputed values were never used, so the lines are removed.
                yield np.concatenate(im_list, axis=0), buffer[1], sum(pos_semi_list), \
                    buffer[3], buffer[4], buffer[5], buffer[6], pos_semi_list[0]
                # Reset for the next batch (original reset to 8 slots; keep 7
                # for consistency with the initialization above).
                buffer = [[] for _ in range(7)]

    if pattern_type == 1:
        pattern_channel = 3
    else:
        pattern_channel = 2
    dataset = tf.data.Dataset.from_generator(
        partial(generator3, Trainval_GT, Trainval_N, Pos_augment, Neg_select, augment_type),
        output_types=(
            tf.float32, tf.int32, tf.int64, tf.float32, tf.float32, tf.float32, tf.float32, tf.int32),
        output_shapes=(
            tf.TensorShape([bnum, None, None, 3]),
            tf.TensorShape([bnum, ]),
            tf.TensorShape([]),
            tf.TensorShape([None, 5]),
            tf.TensorShape([None, 5]),
            tf.TensorShape([None, 600]),
            tf.TensorShape([None, 64, 64, pattern_channel]),
            tf.TensorShape([])
        )
    )
    dataset = dataset.prefetch(100)
    iterator = dataset.make_one_shot_iterator()
    image, image_id, num_pos, Human_augmented, Object_augmented, action_HO, sp, split_idx = iterator.get_next()
    return image, image_id, num_pos, Human_augmented, Object_augmented, action_HO, sp, split_idx
def Augmented_HO_Neg_HICO(GT, Trainval_Neg, shape, Pos_augment, Neg_select, pattern_type=False, isalign=False,
                          box_list=None,
                          real_neg_ratio=0):
    """Build augmented positive and negative human-object pairs for one HICO-DET GT item.

    :param GT: one GT entry: [image_id, hoi_ids, human_box, object_box, ...]
    :param Trainval_Neg: dict image_id -> list of pre-mined negative samples
    :param shape: image shape used by Augmented_box to clip jittered boxes
    :param Pos_augment: number of augmented copies generated per positive box
    :param Neg_select: maximum number of negatives sampled for this image
    :param pattern_type: unused here; kept for interface compatibility
    :param isalign: pad human/object box lists so both reach the same length
    :param box_list: candidate boxes for sampling real negatives (default: none)
    :param real_neg_ratio: This is for no action HOI (all zeros)
    :return: (Pattern, Human_augmented, Object_augmented, action_HO, num_pos)
    """
    if box_list is None:  # avoid the mutable-default-argument pitfall
        box_list = []
    image_id = GT[0]
    Human = GT[2]
    Object = GT[3]
    action_HO = Generate_action_HICO(GT[1])
    Human_augmented = Augmented_box(Human, shape, image_id, Pos_augment)
    Object_augmented = Augmented_box(Object, shape, image_id, Pos_augment)
    max_augmented_nums = max(len(Human_augmented), len(Object_augmented))
    if isalign:
        # repeat trailing boxes until both lists reach the same length
        while len(Human_augmented) < max_augmented_nums:
            Human_augmented = np.concatenate(
                [Human_augmented, Human_augmented[-(max_augmented_nums - len(Human_augmented)):]], axis=0)
        while len(Object_augmented) < max_augmented_nums:
            Object_augmented = np.concatenate(
                [Object_augmented, Object_augmented[-(max_augmented_nums - len(Object_augmented)):]], axis=0)
    # keep the human/object pairing strictly one-to-one
    num_pairs = min(len(Human_augmented), len(Object_augmented))
    Human_augmented = Human_augmented[:num_pairs]
    Object_augmented = Object_augmented[:num_pairs]
    # one label row per positive pair
    action_HO = np.tile(action_HO, [len(Human_augmented), 1])
    if len(box_list) > 0 and real_neg_ratio > 0:
        # extra "no interaction" negatives carry all-zero action labels
        aug_neg_objs = Augmented_neg_box(Object, shape, image_id, int(Pos_augment * real_neg_ratio), bbox_list=box_list)
        if len(aug_neg_objs) > 0:
            aug_neg_humans = np.tile([Human_augmented[0]], [len(aug_neg_objs), 1])
            aug_neg_actions = np.zeros([len(aug_neg_objs), 600], )
            Human_augmented = np.concatenate([Human_augmented, aug_neg_humans])
            Object_augmented = np.concatenate([Object_augmented, aug_neg_objs])
            action_HO = np.concatenate([action_HO, aug_neg_actions])
    num_pos = len(Human_augmented)
    if image_id in Trainval_Neg:
        negs = Trainval_Neg[image_id]
        # take them all when fewer than Neg_select exist; otherwise sample
        # Neg_select without replacement (same distribution as the old
        # permutation-then-prefix scheme)
        selected = negs if len(negs) < Neg_select else random.sample(negs, Neg_select)
        h_list = [Human_augmented]
        o_list = [Object_augmented]
        a_list = [action_HO]
        for Neg in selected:
            h_list.append(np.array([0, Neg[2][0], Neg[2][1], Neg[2][2], Neg[2][3]]).reshape(1, 5))
            o_list.append(np.array([0, Neg[3][0], Neg[3][1], Neg[3][2], Neg[3][3]]).reshape(1, 5))
            a_list.append(Generate_action_HICO([Neg[1]]))
        # single concatenate instead of one per negative (avoids O(n^2) copying)
        Human_augmented = np.concatenate(h_list, axis=0)
        Object_augmented = np.concatenate(o_list, axis=0)
        action_HO = np.concatenate(a_list, axis=0)
    num_pos_neg = len(Human_augmented)
    pattern_channel = 2
    # one spatial pattern map per pair; built in a list and concatenated once
    pattern_list = [Get_next_sp(Human_augmented[i][1:], Object_augmented[i][1:]).reshape(1, 64, 64, pattern_channel)
                    for i in range(num_pos_neg)]
    if pattern_list:
        Pattern = np.concatenate(pattern_list, axis=0)
    else:
        Pattern = np.empty((0, 64, 64, pattern_channel), dtype=np.float32)
    Pattern = Pattern.reshape(num_pos_neg, 64, 64, pattern_channel)
    Human_augmented = Human_augmented.reshape(num_pos_neg, 5)
    Object_augmented = Object_augmented.reshape(num_pos_neg, 5)
    action_HO = action_HO.reshape(num_pos_neg, 600)
    return Pattern, Human_augmented, Object_augmented, action_HO, num_pos
def obtain_data2(Pos_augment=15, Neg_select=60, augment_type=0, model_name='', pattern_type=False,
                 zero_shot_type=0, isalign=False, neg_type_ratio=0):
    """Build a tf.data input pipeline that batches two HICO-DET images per step.

    Loads the HICO GT/negative pickles, wraps ``generator2`` in a batching
    generator (``generator3``) that pairs every two yielded images into one
    batch, and returns the one-shot-iterator output tensors.

    :param Pos_augment: positive-augmentation count forwarded to generator2.
    :param Neg_select: negative-sample count forwarded to generator2.
    :param augment_type: augmentation strategy id forwarded to generator2.
    :param model_name: model tag; names containing 'x5new' move 1/8 of the
        negatives into the "positive" split (see pos1/pos2 below).
    :param pattern_type: 1 selects 3-channel spatial patterns, else 2.
    :param zero_shot_type: zero-shot setting forwarded to generator2.
    :param isalign: alignment flag forwarded to generator2.
    :param neg_type_ratio: unused here; kept for interface compatibility.
    :return: (image, image_id, num_pos, Human_augmented, Object_augmented,
        action_HO, sp, split_idx) tensors from the dataset iterator.
    """
    b_num = 2  # fixed: two images per yielded batch
    Trainval_GT = pickle.load(open(cfg.DATA_DIR + '/' + 'Trainval_GT_HICO.pkl', "rb"), encoding='latin1')
    Trainval_N = pickle.load(open(cfg.DATA_DIR + '/' + 'Trainval_Neg_HICO.pkl', "rb"), encoding='latin1')
    g_func = generator2

    def generator3(Trainval_GT, Trainval_N, Pos_augment, Neg_select, augment_type):
        # buffer slots: 0 image, 1 image_id, 2 num_pos, 3 human boxes,
        # 4 object boxes, 5 action labels, 6 spatial patterns
        buffer = [[] for i in range(7)]
        import time
        st = time.time()
        count_time = 0
        avg_time = 0
        for im_orig, image_id, num_pos, Human_augmented, \
                Object_augmented, action_HO, Pattern in g_func(Trainval_GT, Trainval_N, Pos_augment,
                                                               Neg_select,
                                                               augment_type, pattern_type,
                                                               zero_shot_type, isalign,
                                                               0):
            buffer[0].append(im_orig)
            buffer[1].append(image_id)
            buffer[2].append(num_pos)
            buffer[3].append(Human_augmented)
            buffer[4].append(Object_augmented)
            buffer[5].append(action_HO)
            buffer[6].append(Pattern)
            # buffer[8].append(pose_list)
            if len(buffer[0]) >= b_num:
                # yield buffer[0][0], buffer[0][1], buffer[1], buffer[2], buffer[3], buffer[4], buffer[5], buffer[6]
                if len(buffer[3][0]) < len(buffer[3][1]):
                    # make sure the second batch is less.
                    for i in range(len(buffer)):
                        tmp = buffer[i][0]
                        buffer[i][0] = buffer[i][1]
                        buffer[i][1] = tmp
                # stamp the second image's boxes with batch index 1 (column 0)
                buffer[3][1][:, 0] = 1
                buffer[4][1][:, 0] = 1
                if model_name.__contains__('x5new'):
                    # 'x5new' models count 1/8 of the negatives into the positive split
                    pos1 = int(buffer[2][0] + (len(buffer[3][0]) - buffer[2][0]) // 8)
                    pos2 = int(buffer[2][1] + (len(buffer[3][1]) - buffer[2][1]) // 8)
                else:
                    pos1 = buffer[2][0]
                    pos2 = buffer[2][1]
                # regroup rows as [positives of both images..., negatives of both...]
                for ii in list(range(3, 7)):
                    pos_h_boxes = np.concatenate([buffer[ii][0][:pos1], buffer[ii][1][:pos2]], axis=0)
                    neg_h_boxes = np.concatenate([buffer[ii][0][pos1:], buffer[ii][1][pos2:]], axis=0)
                    buffer[ii] = np.concatenate([pos_h_boxes, neg_h_boxes], axis=0)
                    # buffer[ii] = np.concatenate([buffer[ii][0], buffer[ii][1]], axis=0)
                # NOTE(review): this recombination is a no-op (list is unchanged);
                # presumably leftover from a variant with an extra trailing slot.
                buffer = buffer[:-1] + buffer[-1:]
                # zero-pad both images to the shared max H/W, then stack
                im_shape1 = buffer[0][0].shape
                im_shape2 = buffer[0][1].shape
                width = max(im_shape1[1], im_shape2[1])
                height = max(im_shape1[2], im_shape2[2])
                im1 = np.pad(buffer[0][0],
                             [(0, 0), (0, max(0, width - im_shape1[1])), (0, max(0, height - im_shape1[2])), (0, 0)],
                             mode='constant')
                im2 = np.pad(buffer[0][1],
                             [(0, 0), (0, max(0, width - im_shape2[1])), (0, max(0, height - im_shape2[2])), (0, 0)],
                             mode='constant')
                split_idx = pos1
                yield np.concatenate([im1, im2], axis=0), buffer[1], pos1 + pos2, buffer[3], buffer[4], buffer[5], \
                      buffer[6], split_idx
                buffer = [[] for i in range(7 )]
                # avg_time = ((time.time() - st) + avg_time * count_time) / (count_time + 1)
                # count_time += 1
                # print('generate batch:', time.time() - st, "average;", avg_time)
                # st = time.time()

    if pattern_type == 1:
        pattern_channel = 3
    else:
        pattern_channel = 2
    dataset = tf.data.Dataset.from_generator(
        partial(generator3, Trainval_GT, Trainval_N, Pos_augment, Neg_select, augment_type),
        output_types=(
            tf.float32, tf.int32, tf.int64, tf.float32, tf.float32, tf.float32, tf.float32, tf.int32),
        output_shapes=(
            tf.TensorShape([2, None, None, 3]),
            tf.TensorShape([2, ]),
            tf.TensorShape([]),
            tf.TensorShape([None, 5]),
            tf.TensorShape([None, 5]),
            tf.TensorShape([None, 600]),
            tf.TensorShape([None, 64, 64, pattern_channel]),
            tf.TensorShape([])
            # tf.TensorShape([2, None, None, None, 1])
        )
    )
    dataset = dataset.prefetch(100)
    # dataset = dataset.shuffle(1000)
    # dataset = dataset.repeat(1000).shuffle(1000)
    iterator = dataset.make_one_shot_iterator()
    image, image_id, num_pos, Human_augmented, Object_augmented, action_HO, sp, split_idx = iterator.get_next()
    return image, image_id, num_pos, Human_augmented, Object_augmented, action_HO, sp, split_idx
def get_new_Trainval_GT(Trainval_GT, is_zero_shot, unseen_idx):
    """Filter out GT items that contain any unseen (zero-shot) HOI class.

    :param Trainval_GT: list of GT items; item[1] is the item's HOI class ids.
    :param is_zero_shot: filtering is applied only when this is > 0.
    :param unseen_idx: iterable of HOI class ids held out for zero-shot.
    :return: the filtered list (or the original list when is_zero_shot <= 0).
    """
    unseen_idx = set(unseen_idx)
    if is_zero_shot > 0:
        # keep only items with no label overlap with the unseen classes
        Trainval_GT = [item for item in Trainval_GT if unseen_idx.isdisjoint(item[1])]
    return Trainval_GT
def extract_semi_data(semi_type, model_name):
    """Load the semi-supervised / auxiliary GT list selected by *semi_type*.

    Each branch picks one or two pickle files from cfg.DATA_DIR.  Image ids
    of non-HICO data are shifted by MAX_HICO_ID (or MAX_COCO_ID) — presumably
    to keep them disjoint from HICO image ids; verify against the consumers.
    The '*zs' variants keep only items whose labels intersect the unseen
    classes of the model's zero-shot setting.

    :param semi_type: one of 'default', 'coco', 'coco1', 'coco2', 'coco3',
        'rehico', 'vcoco', 'both', 'both1', 'bothzs', 'cocozs'.
    :param model_name: used to derive the zero-shot setting for the
        'bothzs' / 'cocozs' / 'rehico' branches.
    :return: list of GT items.
    """
    print(semi_type, '===========')
    # fallback path; overwritten by the single-file branches below
    semi_pkl_path = cfg.DATA_DIR + '/' + 'Trainval_GT_HICO.pkl'
    if semi_type == 'default':
        semi_pkl_path = cfg.DATA_DIR + '/' + 'Trainval_GT_HICO.pkl'
    elif semi_type == 'coco':
        semi_pkl_path = cfg.DATA_DIR + '/' + 'Trainval_GT_HICO_semi.pkl'
    elif semi_type == 'coco2':
        semi_pkl_path = cfg.DATA_DIR + '/' + 'Trainval_GT_HICO_semi_coco2.pkl'
    elif semi_type == 'coco1':  # train2017
        semi_pkl_path = cfg.DATA_DIR + '/' + 'Trainval_GT_HICO_semi1.pkl'
    elif semi_type == 'rehico':
        semi_pkl_path = cfg.DATA_DIR + '/' + 'Trainval_GT_HICO.pkl'
    elif semi_type == 'vcoco':
        semi_pkl_path = cfg.DATA_DIR + '/' + 'Trainval_GT_HICO_vcoco_semi.pkl'
    if semi_type == 'both':
        # COCO-mined semi data + plain HICO GT, ids of the former shifted
        Trainval_semi = pickle.load(open(cfg.DATA_DIR + '/' + 'Trainval_GT_HICO_semi.pkl', "rb"), encoding='latin1')
        Trainval_semi1 = pickle.load(open(cfg.DATA_DIR + '/' + 'Trainval_GT_HICO.pkl', "rb"), encoding='latin1')
        # Trainval_semi = Trainval_semi[:5000]
        for item in Trainval_semi:
            item[0] += MAX_HICO_ID
        Trainval_semi.extend(Trainval_semi1)
    elif semi_type == 'both1':
        # V-COCO-mined semi data + plain HICO GT
        Trainval_semi = pickle.load(open(cfg.DATA_DIR + '/' + 'Trainval_GT_HICO_vcoco_semi.pkl', "rb"),
                                    encoding='latin1')
        Trainval_semi1 = pickle.load(open(cfg.DATA_DIR + '/' + 'Trainval_GT_HICO.pkl', "rb"), encoding='latin1')
        for item in Trainval_semi:
            item[0] += MAX_HICO_ID
        Trainval_semi.extend(Trainval_semi1)
        pass
    elif semi_type == 'bothzs':
        # semi data restricted to unseen classes + plain HICO GT
        Trainval_semi = pickle.load(open(cfg.DATA_DIR + '/' + 'Trainval_GT_HICO_semi.pkl', "rb"), encoding='latin1')
        Trainval_semi1 = pickle.load(open(cfg.DATA_DIR + '/' + 'Trainval_GT_HICO.pkl', "rb"), encoding='latin1')
        zero_shot_type = get_zero_shot_type(model_name)
        unseen_idx = get_unseen_index(zero_shot_type)
        print(unseen_idx)
        new_semi = []
        print(len(Trainval_semi))  # 604907
        for item in Trainval_semi:
            item[0] += MAX_HICO_ID
            # keep only semi items that touch at least one unseen class
            if len(item[1]) > 0 and len(list(set(item[1]).intersection(set(unseen_idx)))) > 0:
                new_semi.append(item)
        print(len(new_semi), 'bothzs semi')  # 524239 bothzs semi zs3 517008 bothzs semi zs4
        print(type(Trainval_semi))
        Trainval_semi = new_semi
        Trainval_semi.extend(Trainval_semi1)
    elif semi_type == 'cocozs':
        # COCO semi data restricted to unseen classes, no HICO GT appended
        Trainval_semi = pickle.load(open(cfg.DATA_DIR + '/' + 'Trainval_GT_HICO_semi1.pkl', "rb"), encoding='latin1')
        zero_shot_type = get_zero_shot_type(model_name)
        unseen_idx = get_unseen_index(zero_shot_type)
        print(unseen_idx)
        new_semi = []
        for item in Trainval_semi:
            item[0] += MAX_HICO_ID
            if len(item[1]) > 0 and len(list(set(item[1]).intersection(set(unseen_idx)))) > 0:
                new_semi.append(item)
        print(type(Trainval_semi))
        Trainval_semi = new_semi
    elif semi_type == 'coco3':
        # COCO semi data + Objects365/COCO semi data, each with its own id offset
        Trainval_semi = pickle.load(open(cfg.DATA_DIR + '/' + 'Trainval_GT_HICO_semi1.pkl', "rb"), encoding='latin1')
        Trainval_semi1 = pickle.load(
            open(cfg.DATA_DIR + '/' + 'Trainval_GT_HICO_obj365_coco_semi_obj365_coco.pkl', "rb"), encoding='latin1')
        for item in Trainval_semi:
            item[0] += MAX_HICO_ID
        for item in Trainval_semi1:
            item[0] += MAX_COCO_ID
        Trainval_semi.extend(Trainval_semi1)
    else:
        # single-file branches resolved above
        with open(semi_pkl_path, "rb") as f:
            Trainval_semi = pickle.load(f, encoding='latin1')
        if semi_type == 'coco' or semi_type == 'coco2' or semi_type == 'coco1' or semi_type == 'vcoco':
            for item in Trainval_semi:
                item[0] += MAX_HICO_ID
        if semi_type == 'rehico' and model_name.__contains__('_zs11'):
            # drop HICO items containing unseen classes for the zs11 setting
            zero_shot_type = get_zero_shot_type(model_name)
            unseen_idx = get_unseen_index(zero_shot_type)
            Trainval_semi = get_new_Trainval_GT(Trainval_semi, zero_shot_type, unseen_idx)
            pass
    return Trainval_semi
def obtain_data2_large(Pos_augment=15, Neg_select=60, augment_type=0, model_name='',
                       pattern_type=False, zero_shot_type=0, isalign=False, bnum=2, neg_type_ratio=0):
    """tf.data input pipeline batching *bnum* HICO-DET images per step.

    Generalization of obtain_data2 from a fixed pair of images to ``bnum``
    images per batch.  Selects the pose-annotated pickles when
    pattern_type == 1, otherwise the plain HICO pickles.

    :param bnum: number of images stacked into one batch.
    :param neg_type_ratio: unused here; kept for interface compatibility.
    :return: (image, image_id, num_pos, Human_augmented, Object_augmented,
        action_HO, sp, split_idx) tensors from the dataset iterator.
    """
    # bnum = 2
    if pattern_type == 1:
        Trainval_GT = pickle.load(open(cfg.DATA_DIR + '/' + 'Trainval_GT_HICO_with_pose.pkl', "rb"), encoding='latin1')
        Trainval_N = pickle.load(open(cfg.DATA_DIR + '/' + 'Trainval_Neg_HICO_with_pose.pkl', "rb"), encoding='latin1')
    else:
        Trainval_GT = pickle.load(open(cfg.DATA_DIR + '/' + 'Trainval_GT_HICO.pkl', "rb"), encoding='latin1')
        Trainval_N = pickle.load(open(cfg.DATA_DIR + '/' + 'Trainval_Neg_HICO.pkl', "rb"), encoding='latin1')
    g_func = generator2

    def generator3(Trainval_GT, Trainval_N, Pos_augment, Neg_select, augment_type):
        # buffer slots: 0 image, 1 image_id, 2 num_pos, 3 human boxes,
        # 4 object boxes, 5 action labels, 6 spatial patterns (slot 7 unused here)
        buffer = [[] for i in range(8)]
        import time
        st = time.time()
        count_time = 0
        avg_time = 0
        # np.random.seed(0)
        for im_orig, image_id, num_pos, Human_augmented, Object_augmented, \
                action_HO, Pattern in g_func(Trainval_GT, Trainval_N, Pos_augment, Neg_select,
                                             augment_type,
                                             pattern_type, zero_shot_type, isalign, 0):
            buffer[0].append(im_orig)
            buffer[1].append(image_id)
            buffer[2].append(num_pos)
            buffer[3].append(Human_augmented)
            buffer[4].append(Object_augmented)
            buffer[5].append(action_HO)
            buffer[6].append(Pattern)
            # stamp each box row with its image's index within the batch (column 0)
            buffer[3][-1][:, 0] = len(buffer[3]) - 1
            buffer[4][-1][:, 0] = len(buffer[3]) - 1
            if len(buffer[0]) >= bnum:
                # (unlike obtain_data2, no swap to put the larger image first)
                pos_semi_list = []
                if model_name.__contains__('x5new'):
                    # 'x5new' models count 1/8 of the negatives into the positive split
                    for b in range(bnum):
                        pos_semi_list.append(int(buffer[2][b] + (len(buffer[3][b]) - buffer[2][b]) // 8))
                else:
                    for b in range(bnum):
                        pos_semi_list.append(buffer[2][b])
                # regroup rows as [positives of all images..., negatives of all images...]
                for ii in range(3, 7):
                    pos_h_boxes = np.concatenate([buffer[ii][pi][:pos2] for pi, pos2 in enumerate(pos_semi_list)],
                                                 axis=0)
                    neg_h_boxes = np.concatenate([buffer[ii][pi][pos2:] for pi, pos2 in enumerate(pos_semi_list)],
                                                 axis=0)
                    buffer[ii] = np.concatenate([pos_h_boxes, neg_h_boxes], axis=0)
                # zero-pad every image to the batch max H/W, then stack
                width = max([buffer[0][b].shape[1] for b in range(bnum)])
                height = max([buffer[0][b].shape[2] for b in range(bnum)])
                im_list = []
                for b in range(bnum):
                    im_list.append(np.pad(buffer[0][b], [(0, 0), (0, max(0, width - buffer[0][b].shape[1])),
                                                         (0, max(0, height - buffer[0][b].shape[2])), (0, 0)],
                                          mode='constant'))
                yield np.concatenate(im_list, axis=0), buffer[1], sum(pos_semi_list), \
                      buffer[3], buffer[4], buffer[5], buffer[6], pos_semi_list[0]
                buffer = [[] for i in range(8)]
                # avg_time = ((time.time() - st) + avg_time * count_time) / (count_time + 1)
                # count_time += 1
                # print('generate batch:', time.time() - st, "average;", avg_time)
                # st = time.time()

    if pattern_type == 1:
        pattern_channel = 3
    else:
        pattern_channel = 2
    dataset = tf.data.Dataset.from_generator(
        partial(generator3, Trainval_GT, Trainval_N, Pos_augment, Neg_select, augment_type),
        output_types=(
            tf.float32, tf.int32, tf.int64, tf.float32, tf.float32, tf.float32, tf.float32, tf.int32),
        output_shapes=(
            tf.TensorShape([bnum, None, None, 3]),
            tf.TensorShape([bnum, ]),
            tf.TensorShape([]),
            tf.TensorShape([None, 5]),
            tf.TensorShape([None, 5]),
            tf.TensorShape([None, 600]),
            tf.TensorShape([None, 64, 64, pattern_channel]),
            tf.TensorShape([])
        )
    )
    dataset = dataset.prefetch(100)
    iterator = dataset.make_one_shot_iterator()
    image, image_id, num_pos, Human_augmented, Object_augmented, action_HO, sp, split_idx = iterator.get_next()
    return image, image_id, num_pos, Human_augmented, Object_augmented, action_HO, sp, split_idx
def obtain_batch_data_semi1(Pos_augment=15, Neg_select=60, augment_type=0, model_name='', pattern_type=0,
                            zero_shot_type=0, isalign=False, epoch=0, semi_type='default', bnum=2, neg_type_ratio=0):
    """tf.data pipeline mixing one supervised HICO image with *bnum* semi images per batch.

    For every supervised image from generator2, pulls ``bnum`` additional
    images from a semi-supervised generator (built over extract_semi_data),
    stacks all bnum+1 images into one batch, and regroups boxes/labels so
    that all positive rows precede all negative rows.

    :param model_name: must be non-trivial; '*x5new*' names move 1/8 of the
        negatives into the positive split.
    :param semi_type: selects the semi-supervised GT source (extract_semi_data).
    :param bnum: number of semi images appended to each supervised image.
    :param neg_type_ratio: unused here; kept for interface compatibility.
    :return: (image, image_id, num_pos, Human_augmented, Object_augmented,
        action_HO, sp, split_idx) tensors from the dataset iterator.
    """
    assert len(model_name) > 1, model_name
    with open(cfg.DATA_DIR + '/' + 'Trainval_GT_HICO.pkl', "rb") as f:
        Trainval_GT = pickle.load(f, encoding='latin1')
    Trainval_semi = extract_semi_data(semi_type, model_name)
    with open(cfg.DATA_DIR + '/' + 'Trainval_Neg_HICO.pkl', "rb") as f:
        Trainval_N = pickle.load(f, encoding='latin1')
    g_func = generator2

    def generator3(Trainval_GT, Trainval_N, Pos_augment, Neg_select, augment_type):
        # buffer slots: 0 image, 1 image_id, 2 num_pos, 3 human boxes,
        # 4 object boxes, 5 action labels, 6 spatial patterns
        buffer = [[] for i in range(7)]
        import time
        st = time.time()
        count_time = 0
        avg_time = 0
        # np.random.seed(0)
        # semi generator draws from Trainval_semi with no mined negatives
        semi_g = generator2(Trainval_semi, {}, Pos_augment, Neg_select, augment_type, False, zero_shot_type, isalign,
                            epoch, )
        for im_orig, image_id, num_pos, Human_augmented, Object_augmented, \
                action_HO, Pattern in g_func(Trainval_GT, Trainval_N, Pos_augment, Neg_select,
                                             augment_type,
                                             pattern_type, zero_shot_type, False, epoch,
                                             ):
            # slot 0 of each buffer entry list: the supervised image
            buffer[0].append(im_orig)
            buffer[1].append(image_id)
            buffer[2].append(num_pos)
            buffer[3].append(Human_augmented)
            buffer[4].append(Object_augmented)
            buffer[5].append(action_HO)
            buffer[6].append(Pattern)
            # slots 1..bnum: semi images, boxes stamped with their batch index
            for b in range(bnum):
                im_orig, image_id, num_pos, Human_augmented, Object_augmented, action_HO, Pattern, = next(semi_g)
                buffer[0].append(im_orig)
                buffer[1].append(image_id)
                buffer[2].append(num_pos)
                buffer[3].append(Human_augmented)
                buffer[4].append(Object_augmented)
                buffer[5].append(action_HO)
                buffer[6].append(Pattern)
                buffer[3][b + 1][:, 0] = b + 1
                buffer[4][b + 1][:, 0] = b + 1
                # semi samples carry no mined negatives, so all rows are positive
                assert num_pos == len(Human_augmented)
            pos_semi_list = []
            if model_name.__contains__('x5new'):
                # count 1/8 of the supervised negatives into the positive split
                pos1 = int(buffer[2][0] + (len(buffer[3][0]) - buffer[2][0]) // 8)
                assert len(buffer[3][1]) == buffer[2][1], (len(buffer[3][1]), buffer[2][1],)
                for b in range(bnum):
                    pos_semi_list.append(int(buffer[2][b + 1] + (len(buffer[3][b + 1]) - buffer[2][b + 1]) // 8))
            else:
                pos1 = buffer[2][0]
                for b in range(bnum):
                    pos_semi_list.append(buffer[2][b + 1])
            # regroup rows as [all positives..., all negatives...]
            for ii in range(3, 7):
                pos_h_boxes = np.concatenate(
                    [buffer[ii][0][:pos1]] + [buffer[ii][pi + 1][:pos2] for pi, pos2 in enumerate(pos_semi_list)],
                    axis=0)
                neg_h_boxes = np.concatenate(
                    [buffer[ii][0][pos1:]] + [buffer[ii][pi + 1][pos2:] for pi, pos2 in enumerate(pos_semi_list)],
                    axis=0)
                buffer[ii] = np.concatenate([pos_h_boxes, neg_h_boxes], axis=0)
            # zero-pad all bnum+1 images to the batch max H/W, then stack
            width = max([buffer[0][b].shape[1] for b in range(bnum + 1)])
            height = max([buffer[0][b].shape[2] for b in range(bnum + 1)])
            im_list = []
            for b in range(bnum + 1):
                im_list.append(np.pad(buffer[0][b], [(0, 0), (0, max(0, width - buffer[0][b].shape[1])),
                                                     (0, max(0, height - buffer[0][b].shape[2])), (0, 0)],
                                      mode='constant'))
            # BUG FIX: removed the dead recompute of width/height from buffer[7]
            # copied from a pose-enabled variant — buffer only has 7 slots here,
            # so buffer[7] raised IndexError on every batch (results were unused).
            split_idx = pos1
            yield np.concatenate(im_list, axis=0), buffer[1], pos1 + sum(pos_semi_list), \
                  buffer[3], buffer[4], buffer[5], buffer[6], split_idx
            buffer = [[] for i in range(7)]
            # avg_time = ((time.time() - st) + avg_time * count_time) / (count_time + 1)
            # count_time += 1
            # print('generate batch:', time.time() - st, "average;", avg_time)
            # st = time.time()

    pattern_channel = 2
    dataset = tf.data.Dataset.from_generator(
        partial(generator3, Trainval_GT, Trainval_N, Pos_augment, Neg_select, augment_type),
        output_types=(
            tf.float32, tf.int32, tf.int64, tf.float32, tf.float32, tf.float32, tf.float32, tf.int32),
        output_shapes=(
            tf.TensorShape([bnum + 1, None, None, 3]),
            tf.TensorShape([bnum + 1, ]),
            tf.TensorShape([]),
            tf.TensorShape([None, 5]),
            tf.TensorShape([None, 5]),
            tf.TensorShape([None, 600]),
            tf.TensorShape([None, 64, 64, pattern_channel]),
            tf.TensorShape([])
        )
    )
    dataset = dataset.prefetch(100)
    iterator = dataset.make_one_shot_iterator()
    image, image_id, num_pos, Human_augmented, Object_augmented, action_HO, sp, split_idx = iterator.get_next()
    return image, image_id, num_pos, Human_augmented, Object_augmented, action_HO, sp, split_idx
def Augmented_HO_Neg_HICO2(GT, Trainval_Neg, shape, Pos_augment, Neg_select, pose_type=0, isalign=False):
    """Augment one HICO GT pair and append image-level negatives (pose-aware variant).

    :param GT: GT entry [image_id, hoi_ids, human_box, object_box, ?, pose].
    :param Trainval_Neg: dict image_id -> list of pre-mined negative samples.
    :param shape: image shape used by Augmented_box to clip jittered boxes.
    :param Pos_augment: number of augmented copies per positive box.
    :param Neg_select: maximum number of negatives sampled for this image.
    :param pose_type: >0 collects pose info and uses 3 pattern channels.
    :param isalign: pad both box lists to exactly Pos_augment + 1 entries.
    :return: (Pattern, Human_augmented, Object_augmented, action_HO, num_pos)
    """
    image_id = GT[0]
    Human = GT[2]
    Object = GT[3]
    action_HO_ = Generate_action_HICO(GT[1])
    Human_augmented = Augmented_box(Human, shape, image_id, Pos_augment)
    Object_augmented = Augmented_box(Object, shape, image_id, Pos_augment)
    if isalign:
        while len(Human_augmented) < Pos_augment + 1:
            Human_augmented = np.concatenate(
                [Human_augmented, Human_augmented[-(Pos_augment + 1 - len(Human_augmented)):]], axis=0)
        while len(Object_augmented) < Pos_augment + 1:
            # BUG FIX: pad by the *object* shortfall; the original used
            # len(Human_augmented) here (copy-paste from the human loop),
            # which mis-sized the padding slice.
            Object_augmented = np.concatenate(
                [Object_augmented, Object_augmented[-(Pos_augment + 1 - len(Object_augmented)):]], axis=0)
    # keep the human/object pairing one-to-one
    Human_augmented = Human_augmented[:min(len(Human_augmented), len(Object_augmented))]
    Object_augmented = Object_augmented[:min(len(Human_augmented), len(Object_augmented))]
    if isalign:
        assert len(Human_augmented) == Pos_augment + 1, (len(Human_augmented), Pos_augment)
    num_pos = len(Human_augmented)
    if pose_type > 0:
        pose_list = [GT[5]] * num_pos
    # one label row per positive pair (replaces the O(n^2) concatenate loop)
    action_HO = np.tile(action_HO_, [num_pos, 1])
    if image_id in Trainval_Neg:
        if len(Trainval_Neg[image_id]) < Neg_select:
            for Neg in Trainval_Neg[image_id]:
                if pose_type > 0:
                    pose_list.append(Neg[7])
                Human_augmented = np.concatenate(
                    (Human_augmented, np.array([0, Neg[2][0], Neg[2][1], Neg[2][2], Neg[2][3]]).reshape(1, 5)), axis=0)
                Object_augmented = np.concatenate(
                    (Object_augmented, np.array([0, Neg[3][0], Neg[3][1], Neg[3][2], Neg[3][3]]).reshape(1, 5)), axis=0)
                action_HO = np.concatenate((action_HO, Generate_action_HICO([Neg[1]])), axis=0)
        else:
            List = random.sample(range(len(Trainval_Neg[image_id])), len(Trainval_Neg[image_id]))
            for i in range(Neg_select):
                Neg = Trainval_Neg[image_id][List[i]]
                if pose_type > 0:
                    pose_list.append(Neg[7])
                Human_augmented = np.concatenate(
                    (Human_augmented, np.array([0, Neg[2][0], Neg[2][1], Neg[2][2], Neg[2][3]]).reshape(1, 5)), axis=0)
                Object_augmented = np.concatenate(
                    (Object_augmented, np.array([0, Neg[3][0], Neg[3][1], Neg[3][2], Neg[3][3]]).reshape(1, 5)), axis=0)
                action_HO = np.concatenate((action_HO, Generate_action_HICO([Neg[1]])), axis=0)
    num_pos_neg = len(Human_augmented)
    if pose_type > 0:
        pattern_channel = 3
    else:
        pattern_channel = 2
    Pattern = np.empty((0, 64, 64, pattern_channel), dtype=np.float32)
    for i in range(num_pos_neg):
        # NOTE(review): Get_next_sp's channel count is not visible here; this
        # reshape assumes it matches pattern_channel — confirm for pose_type > 0.
        Pattern_ = Get_next_sp(Human_augmented[i][1:], Object_augmented[i][1:])
        Pattern_ = Pattern_.reshape(1, 64, 64, pattern_channel)
        Pattern = np.concatenate((Pattern, Pattern_), axis=0)
    Pattern = Pattern.reshape(num_pos_neg, 64, 64, pattern_channel)
    Human_augmented = Human_augmented.reshape(num_pos_neg, 5)
    Object_augmented = Object_augmented.reshape(num_pos_neg, 5)
    action_HO = action_HO.reshape(num_pos_neg, 600)
    return Pattern, Human_augmented, Object_augmented, action_HO, num_pos
def coco_generator1(Pos_augment=15, Neg_select=30, augment_type=0, with_pose=False, is_zero_shot=0):
    """Endless V-COCO generator that groups several GT interactions per image.

    Groups Trainval_GT entries by image id, yields up to ``inters_per_img``
    interactions per image, and repeats image ids so that images with more
    interactions are visited more often.  Loops over the dataset forever.

    :param Pos_augment: base positive-augmentation count (scaled by get_aug_params).
    :param Neg_select: base negative count (scaled by get_aug_params).
    :param augment_type: augmentation strategy; >1 defers all negatives to the
        last interaction of each image.
    :param with_pose: unused; kept for interface compatibility.
    :param is_zero_shot: unused; kept for interface compatibility.
    :yield: (im_orig, image_id, num_pos, blobs) for one image.
    """
    Trainval_GT = pickle.load(open(cfg.DATA_DIR + '/' + 'Trainval_GT_VCOCO.pkl', "rb"), encoding='latin1')
    Trainval_N = pickle.load(open(cfg.DATA_DIR + '/' + 'Trainval_Neg_VCOCO.pkl', "rb"), encoding='latin1')
    Neg_select1, Pos_augment1, inters_per_img = get_aug_params(Neg_select, Pos_augment, augment_type)
    index_list = list(range(0, len(Trainval_GT)))
    print("generator1", inters_per_img, Pos_augment1, 'Neg_select:', Neg_select1, augment_type)
    import math
    # map image id -> indices of its GT interactions
    img_id_index_map = {}
    for i, gt in enumerate(Trainval_GT):
        img_id = gt[0]
        if img_id in img_id_index_map:
            img_id_index_map[img_id].append(i)
        else:
            img_id_index_map[img_id] = [i]
    img_id_list = list(img_id_index_map.keys())
    # repeat an image id once per extra chunk of inters_per_img interactions
    for k, v in img_id_index_map.items():
        for i in range(math.ceil(len(v) * 1.0 / inters_per_img) - 1):
            img_id_list.append(k)
    import copy
    while True:
        # fresh per-epoch copy so chunks can be consumed destructively
        running_map = copy.deepcopy(img_id_index_map)
        np.random.shuffle(index_list)
        for k in running_map.keys():
            np.random.shuffle(running_map[k])
        for img_id_tmp in img_id_list:
            # take (and remove) the next chunk of interactions for this image
            gt_ids = running_map[img_id_tmp][:inters_per_img]
            running_map[img_id_tmp] = running_map[img_id_tmp][inters_per_img:]
            image_id = img_id_tmp
            im_file = cfg.DATA_DIR + '/' + 'v-coco/coco/images/train2014/COCO_train2014_' + (str(image_id)).zfill(
                12) + '.jpg'
            import os
            if not os.path.exists(im_file):
                print('not exist', im_file)
            import cv2
            im = cv2.imread(im_file)
            im_orig = im.astype(np.float32, copy=True)
            im_orig -= cfg.PIXEL_MEANS  # mean-subtract, keep float32
            im_shape = im.shape
            # accumulate all interactions of this chunk into one blob dict
            blobs = {}
            blobs['H_boxes'] = np.empty([0, 5], dtype=np.float32)
            blobs['Hsp_boxes'] = np.empty([0, 5], dtype=np.float32)
            blobs['O_boxes'] = np.empty([0, 5], dtype=np.float32)
            blobs['gt_class_sp'] = np.empty([0, 29], dtype=np.float32)
            blobs['gt_class_HO'] = np.empty([0, 29], dtype=np.float32)
            blobs['gt_class_H'] = np.empty([0, 29], dtype=np.float32)
            blobs['gt_class_C'] = np.empty([0, 238], dtype=np.float32)
            blobs['Mask_sp'] = np.empty([0, 29], dtype=np.float32)
            blobs['Mask_HO'] = np.empty([0, 29], dtype=np.float32)
            blobs['Mask_H'] = np.empty([0, 29], dtype=np.float32)
            blobs['sp'] = np.empty([0, 64, 64, 2], dtype=np.float32)
            for i in gt_ids:
                GT = Trainval_GT[i]
                assert GT[0] == image_id
                # im_orig = im_orig.reshape(1, im_shape[0], im_shape[1], 3)
                cur_neg_select = Neg_select1
                cur_pos_augment = Pos_augment1
                if augment_type > 1:
                    # defer all negatives to the last interaction of the image
                    if i == gt_ids[-1]:
                        cur_neg_select = Neg_select1 * len(gt_ids)
                    else:
                        cur_neg_select = 0
                else:
                    cur_neg_select = Neg_select1
                Pattern, Human_augmented_sp, Human_augmented, Object_augmented, \
                    action_sp, action_HO, action_H, mask_sp, mask_HO, mask_H, action_compose = Augmented_HO_spNeg(
                        GT, Trainval_N, im_shape, Pos_augment=cur_pos_augment, Neg_select=cur_neg_select)
                blobs['H_boxes'] = np.concatenate((blobs['H_boxes'], Human_augmented), axis=0)
                blobs['Hsp_boxes'] = np.concatenate((blobs['Hsp_boxes'], Human_augmented_sp), axis=0)
                blobs['O_boxes'] = np.concatenate((blobs['O_boxes'], Object_augmented), axis=0)
                blobs['gt_class_sp'] = np.concatenate((blobs['gt_class_sp'], action_sp), axis=0)
                blobs['gt_class_HO'] = np.concatenate((blobs['gt_class_HO'], action_HO), axis=0)
                blobs['gt_class_H'] = np.concatenate((blobs['gt_class_H'], action_H), axis=0)
                blobs['gt_class_C'] = np.concatenate((blobs['gt_class_C'], action_compose), axis=0)
                blobs['Mask_sp'] = np.concatenate((blobs['Mask_sp'], mask_sp), axis=0)
                blobs['Mask_HO'] = np.concatenate((blobs['Mask_HO'], mask_HO), axis=0)
                blobs['Mask_H'] = np.concatenate((blobs['Mask_H'], mask_H), axis=0)
                blobs['sp'] = np.concatenate((blobs['sp'], Pattern), axis=0)
            yield (im_orig, image_id, len(blobs['gt_class_H']), blobs)
def coco_generator(Pos_augment=15, Neg_select=30, augment_type=0, with_pose=False, is_zero_shot=0):
    """Endless V-COCO training-sample generator (one GT interaction per yield).

    Loads the V-COCO GT/negative pickles once, then loops forever over a
    shuffled index list, reading each training image and building the
    augmented blobs via Augmented_HO_spNeg.

    :param Pos_augment: number of augmented positive boxes per GT pair.
    :param Neg_select: number of negatives sampled per image.
    :param augment_type: unused here; callers pick this generator when it is 0.
    :param with_pose: unused; kept for interface compatibility.
    :param is_zero_shot: unused; kept for interface compatibility.
    :yield: (im_orig, image_id, num_pos, blobs) for one GT interaction.
    """
    # NOTE: removed an unused `i = 0` and a large unused `set_list` literal
    # that the original carried but never referenced.
    Trainval_GT = pickle.load(open(cfg.DATA_DIR + '/' + 'Trainval_GT_VCOCO_with_pose_obj.pkl', "rb"), encoding='latin1')
    Trainval_N = pickle.load(open(cfg.DATA_DIR + '/' + 'Trainval_Neg_VCOCO_with_pose_obj.pkl', "rb"), encoding='latin1')
    index_list = list(range(0, len(Trainval_GT)))
    while True:
        np.random.shuffle(index_list)
        for i in index_list:
            GT = Trainval_GT[i]
            image_id = GT[0]
            im_file = cfg.DATA_DIR + '/' + 'v-coco/coco/images/train2014/COCO_train2014_' + (str(image_id)).zfill(
                12) + '.jpg'
            im = cv2.imread(im_file)
            im_orig = im.astype(np.float32, copy=True)
            im_orig -= cfg.PIXEL_MEANS  # mean-subtract, keep float32
            im_shape = im_orig.shape
            im_orig = im_orig.reshape(1, im_shape[0], im_shape[1], 3)
            Pattern, Human_augmented_sp, Human_augmented, Object_augmented, \
                action_sp, action_HO, action_H, mask_sp, mask_HO, mask_H, gt_compose = Augmented_HO_spNeg(
                    GT, Trainval_N, im_shape, Pos_augment, Neg_select)
            blobs = {
                'H_boxes': Human_augmented,
                'Hsp_boxes': Human_augmented_sp,
                'O_boxes': Object_augmented,
                'gt_class_sp': action_sp,
                'gt_class_HO': action_HO,
                'gt_class_H': action_H,
                'gt_class_C': gt_compose,
                'Mask_sp': mask_sp,
                'Mask_HO': mask_HO,
                'Mask_H': mask_H,
                'sp': Pattern,
            }
            yield (im_orig, image_id, len(action_H), blobs)
def obtain_coco_data(Pos_augment=15, Neg_select=30, augment_type=0):
    """Build a one-shot tf.data pipeline over the V-COCO training generators.

    :param Pos_augment: positive augmentation count forwarded to the generator
    :param Neg_select: negative sample count forwarded to the generator
    :param augment_type: 0 selects coco_generator, any other value coco_generator1
    :return: (image, image_id, num_pos, blobs) tensors from a one-shot iterator
    """
    gen_fn = coco_generator if augment_type == 0 else coco_generator1
    # Per-sample blob dict: dtypes and (static) shapes, 29 verb classes,
    # 238 composed classes, 64x64x3 spatial pattern.
    blob_types = {
        'H_boxes': tf.float32,
        'Hsp_boxes': tf.float32,
        'O_boxes': tf.float32,
        'gt_class_sp': tf.float32,
        'gt_class_HO': tf.float32,
        'gt_class_H': tf.float32,
        'gt_class_C': tf.float32,
        'Mask_sp': tf.float32,
        'Mask_HO': tf.float32,
        'Mask_H': tf.float32,
        'sp': tf.float32,
    }
    blob_shapes = {
        'H_boxes': tf.TensorShape([None, 5]),
        'Hsp_boxes': tf.TensorShape([None, 5]),
        'O_boxes': tf.TensorShape([None, 5]),
        'gt_class_sp': tf.TensorShape([None, 29]),
        'gt_class_HO': tf.TensorShape([None, 29]),
        'gt_class_H': tf.TensorShape([None, 29]),
        'gt_class_C': tf.TensorShape([None, 238]),
        'Mask_sp': tf.TensorShape([None, 29]),
        'Mask_HO': tf.TensorShape([None, 29]),
        'Mask_H': tf.TensorShape([None, 29]),
        'sp': tf.TensorShape([None, 64, 64, 3]),
    }
    dataset = tf.data.Dataset.from_generator(
        partial(gen_fn, Pos_augment, Neg_select, augment_type),
        output_types=(tf.float32, tf.int32, tf.int32, blob_types),
        output_shapes=(tf.TensorShape([1, None, None, 3]), tf.TensorShape([]),
                       tf.TensorShape([]), blob_shapes))
    dataset = dataset.prefetch(100)
    iterator = dataset.make_one_shot_iterator()
    image, image_id, num_pos, blobs = iterator.get_next()
    return image, image_id, num_pos, blobs
def obtain_coco_data1(Pos_augment=15, Neg_select=30, augment_type=0, with_pose=False, is_zero_shot=0):
    """Build a paired-sample one-shot tf.data pipeline over the V-COCO generators.

    Consecutive generator items are buffered two at a time; within each pair the
    sample with the larger positive count is emitted first. The iterator then
    yields both halves of the pair in a single 8-tuple.

    :return: ([image, image1], [image_id, image_id1], [num_pos, num_pos1], [blobs, blobs1])
    """
    g_func = coco_generator if augment_type == 0 else coco_generator1

    def generator3(Pos_augment, Neg_select, augment_type, with_pose, is_zero_shot):
        # Accumulate (image, id, num_pos, blobs) columns until a pair is full.
        pair = [[] for _ in range(4)]
        for im_orig, image_id, num_pos, blobs in g_func(Pos_augment, Neg_select, augment_type,
                                                        with_pose, is_zero_shot):
            pair[0].append(im_orig)
            pair[1].append(image_id)
            pair[2].append(num_pos)
            pair[3].append(blobs)
            if len(pair[0]) > 1:
                if pair[2][0] < pair[2][1]:
                    # Put the sample with the larger num_pos first.
                    for column in pair:
                        column[0], column[1] = column[1], column[0]
                yield (pair[0][0], pair[1][0], pair[2][0], pair[3][0],
                       pair[0][1], pair[1][1], pair[2][1], pair[3][1])
                pair = [[] for _ in range(4)]

    # Blob dict dtypes/shapes shared by both halves of the pair.
    blob_types = {
        'H_boxes': tf.float32,
        'Hsp_boxes': tf.float32,
        'O_boxes': tf.float32,
        'gt_class_sp': tf.float32,
        'gt_class_HO': tf.float32,
        'gt_class_H': tf.float32,
        'gt_class_C': tf.float32,
        'Mask_sp': tf.float32,
        'Mask_HO': tf.float32,
        'Mask_H': tf.float32,
        'sp': tf.float32,
    }
    blob_shapes = {
        'H_boxes': tf.TensorShape([None, 5]),
        'Hsp_boxes': tf.TensorShape([None, 5]),
        'O_boxes': tf.TensorShape([None, 5]),
        'gt_class_sp': tf.TensorShape([None, 29]),
        'gt_class_HO': tf.TensorShape([None, 29]),
        'gt_class_H': tf.TensorShape([None, 29]),
        'gt_class_C': tf.TensorShape([None, 238]),
        'Mask_sp': tf.TensorShape([None, 29]),
        'Mask_HO': tf.TensorShape([None, 29]),
        'Mask_H': tf.TensorShape([None, 29]),
        'sp': tf.TensorShape([None, 64, 64, 3]),
    }
    img_shape = tf.TensorShape([1, None, None, 3])
    scalar = tf.TensorShape([])
    dataset = tf.data.Dataset.from_generator(
        partial(generator3, Pos_augment, Neg_select, augment_type, with_pose, is_zero_shot),
        output_types=(tf.float32, tf.int32, tf.int32, blob_types,
                      tf.float32, tf.int32, tf.int32, dict(blob_types)),
        output_shapes=(img_shape, scalar, scalar, blob_shapes,
                       img_shape, scalar, scalar, dict(blob_shapes)))
    dataset = dataset.prefetch(100)
    iterator = dataset.make_one_shot_iterator()
    image, image_id, num_pos, blobs, image1, image_id1, num_pos1, blobs1 = iterator.get_next()
    return [image, image1], [image_id, image_id1], [num_pos, num_pos1], [blobs, blobs1]
def obtain_coco_data_hoicoco_24(Pos_augment=15, Neg_select=30, augment_type=0, pattern_type=False, is_zero_shot=0, type=0):
    """Build a paired-sample one-shot tf.data pipeline for V-COCO with object boxes.

    :param type: 0 -> 24 verb classes via coco_generator2;
                 1 -> 21 verb classes via coco_generator3
    :raises ValueError: for any other ``type`` (previously this surfaced later
        as an UnboundLocalError on ``verb_num``)
    :return: ([image, image1], [image_id, image_id1], [num_pos, num_pos1], [blobs, blobs1])
    """
    if type == 0:
        verb_num = 24
        g_func = coco_generator2
    elif type == 1:
        verb_num = 21
        g_func = coco_generator3
    else:
        # Fail fast with a clear message instead of an unbound-local crash below.
        raise ValueError('obtain_coco_data_hoicoco_24: unsupported type %r' % (type,))

    def generator3(Pos_augment, Neg_select, augment_type, pattern_type, is_zero_shot):
        # Buffer consecutive samples in pairs; put the sample with the larger
        # positive count first, then yield both halves as one 8-tuple.
        buffer = [[] for i in range(4)]
        for im_orig, image_id, num_pos, blobs in g_func(Pos_augment, Neg_select, augment_type,
                                                        pattern_type, is_zero_shot):
            buffer[0].append(im_orig)
            buffer[1].append(image_id)
            buffer[2].append(num_pos)
            buffer[3].append(blobs)
            if len(buffer[0]) > 1:
                if buffer[2][0] < buffer[2][1]:
                    for i in range(len(buffer)):
                        buffer[i][0], buffer[i][1] = buffer[i][1], buffer[i][0]
                yield (buffer[0][0], buffer[1][0], buffer[2][0], buffer[3][0],
                       buffer[0][1], buffer[1][1], buffer[2][1], buffer[3][1])
                buffer = [[] for i in range(4)]

    # Blob dict dtypes/shapes (includes pose_box; 222 composed classes,
    # 64x64x2 spatial pattern), shared by both halves of the pair.
    blob_types = {
        'H_boxes': tf.float32,
        'Hsp_boxes': tf.float32,
        'pose_box': tf.float32,
        'O_boxes': tf.float32,
        'gt_class_sp': tf.float32,
        'gt_class_HO': tf.float32,
        'gt_class_H': tf.float32,
        'gt_class_C': tf.float32,
        'Mask_sp': tf.float32,
        'Mask_HO': tf.float32,
        'Mask_H': tf.float32,
        'sp': tf.float32,
    }
    blob_shapes = {
        'H_boxes': tf.TensorShape([None, 5]),
        'Hsp_boxes': tf.TensorShape([None, 5]),
        'pose_box': tf.TensorShape([None, 5]),
        'O_boxes': tf.TensorShape([None, 5]),
        'gt_class_sp': tf.TensorShape([None, verb_num]),
        'gt_class_HO': tf.TensorShape([None, verb_num]),
        'gt_class_H': tf.TensorShape([None, verb_num]),
        'gt_class_C': tf.TensorShape([None, 222]),
        'Mask_sp': tf.TensorShape([None, verb_num]),
        'Mask_HO': tf.TensorShape([None, verb_num]),
        'Mask_H': tf.TensorShape([None, verb_num]),
        'sp': tf.TensorShape([None, 64, 64, 2]),
    }
    img_shape = tf.TensorShape([1, None, None, 3])
    scalar = tf.TensorShape([])
    dataset = tf.data.Dataset.from_generator(
        partial(generator3, Pos_augment, Neg_select, augment_type, pattern_type, is_zero_shot),
        output_types=(tf.float32, tf.int32, tf.int32, blob_types,
                      tf.float32, tf.int32, tf.int32, dict(blob_types)),
        output_shapes=(img_shape, scalar, scalar, blob_shapes,
                       img_shape, scalar, scalar, dict(blob_shapes)))
    dataset = dataset.prefetch(100)
    iterator = dataset.make_one_shot_iterator()
    image, image_id, num_pos, blobs, image1, image_id1, num_pos1, blobs1 = iterator.get_next()
    return [image, image1], [image_id, image_id1], [num_pos, num_pos1], [blobs, blobs1]
def get_new_Trainval_N(Trainval_N, is_zero_shot, unseen_idx):
    """Drop negative pairs whose verb/HOI index is in the unseen split.

    :param Trainval_N: dict mapping image id -> list of negative items, where
        item[1] is the class index checked against ``unseen_idx``
    :param is_zero_shot: only filter when > 0; otherwise return the input as-is
    :param unseen_idx: collection of class indices to exclude
    :return: a new filtered dict (zero-shot), or the original dict unchanged
    """
    if is_zero_shot <= 0:
        return Trainval_N
    # Note: an earlier version of this code had a bug (the key was wrongly
    # fixed to 4); every key is filtered independently here.
    return {
        key: [item for item in items if item[1] not in unseen_idx]
        for key, items in Trainval_N.items()
    }
def get_zero_shot_type(model_name):
    """Infer the zero-shot split code from a model-name token.

    Tokens are checked in priority order (first match wins):
    '_zs_' -> 7 (open long-tailed), 'zsnrare' -> 4, '_zsrare_' -> 3,
    '_zsuo_' -> 11 (unseen object), '_zs3_' -> 3 (VCL), '_zs4_' -> 4.
    Returns 0 when no token matches.
    """
    token_to_type = (
        ('_zs_', 7),       # for open long-tailed hoi detection
        ('zsnrare', 4),
        ('_zsrare_', 3),
        ('_zsuo_', 11),    # for unseen object
        ('_zs3_', 3),      # for VCL model
        ('_zs4_', 4),
    )
    for token, zs_type in token_to_type:
        if token in model_name:
            return zs_type
    return 0
def get_epoch_iters(model_name):
    """Return iterations per epoch for the split encoded in the model name.

    First matching token wins; the full-data default is 43273 iterations.
    """
    token_to_iters = (
        ('zsnrare', 20000),
        ('zs_', 20000),
        ('_zs4_', 20000),
        ('zsrare', 40000),
    )
    for token, iters in token_to_iters:
        if token in model_name:
            return iters
    return 43273
def get_augment_type(model_name):
    """Map the augmentation token in a model name to an augment-type code.

    '_aug5' -> 4, '_aug6' -> 5, anything else -> 0 (no error is raised for
    unrecognized names, matching historical behavior).
    """
    if '_aug5' in model_name:
        return 4
    if '_aug6' in model_name:
        return 5
    return 0
def get_unseen_index(zero_shot_type):
    """Return the list of HOI category indices held out as unseen for a split.

    :param zero_shot_type: 3 -> rare-first split; 4 -> non-rare-first split;
        11 -> unseen-object split; 7 -> 24-rare merge of the zs3 and zs4 lists;
        any other value returns None (no held-out categories).
    :return: list of unseen HOI indices, or None.
    """
    unseen_idx = None
    if zero_shot_type == 3:
        # rare first
        unseen_idx = [509, 279, 280, 402, 504, 286, 499, 498, 289, 485, 303, 311, 325, 439, 351, 358, 66, 427, 379, 418,
                      70, 416,
                      389, 90, 395, 76, 397, 84, 135, 262, 401, 592, 560, 586, 548, 593, 526, 181, 257, 539, 535, 260,
                      596, 345, 189,
                      205, 206, 429, 179, 350, 405, 522, 449, 261, 255, 546, 547, 44, 22, 334, 599, 239, 315, 317, 229,
                      158, 195,
                      238, 364, 222, 281, 149, 399, 83, 127, 254, 398, 403, 555, 552, 520, 531, 440, 436, 482, 274, 8,
                      188, 216, 597,
                      77, 407, 556, 469, 474, 107, 390, 410, 27, 381, 463, 99, 184, 100, 292, 517, 80, 333, 62, 354,
                      104, 55, 50,
                      198, 168, 391, 192, 595, 136, 581]
    elif zero_shot_type == 4:
        # non rare first
        unseen_idx = [38, 41, 20, 18, 245, 11, 19, 154, 459, 42, 155, 139, 60, 461, 577, 153, 582, 89, 141, 576, 75,
                      212, 472, 61,
                      457, 146, 208, 94, 471, 131, 248, 544, 515, 566, 370, 481, 226, 250, 470, 323, 169, 480, 479, 230,
                      385, 73,
                      159, 190, 377, 176, 249, 371, 284, 48, 583, 53, 162, 140, 185, 106, 294, 56, 320, 152, 374, 338,
                      29, 594, 346,
                      456, 589, 45, 23, 67, 478, 223, 493, 228, 240, 215, 91, 115, 337, 559, 7, 218, 518, 297, 191, 266,
                      304, 6, 572,
                      529, 312, 9, 308, 417, 197, 193, 163, 455, 25, 54, 575, 446, 387, 483, 534, 340, 508, 110, 329,
                      246, 173, 506,
                      383, 93, 516, 64]
    elif zero_shot_type == 11:
        # Unseen-object split: contiguous index ranges belonging to the held-out objects.
        unseen_idx = [111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125,
                      126, 127, 128, 224, 225, 226, 227, 228, 229, 230, 231, 290, 291, 292, 293,
                      294, 313, 314, 315, 316, 317, 318, 319, 320, 321, 322, 323, 324, 336, 337,
                      338, 339, 340, 341, 418, 419, 420, 421, 422, 423, 424, 425, 426, 427, 428,
                      429, 430, 431, 432, 433, 453, 454, 455, 456, 457, 458, 459, 460, 461, 462,
                      463, 464, 465, 466, 467, 468, 469, 470, 471, 472, 473, 533, 534, 535, 536,
                      537, 558, 559, 560, 561, 595, 596, 597, 598, 599]
        # miss [ 5, 6, 28, 56, 88] verbs 006 break 007 brush_with 029 flip 057 move 089 slide
    elif zero_shot_type == 7:
        # 24 rare merge of zs3 & zs4
        unseen_idx = [509, 279, 280, 402, 504, 286, 499, 498, 289, 485, 303, 311, 325, 439, 351, 358, 66, 427, 379, 418, 70, 416, 389,
                      90, 38, 41, 20, 18, 245, 11, 19, 154, 459, 42, 155, 139, 60, 461, 577, 153, 582, 89, 141, 576, 75, 212, 472, 61,
                      457, 146, 208, 94, 471, 131, 248, 544, 515, 566, 370, 481, 226, 250, 470, 323, 169, 480, 479, 230, 385, 73, 159,
                      190, 377, 176, 249, 371, 284, 48, 583, 53, 162, 140, 185, 106, 294, 56, 320, 152, 374, 338, 29, 594, 346, 456, 589,
                      45, 23, 67, 478, 223, 493, 228, 240, 215, 91, 115, 337, 559, 7, 218, 518, 297, 191, 266, 304, 6, 572, 529, 312,
                      9]
        # 22529, 14830, 22493, 17411, 21912,
    return unseen_idx
def generator2(Trainval_GT, Trainval_N, Pos_augment, Neg_select, augment_type, pattern_type, zero_shot_type, isalign,
               epoch=0):
    """Endless generator over HICO-DET training images.

    Groups ground-truth interactions by image, reads each image once, augments
    every selected interaction via Augmented_HO_Neg_HICO, and yields one batch
    per image chunk.

    :param Trainval_GT: list of GT entries; entry[0] is the image id, entry[1]
        the interaction label list
    :param Trainval_N: negative pairs keyed by image id
    :param Pos_augment: base positive augmentation count (may be overridden by
        get_aug_params)
    :param Neg_select: base negative selection count (may be overridden)
    :param augment_type: augmentation preset selector; < 0 stops after one epoch
    :param pattern_type: forwarded to Augmented_HO_Neg_HICO
    :param zero_shot_type: > 0 filters out interactions with unseen labels
    :param isalign: forwarded to Augmented_HO_Neg_HICO
    :param epoch: unused here — presumably kept for interface compatibility; TODO confirm
    :return: yields (im_orig, image_id, num_pos, Human_augmented, Object_augmented, action_HO, Pattern)
    """
    # import skimage
    # assert skimage.__version__ == '0.14.2', "The version of skimage might affect the speed largely. I use 0.14.2"
    Neg_select1, Pos_augment1, inters_per_img = get_aug_params(Neg_select, Pos_augment, augment_type)
    unseen_idx = get_unseen_index(zero_shot_type)
    Trainval_N = get_new_Trainval_N(Trainval_N, zero_shot_type, unseen_idx)
    print("generator2", inters_per_img, Pos_augment1, 'Neg_select:', Neg_select1, augment_type, 'zero shot:',
          zero_shot_type)
    import math
    # Group GT entry indices by image id so one yielded batch can contain
    # several interactions from the same image.
    img_id_index_map = {}
    for i, gt in enumerate(Trainval_GT):
        img_id = gt[0]
        if img_id in img_id_index_map:
            img_id_index_map[img_id].append(i)
        else:
            img_id_index_map[img_id] = [i]
    img_id_list = list(img_id_index_map.keys())
    # Repeat an image id once per extra chunk of inters_per_img interactions it
    # owns, so every interaction is visited each epoch.
    for k, v in img_id_index_map.items():
        for i in range(math.ceil(len(v) * 1.0 / inters_per_img) - 1):
            img_id_list.append(k)
    import copy
    import time
    st = time.time()
    count_time = 0
    avg_time = 0
    while True:
        # Fresh per-epoch copy: gt indices are consumed chunk by chunk below.
        running_map = copy.deepcopy(img_id_index_map)
        # print('Step: ', i)
        np.random.shuffle(img_id_list)
        for k in running_map.keys():
            np.random.shuffle(running_map[k])
        for img_id_tmp in img_id_list:
            # Take (and remove) up to inters_per_img GT interactions for this image.
            gt_ids = running_map[img_id_tmp][:inters_per_img]
            running_map[img_id_tmp] = running_map[img_id_tmp][inters_per_img:]
            Pattern_list = []
            Human_augmented_list = []
            Object_augmented_list = []
            action_HO_list = []
            num_pos_list = 0
            mask_all_list = []
            image_id = img_id_tmp
            if image_id in [528, 791, 1453, 2783, 3489, 3946, 3946, 11747, 11978, 12677, 16946, 17833, 19218, 19218,
                            22347, 27293, 27584, 28514, 33683, 35399]:
                # This is a list contain multiple objects within the same object box. It seems like wrong annotations.
                # We remove those images. This do not affect the performance in our experiment.
                continue
            im_file = cfg.DATA_DIR + '/' + 'hico_20160224_det/images/train2015/HICO_train2015_' + (
                str(image_id)).zfill(
                8) + '.jpg'
            # id, gt, h, o
            # print(gt_ids, gt_ids[0], Trainval_GT[gt_ids[0]])
            import cv2
            import os
            if not os.path.exists(im_file):
                print('not exist', im_file)
                continue
            im = cv2.imread(im_file)
            if im is None:
                print('node', im_file)
                continue
            im_orig = im.astype(np.float32, copy=True)
            im_orig -= cfg.PIXEL_MEANS
            im_shape = im.shape
            import os
            # print('generate batch read image:', time.time() - st, "average;", avg_time)
            for i in gt_ids:
                GT = Trainval_GT[i]
                # rare data
                if zero_shot_type > 0:
                    # Skip interactions carrying any label from the unseen split.
                    has_rare = False
                    for label in GT[1]:
                        if label in unseen_idx:
                            has_rare = True
                    if has_rare:
                        continue
                assert GT[0] == image_id
                # im_orig = im_orig.reshape(1, im_shape[0], im_shape[1], 3)
                cur_pos_augment = Pos_augment1
                if augment_type > 1:
                    # Only the last interaction of the chunk carries negatives,
                    # scaled so the per-image total is unchanged.
                    if i == gt_ids[-1]:  # This must be -1
                        cur_neg_select = Neg_select1 * len(gt_ids)
                    else:
                        cur_neg_select = 0
                else:
                    cur_neg_select = Neg_select1
                # st1 = time.time()
                Pattern, Human_augmented, Object_augmented, action_HO, num_pos = Augmented_HO_Neg_HICO(
                    GT,
                    Trainval_N,
                    im_shape,
                    Pos_augment=cur_pos_augment,
                    Neg_select=cur_neg_select,
                    pattern_type=pattern_type,
                    isalign=isalign)
                # maintain same number of augmentation,
                # print('generate batch read image:', i, time.time() - st1, cur_neg_select, len(Trainval_N[image_id]) if image_id in Trainval_N else 0)
                Pattern_list.append(Pattern)
                Human_augmented_list.append(Human_augmented)
                Object_augmented_list.append(Object_augmented)
                action_HO_list.append(action_HO)
                num_pos_list += num_pos
                # print('item:', Pattern.shape, num_pos)
            if len(Pattern_list) <= 0:
                # Every interaction of the chunk was filtered out.
                continue
            Pattern = np.concatenate(Pattern_list, axis=0)
            Human_augmented = np.concatenate(Human_augmented_list, axis=0)
            Object_augmented = np.concatenate(Object_augmented_list, axis=0)
            action_HO = np.concatenate(action_HO_list, axis=0)
            num_pos = num_pos_list
            im_orig = np.expand_dims(im_orig, axis=0)
            yield (im_orig, image_id, num_pos, Human_augmented, Object_augmented, action_HO, Pattern)
        if augment_type < 0:
            # Single pass requested: stop after one epoch.
            break
def get_aug_params(Neg_select, Pos_augment, augment_type):
    """Resolve augmentation hyper-parameters for a given augment_type.

    Known types use fixed presets; any other type keeps the caller-supplied
    counts with 2 interactions per image.

    :return: (Neg_select1, Pos_augment1, inters_per_img)
    """
    # augment_type -> (inters_per_img, Pos_augment1, Neg_select1)
    presets = {
        0: (1, 15, 60),
        4: (5, 6, 24),
        5: (7, 10, 40),
    }
    inters_per_img, Pos_augment1, Neg_select1 = presets.get(
        augment_type, (2, Pos_augment, Neg_select))
    return Neg_select1, Pos_augment1, inters_per_img
def get_vcoco_aug_params(Neg_select, Pos_augment, augment_type):
    """Resolve V-COCO augmentation hyper-parameters for a given augment_type.

    Known types use fixed presets (-1 disables augmentation entirely); any
    other type keeps the caller-supplied counts with 2 interactions per image.

    :return: (Neg_select1, Pos_augment1, inters_per_img)
    """
    # augment_type -> (inters_per_img, Pos_augment1, Neg_select1)
    presets = {
        0: (1, 15, 30),
        1: (2, 15, 30),
        2: (3, 15, 30),
        -1: (1, 0, 0),
    }
    inters_per_img, Pos_augment1, Neg_select1 = presets.get(
        augment_type, (2, Pos_augment, Neg_select))
    return Neg_select1, Pos_augment1, inters_per_img
def obtain_data(Pos_augment=15, Neg_select=60, augment_type=0, pattern_type=0, zero_shot_type=0, isalign=False,
                epoch=0, coco=False, neg_type=0):
    """Build the HICO-DET training input pipeline on top of generator2.

    :param coco: selects the GT pickle: falsy -> Trainval_GT_HICO; 2 ->
        new_list_pickle_2; 3 -> new_list_pickle_3 plus extra negatives merged
        into Trainval_N; any other truthy value -> Trainval_GT_HICO_COCO.
    :param neg_type: unused in this function — presumably consumed by a caller
        or a sibling variant; TODO confirm.
    :return: (image, image_id, num_pos, Human_augmented, Object_augmented,
        action_HO, sp) tensors from a one-shot iterator.
    """
    # Negatives always start from the standard HICO negatives file.
    with open(cfg.DATA_DIR + '/' + 'Trainval_Neg_HICO.pkl', "rb") as f:
        Trainval_N = pickle.load(f, encoding='latin1')
    if not coco:
        with open(cfg.DATA_DIR + '/' + 'Trainval_GT_HICO.pkl', "rb") as f:
            Trainval_GT = pickle.load(f, encoding='latin1')
    elif coco == 2:
        # 115904
        with open(cfg.DATA_DIR + '/' + 'new_list_pickle_2.pkl', "rb") as f:
            Trainval_GT = pickle.load(f, encoding='latin1')
    elif coco == 3:
        # 115904
        with open(cfg.DATA_DIR + '/' + 'new_list_pickle_3.pkl', "rb") as f:
            Trainval_GT = pickle.load(f, encoding='latin1')
        with open(cfg.DATA_DIR + '/' + 'new_neg_dict.pkl', "rb") as f:
            Trainval_N1 = pickle.load(f, encoding='latin1')
        # Merge the extra negatives into the standard ones per image id.
        for k in Trainval_N:
            if k in Trainval_N1:
                Trainval_N[k].extend(Trainval_N1[k])
    else:
        print('Trainval_GT_HICO_COCO')
        Trainval_GT = pickle.load(open(cfg.DATA_DIR + '/' + 'Trainval_GT_HICO_COCO.pkl', "rb"), encoding='latin1')
    # generator2 yields a 7-tuple; types and shapes below mirror it element-wise
    # (600 HOI classes, 64x64x2 spatial pattern).
    dataset = tf.data.Dataset.from_generator(partial(generator2, Trainval_GT, Trainval_N, Pos_augment, Neg_select,
                                                     augment_type, pattern_type, zero_shot_type, isalign, epoch,
                                                     ), output_types=(
        tf.float32, tf.int32, tf.int64, tf.float32, tf.float32, tf.float32, tf.float32),
                                             output_shapes=(
                                                 tf.TensorShape([1, None, None, 3]), tf.TensorShape([]),
                                                 tf.TensorShape([]),
                                                 tf.TensorShape([None, 5]), tf.TensorShape([None, 5]),
                                                 tf.TensorShape([None, 600]),
                                                 tf.TensorShape([None, 64, 64, 2])))
    # (im_orig, image_id, num_pos, Human_augmented, Object_augmented, action_HO, Pattern)
    # dataset = tf.data.Dataset.from_generator(gen, output_types=(tf.float32, tf.int32),
    #                                          output_shapes=(tf.TensorShape([1, None, None, 3]), tf.TensorShape([])))
    dataset = dataset.prefetch(100)
    # dataset = dataset.shuffle(1000)
    # dataset = dataset.repeat(100)
    # dataset = dataset.repeat(1000).shuffle(1000)
    # dataset._dataset.batch(3)
    iterator = dataset.make_one_shot_iterator()
    image, image_id, num_pos, Human_augmented, Object_augmented, action_HO, sp = iterator.get_next()
    return image, image_id, num_pos, Human_augmented, Object_augmented, action_HO, sp
def obtain_test_data(Pos_augment=15, Neg_select=60, augment_type=0, with_pose=False, large_neg_for_ho=False,
                     isalign=False):
    """Build a one-shot tf.data pipeline over the HICO test ground truth.

    Bug fix: ``output_types`` previously listed 8 dtypes while generator2
    yields a 7-tuple and ``output_shapes`` lists 7 shapes — a structure
    mismatch; the extra trailing tf.float32 is removed.

    NOTE(review): Trainval_N is loaded from Test_GT_HICO.pkl, the same file as
    Trainval_GT — presumably intentional for evaluation; verify against callers.
    ``large_neg_for_ho`` is accepted but unused here — TODO confirm.

    :return: (image, image_id, num_pos, Human_augmented, Object_augmented,
        action_HO, sp) tensors from a one-shot iterator.
    """
    Trainval_GT = pickle.load(open(cfg.DATA_DIR + '/' + 'Test_GT_HICO.pkl', "rb"), encoding='latin1')
    Trainval_N = pickle.load(open(cfg.DATA_DIR + '/' + 'Test_GT_HICO.pkl', "rb"), encoding='latin1')
    g = generator2
    dataset = tf.data.Dataset.from_generator(
        partial(g, Trainval_GT, Trainval_N, Pos_augment, Neg_select, augment_type, with_pose, 0, isalign),
        output_types=(
            tf.float32, tf.int32, tf.int64, tf.float32, tf.float32, tf.float32, tf.float32),
        output_shapes=(
            tf.TensorShape([1, None, None, 3]), tf.TensorShape([]), tf.TensorShape([]),
            tf.TensorShape([None, 5]), tf.TensorShape([None, 5]),
            tf.TensorShape([None, 600]),
            tf.TensorShape([None, 64, 64, 2]),
        ))
    # (im_orig, image_id, num_pos, Human_augmented, Object_augmented, action_HO, Pattern)
    dataset = dataset.prefetch(100)
    iterator = dataset.make_one_shot_iterator()
    image, image_id, num_pos, Human_augmented, Object_augmented, action_HO, sp = iterator.get_next()
    return image, image_id, num_pos, Human_augmented, Object_augmented, action_HO, sp
def obtain_coco_data_hoicoco(Pos_augment=15, Neg_select=30, augment_type=0, pattern_type=False, is_zero_shot=0, type=0):
    """Build a paired-sample one-shot tf.data pipeline for the 21-verb V-COCO data.

    :param type: only 1 is supported (21 verb classes via coco_generator3)
    :raises ValueError: for any other ``type`` — previously this crashed later
        with an UnboundLocalError on ``verb_num``/``g_func``
    :return: ([image, image1], [image_id, image_id1], [num_pos, num_pos1], [blobs, blobs1])
    """
    if type == 1:
        verb_num = 21
        g_func = coco_generator3
    else:
        # Fail fast with a clear message instead of an unbound-local crash below.
        raise ValueError('obtain_coco_data_hoicoco only supports type == 1, got %r' % (type,))

    def generator3(Pos_augment, Neg_select, augment_type, pattern_type, is_zero_shot):
        # Buffer consecutive samples in pairs; put the sample with the larger
        # positive count first, then yield both halves as one 8-tuple.
        buffer = [[] for i in range(4)]
        for im_orig, image_id, num_pos, blobs in g_func(Pos_augment, Neg_select, augment_type, pattern_type,
                                                        is_zero_shot):
            buffer[0].append(im_orig)
            buffer[1].append(image_id)
            buffer[2].append(num_pos)
            buffer[3].append(blobs)
            if len(buffer[0]) > 1:
                if buffer[2][0] < buffer[2][1]:
                    for i in range(len(buffer)):
                        buffer[i][0], buffer[i][1] = buffer[i][1], buffer[i][0]
                yield (buffer[0][0], buffer[1][0], buffer[2][0], buffer[3][0],
                       buffer[0][1], buffer[1][1], buffer[2][1], buffer[3][1])
                buffer = [[] for i in range(4)]

    # Blob dict dtypes/shapes (222 composed classes, 64x64x2 spatial pattern),
    # shared by both halves of the pair.
    blob_types = {
        'H_boxes': tf.float32,
        'Hsp_boxes': tf.float32,
        'O_boxes': tf.float32,
        'gt_class_sp': tf.float32,
        'gt_class_HO': tf.float32,
        'gt_class_H': tf.float32,
        'gt_class_C': tf.float32,
        'Mask_sp': tf.float32,
        'Mask_HO': tf.float32,
        'Mask_H': tf.float32,
        'sp': tf.float32,
    }
    blob_shapes = {
        'H_boxes': tf.TensorShape([None, 5]),
        'Hsp_boxes': tf.TensorShape([None, 5]),
        'O_boxes': tf.TensorShape([None, 5]),
        'gt_class_sp': tf.TensorShape([None, verb_num]),
        'gt_class_HO': tf.TensorShape([None, verb_num]),
        'gt_class_H': tf.TensorShape([None, verb_num]),
        'gt_class_C': tf.TensorShape([None, 222]),
        'Mask_sp': tf.TensorShape([None, verb_num]),
        'Mask_HO': tf.TensorShape([None, verb_num]),
        'Mask_H': tf.TensorShape([None, verb_num]),
        'sp': tf.TensorShape([None, 64, 64, 2]),
    }
    img_shape = tf.TensorShape([1, None, None, 3])
    scalar = tf.TensorShape([])
    dataset = tf.data.Dataset.from_generator(
        partial(generator3, Pos_augment, Neg_select, augment_type, pattern_type, is_zero_shot),
        output_types=(tf.float32, tf.int32, tf.int32, blob_types,
                      tf.float32, tf.int32, tf.int32, dict(blob_types)),
        output_shapes=(img_shape, scalar, scalar, blob_shapes,
                       img_shape, scalar, scalar, dict(blob_shapes)))
    dataset = dataset.prefetch(100)
    iterator = dataset.make_one_shot_iterator()
    image, image_id, num_pos, blobs, image1, image_id1, num_pos1, blobs1 = iterator.get_next()
    return [image, image1], [image_id, image_id1], [num_pos, num_pos1], [blobs, blobs1]
def coco_generator2(Pos_augment = 15, Neg_select=30, augment_type = 0, pattern_type=False, is_zero_shot=0):
    """Endless generator over the 24-verb V-COCO (object-label) ground truth.

    Loads the GT/negatives pickles once, then shuffles and yields per-image
    training blobs forever using Augmented_HO_spNeg2.

    :param Pos_augment: positive augmentation count per GT interaction
    :param Neg_select: negative sample count
    :param augment_type: unused here — presumably kept for a uniform generator
        signature; TODO confirm
    :param pattern_type: unused here — TODO confirm
    :param is_zero_shot: unused here — TODO confirm
    :return: yields (im_orig, image_id, num_positives, blobs dict)
    """
    Trainval_GT = pickle.load(open(cfg.DATA_DIR + '/' + 'Trainval_GT_VCOCO_obj_24.pkl', "rb"), encoding='latin1')
    Trainval_N = pickle.load(open(cfg.DATA_DIR + '/' + 'Trainval_Neg_VCOCO_obj_24.pkl', "rb"), encoding='latin1')
    i = 0
    index_list = list(range(0, len(Trainval_GT)))
    while True:
        # print('Step: ', i)
        np.random.shuffle(index_list)
        for i in index_list:
            GT = Trainval_GT[i]
            image_id = GT[0]
            im_file = cfg.DATA_DIR + '/' + 'v-coco/coco/images/train2014/COCO_train2014_' + (str(image_id)).zfill(
                12) + '.jpg'
            im = cv2.imread(im_file)
            im_orig = im.astype(np.float32, copy=True)
            # Subtract the per-channel pixel means before feeding the network.
            im_orig -= cfg.PIXEL_MEANS
            im_shape = im_orig.shape
            im_orig = im_orig.reshape(1, im_shape[0], im_shape[1], 3)
            Pattern, Human_augmented_sp, Human_augmented, Object_augmented, \
                action_sp, action_HO, action_H, mask_sp, mask_HO, mask_H, gt_compose = Augmented_HO_spNeg2(GT, Trainval_N, im_shape, Pos_augment, Neg_select)
            blobs = {}
            # blobs['image'] = im_orig
            blobs['H_boxes'] = Human_augmented
            blobs['Hsp_boxes'] = Human_augmented_sp
            blobs['O_boxes'] = Object_augmented
            blobs['gt_class_sp'] = action_sp
            blobs['gt_class_HO'] = action_HO
            blobs['gt_class_H'] = action_H
            blobs['gt_class_C'] = gt_compose
            blobs['Mask_sp'] = mask_sp
            blobs['Mask_HO'] = mask_HO
            blobs['Mask_H'] = mask_H
            blobs['sp'] = Pattern
            # blobs['H_num'] = len(action_H)
            # print(image_id, len(action_H))
            yield (im_orig, image_id, len(action_H), blobs)
            # print(i, image_id, len(Trainval_GT))
            # i += 1
            # i = i % len(Trainval_GT)
def coco_generator3(Pos_augment = 15, Neg_select=30, augment_type = 0, pattern_type=False, is_zero_shot=0):
    """Endless generator over the 21-verb V-COCO (object-label) ground truth.

    Like coco_generator2 but uses the 21-verb pickles and Augmented_HO_spNeg3,
    and stops after a single epoch when augment_type < 0.

    :param pattern_type: unused here — presumably kept for a uniform generator
        signature; TODO confirm
    :param is_zero_shot: unused here — TODO confirm
    :return: yields (im_orig, image_id, num_positives, blobs dict)
    """
    Trainval_GT = pickle.load(open(cfg.DATA_DIR + '/' + 'Trainval_GT_VCOCO_obj_21.pkl', "rb"), encoding='latin1')
    Trainval_N = pickle.load(open(cfg.DATA_DIR + '/' + 'Trainval_Neg_VCOCO_obj_21.pkl', "rb"), encoding='latin1')
    i = 0
    index_list = list(range(0, len(Trainval_GT)))
    print(len(index_list))
    while True:
        # print('Step: ', i)
        np.random.shuffle(index_list)
        for i in index_list:
            GT = Trainval_GT[i]
            image_id = GT[0]
            im_file = cfg.DATA_DIR + '/' + 'v-coco/coco/images/train2014/COCO_train2014_' + (str(image_id)).zfill(
                12) + '.jpg'
            im = cv2.imread(im_file)
            im_orig = im.astype(np.float32, copy=True)
            # Subtract the per-channel pixel means before feeding the network.
            im_orig -= cfg.PIXEL_MEANS
            im_shape = im_orig.shape
            im_orig = im_orig.reshape(1, im_shape[0], im_shape[1], 3)
            Pattern, Human_augmented_sp, Human_augmented, Object_augmented, \
                action_sp, action_HO, action_H, mask_sp, mask_HO, mask_H, gt_compose = Augmented_HO_spNeg3(GT, Trainval_N, im_shape, Pos_augment, Neg_select)
            blobs = {}
            # blobs['image'] = im_orig
            blobs['H_boxes'] = Human_augmented
            blobs['Hsp_boxes'] = Human_augmented_sp
            blobs['O_boxes'] = Object_augmented
            blobs['gt_class_sp'] = action_sp
            blobs['gt_class_HO'] = action_HO
            blobs['gt_class_H'] = action_H
            blobs['gt_class_C'] = gt_compose
            blobs['Mask_sp'] = mask_sp
            blobs['Mask_HO'] = mask_HO
            blobs['Mask_H'] = mask_H
            blobs['sp'] = Pattern
            yield (im_orig, image_id, len(action_H), blobs)
        if augment_type < 0:
            # Single pass requested: stop after one epoch.
            break
def coco_generator_atl(Pos_augment = 15, Neg_select=0, augment_type = 0, pattern_type=False, is_zero_shot=0, type =0, vcoco_type = 21):
    """
    Here, the name semi means atl. For objects, we do not have verb labels. Thus, we can only provide object id.

    :param type: selects the object-only GT pickle and image directory layout:
        0 -> COCO 2014 (570834 entries); 2 -> HICO (68389 entries); 3 -> both
        merged (COCO ids are offset by MAX_HICO_ID); 4/5 -> V-COCO variants;
        anything else -> COCO 2014 21-verb pickle.
    :param vcoco_type: 24 selects Augmented_HO_spNeg2, otherwise Augmented_HO_spNeg3.
    :param augment_type, pattern_type, is_zero_shot: unused here — presumably
        kept for a uniform generator signature; TODO confirm.
    :return: yields (im_orig, image_id, num_positives, blobs dict) forever.
    """
    print(type)
    if type == 0:
        # coco 2014 570834 length
        Trainval_GT = pickle.load(open(cfg.DATA_DIR + '/' + 'Trainval_GT_VCOCO_obj_semi.pkl', "rb"), encoding='latin1')
    elif type == 2:
        # hico 68389 length
        Trainval_GT = pickle.load(open(cfg.DATA_DIR + '/' + 'Trainval_GT_VCOCO_hico_obj_semi_21.pkl', "rb"),
                                  encoding='latin1')
    elif type == 3:
        # both
        Trainval_GT_hico = pickle.load(open(cfg.DATA_DIR + '/' + 'Trainval_GT_VCOCO_hico_obj_semi_21.pkl', "rb"),
                                       encoding='latin1')
        Trainval_GT = pickle.load(open(cfg.DATA_DIR + '/' + 'Trainval_GT_VCOCO_obj_semi_21.pkl', "rb"),
                                  encoding='latin1')
        # Offset COCO image ids so they do not collide with HICO ids.
        for item in Trainval_GT:
            item[0] += MAX_HICO_ID
        Trainval_GT.extend(Trainval_GT_hico)
    elif type == 4:
        # --- 42631
        Trainval_GT = pickle.load(open(cfg.DATA_DIR + '/' + 'Trainval_GT_VCOCO_vcoco_obj_semi_21.pkl', "rb"),
                                  encoding='latin1')
    elif type == 5:
        # vcoco
        Trainval_GT = pickle.load(open(cfg.DATA_DIR + '/' + 'Trainval_GT_VCOCO_vcoco1_obj_semi_21.pkl', "rb"),
                                  encoding='latin1')
    else:
        # coco 2014 train 570834
        Trainval_GT = pickle.load(open(cfg.DATA_DIR + '/' + 'Trainval_GT_VCOCO_obj_semi_21.pkl', "rb"), encoding='latin1')
    i = 0
    index_list = list(range(0, len(Trainval_GT)))
    if vcoco_type == 24:
        g_func = Augmented_HO_spNeg2
    else:
        g_func = Augmented_HO_spNeg3
    while True:
        # print('Step: ', i)
        np.random.shuffle(index_list)
        for i in index_list:
            GT = Trainval_GT[i]
            image_id = GT[0]
            # Resolve the image path; the directory depends on the dataset mix.
            if type == 2:
                im_file = cfg.DATA_DIR + '/' + 'hico_20160224_det/images/train2015/HICO_train2015_' + (
                    str(image_id)).zfill(
                    8) + '.jpg'
            elif type == 3:
                if image_id < MAX_HICO_ID:
                    # obj365
                    tmp_id = image_id
                    im_file = cfg.DATA_DIR + '/' + 'hico_20160224_det/images/train2015/HICO_train2015_' + (
                        str(image_id)).zfill(
                        8) + '.jpg'
                    pass
                else:
                    # COCO entry (id was offset by MAX_HICO_ID above);
                    # fall back to val2014 when the file is not in train2014.
                    tmp_id = image_id - MAX_HICO_ID
                    im_file = cfg.DATA_DIR + '/' + 'v-coco/coco/images/train2014/COCO_train2014_' + (str(tmp_id)).zfill(
                        12) + '.jpg'
                    import os
                    if not os.path.exists(im_file):
                        im_file = cfg.DATA_DIR + '/' + 'v-coco/coco/images/val2014/COCO_val2014_' + (
                            str(tmp_id)).zfill(12) + '.jpg'
                        if not os.path.exists(im_file):
                            print(im_file)
                import os
                if not os.path.exists(im_file):
                    print(im_file)
            elif type == 6:
                im_file = cfg.DATA_DIR + '/' + 'v-coco/coco/images/train2014/COCO_train2014_' + (str(image_id)).zfill(
                    12) + '.jpg'
                import os
                if not os.path.exists(im_file):
                    im_file = cfg.DATA_DIR + '/' + 'v-coco/coco/images/val2014/COCO_val2014_' + (
                        str(image_id)).zfill(12) + '.jpg'
                    if not os.path.exists(im_file):
                        print(im_file)
            elif type == 7:
                if image_id >= MAX_COCO_ID:
                    # obj365
                    tmp_id = image_id - MAX_COCO_ID
                    im_file = cfg.LOCAL_DATA + '/dataset/Objects365/Images/train/train/obj365_train_' + (str(tmp_id)).zfill(
                        12) + '.jpg'
                    pass
                else:
                    tmp_id = image_id
                    im_file = cfg.DATA_DIR + '/' + 'v-coco/coco/images/train2014/COCO_train2014_' + (str(tmp_id)).zfill(
                        12) + '.jpg'
                    import os
                    if not os.path.exists(im_file):
                        im_file = cfg.DATA_DIR + '/' + 'v-coco/coco/images/val2014/COCO_val2014_' + (
                            str(tmp_id)).zfill(12) + '.jpg'
                        if not os.path.exists(im_file):
                            print(im_file)
                import os
                if not os.path.exists(im_file):
                    print(im_file)
            else:
                im_file = cfg.DATA_DIR + '/' + 'v-coco/coco/images/train2014/COCO_train2014_' + (str(image_id)).zfill(
                    12) + '.jpg'
            im = cv2.imread(im_file)
            im_orig = im.astype(np.float32, copy=True)
            # Subtract the per-channel pixel means before feeding the network.
            im_orig -= cfg.PIXEL_MEANS
            im_shape = im_orig.shape
            im_orig = im_orig.reshape(1, im_shape[0], im_shape[1], 3)
            # No negatives dict here: object-only data carries no verb labels.
            Pattern, Human_augmented_sp, Human_augmented, Object_augmented, \
                action_sp, action_HO, action_H, mask_sp, mask_HO, mask_H, gt_compose = g_func(GT, {}, im_shape, Pos_augment, Neg_select)
            blobs = {}
            # blobs['image'] = im_orig
            blobs['H_boxes'] = Human_augmented
            blobs['Hsp_boxes'] = Human_augmented_sp
            blobs['O_boxes'] = Object_augmented
            blobs['gt_class_sp'] = action_sp
            blobs['gt_class_HO'] = action_HO
            blobs['gt_class_H'] = action_H
            blobs['gt_class_C'] = gt_compose
            blobs['Mask_sp'] = mask_sp
            blobs['Mask_HO'] = mask_HO
            blobs['Mask_H'] = mask_H
            blobs['sp'] = Pattern
            # blobs['H_num'] = len(action_H)
            # print(image_id, len(action_H))
            yield (im_orig, image_id, len(action_H), blobs)
            # print(i, image_id, len(Trainval_GT))
            # i += 1
            # i = i % len(Trainval_GT)
def obtain_coco_data2(Pos_augment=15, Neg_select=30, augment_type=0, type=0):
    """Build a tf.data input pipeline of augmented V-COCO training batches.

    ``type`` selects the label space and sample generator:
      0 -> 24 verbs, 222 composite classes (coco_generator2)
      1 -> 21 verbs, 222 composite classes (coco_generator3)
      2 -> 29 verbs, 238 composite classes (coco_generator1)

    Returns (image, image_id, num_pos, blobs) tensors from a one-shot
    iterator; ``blobs`` is a dict of box/label/mask/pattern tensors.
    Raises ValueError for any other ``type`` (the original code fell
    through and crashed later with a NameError on ``verb_num``).
    """
    if type == 0:
        compose_classes = 222
        verb_num = 24
        g_func = coco_generator2
    elif type == 1:
        compose_classes = 222
        verb_num = 21
        g_func = coco_generator3
    elif type == 2:
        compose_classes = 238
        verb_num = 29
        g_func = coco_generator1
    else:
        # Fail fast with a clear message instead of an undefined-name crash.
        raise ValueError('obtain_coco_data2: unsupported type %r' % (type,))
    dataset = tf.data.Dataset.from_generator(
        partial(g_func, Pos_augment, Neg_select, augment_type),
        output_types=(tf.float32, tf.int32, tf.int32, {
            'H_boxes': tf.float32,
            'Hsp_boxes': tf.float32,
            'O_boxes': tf.float32,
            'gt_class_sp': tf.float32,
            'gt_class_HO': tf.float32,
            'gt_class_H': tf.float32,
            'gt_class_C': tf.float32,
            'Mask_sp': tf.float32,
            'Mask_HO': tf.float32,
            'Mask_H': tf.float32,
            'sp': tf.float32,
        }),
        output_shapes=(tf.TensorShape([1, None, None, 3]), tf.TensorShape([]), tf.TensorShape([]),
                       {
                           'H_boxes': tf.TensorShape([None, 5]),
                           'Hsp_boxes': tf.TensorShape([None, 5]),
                           'O_boxes': tf.TensorShape([None, 5]),
                           'gt_class_sp': tf.TensorShape([None, verb_num]),
                           'gt_class_HO': tf.TensorShape([None, verb_num]),
                           'gt_class_H': tf.TensorShape([None, verb_num]),
                           'gt_class_C': tf.TensorShape([None, compose_classes]),
                           'Mask_sp': tf.TensorShape([None, verb_num]),
                           'Mask_HO': tf.TensorShape([None, verb_num]),
                           'Mask_H': tf.TensorShape([None, verb_num]),
                           'sp': tf.TensorShape([None, 64, 64, 2]),
                       }))
    # Keep the input pipeline ahead of the consumer.
    dataset = dataset.prefetch(100)
    iterator = dataset.make_one_shot_iterator()
    image, image_id, num_pos, blobs = iterator.get_next()
    return image, image_id, num_pos, blobs
def obtain_coco_data_atl(Pos_augment=15, Neg_select=30, augment_type=0, pattern_type=False, is_zero_shot=0, type=0, vcoco_type=21):
    """Build a tf.data pipeline that interleaves supervised V-COCO batches with
    ATL (weakly-supervised) batches, one of each per training step.

    Returns ([image, image1], [image_id, image_id1], [num_pos, num_pos1],
    [blobs, blobs1]); index 0 of each pair comes from the supervised
    generator, index 1 from the ATL generator.
    ``vcoco_type`` selects the verb vocabulary (21 or 24 verbs); any other
    value falls back to 21.
    """
    if vcoco_type == 21:
        verb_num = 21
        g_func = coco_generator3
    elif vcoco_type == 24:
        verb_num = 24
        g_func = coco_generator2
    else:
        # default
        verb_num = 21
        g_func = coco_generator3

    def generator3(Pos_augment, Neg_select, augment_type, pattern_type, is_zero_shot):
        # Pair each supervised sample with one ATL sample and yield both
        # as a single flat 8-tuple.
        buffer = [[] for i in range(4)]
        import time
        st = time.time()
        count_time = 0
        avg_time = 0
        semi_func = coco_generator_atl(Pos_augment, Neg_select, augment_type, pattern_type, is_zero_shot, type, vcoco_type=vcoco_type)
        # semi is atl. a weak-supervised manner.
        for im_orig, image_id, num_pos, blobs in g_func(Pos_augment, Neg_select, augment_type, pattern_type,
                                                        is_zero_shot):
            buffer[0].append(im_orig)
            buffer[1].append(image_id)
            buffer[2].append(num_pos)
            buffer[3].append(blobs)
            # Pull the matching weakly-supervised sample for this step.
            im_orig, image_id, num_pos, blobs = next(semi_func)
            buffer[0].append(im_orig)
            buffer[1].append(image_id)
            buffer[2].append(num_pos)
            buffer[3].append(blobs)
            yield buffer[0][0], buffer[1][0], buffer[2][0], buffer[3][0], buffer[0][1], buffer[1][1], buffer[2][1], \
                  buffer[3][1],
            buffer = [[] for i in range(4)]

    dataset = tf.data.Dataset.from_generator(
        partial(generator3, Pos_augment, Neg_select, augment_type, pattern_type, is_zero_shot),
        output_types=(tf.float32, tf.int32, tf.int32, {
            'H_boxes': tf.float32,
            'Hsp_boxes': tf.float32,
            'O_boxes': tf.float32,
            'gt_class_sp': tf.float32,
            'gt_class_HO': tf.float32,
            'gt_class_H': tf.float32,
            'gt_class_C': tf.float32,
            'Mask_sp': tf.float32,
            'Mask_HO': tf.float32,
            'Mask_H': tf.float32,
            'sp': tf.float32,
        }, tf.float32, tf.int32, tf.int32, {
            'H_boxes': tf.float32,
            'Hsp_boxes': tf.float32,
            'O_boxes': tf.float32,
            'gt_class_sp': tf.float32,
            'gt_class_HO': tf.float32,
            'gt_class_H': tf.float32,
            'gt_class_C': tf.float32,
            'Mask_sp': tf.float32,
            'Mask_HO': tf.float32,
            'Mask_H': tf.float32,
            'sp': tf.float32,
        }), output_shapes=(tf.TensorShape([1, None, None, 3]), tf.TensorShape([]), tf.TensorShape([]),
                           {
                               'H_boxes': tf.TensorShape([None, 5]),
                               'Hsp_boxes': tf.TensorShape([None, 5]),
                               'O_boxes': tf.TensorShape([None, 5]),
                               'gt_class_sp': tf.TensorShape([None, verb_num]),
                               'gt_class_HO': tf.TensorShape([None, verb_num]),
                               'gt_class_H': tf.TensorShape([None, verb_num]),
                               'gt_class_C': tf.TensorShape([None, 222]),
                               'Mask_sp': tf.TensorShape([None, verb_num]),
                               'Mask_HO': tf.TensorShape([None, verb_num]),
                               'Mask_H': tf.TensorShape([None, verb_num]),
                               'sp': tf.TensorShape([None, 64, 64, 2]),
                           }, tf.TensorShape([1, None, None, 3]), tf.TensorShape([]), tf.TensorShape([]),
                           {
                               'H_boxes': tf.TensorShape([None, 5]),
                               'Hsp_boxes': tf.TensorShape([None, 5]),
                               'O_boxes': tf.TensorShape([None, 5]),
                               'gt_class_sp': tf.TensorShape([None, verb_num]),
                               'gt_class_HO': tf.TensorShape([None, verb_num]),
                               'gt_class_H': tf.TensorShape([None, verb_num]),
                               'gt_class_C': tf.TensorShape([None, 222]),
                               'Mask_sp': tf.TensorShape([None, verb_num]),
                               'Mask_HO': tf.TensorShape([None, verb_num]),
                               'Mask_H': tf.TensorShape([None, verb_num]),
                               'sp': tf.TensorShape([None, 64, 64, 2]),
                           }))
    dataset = dataset.prefetch(100)
    iterator = dataset.make_one_shot_iterator()
    image, image_id, num_pos, blobs, image1, image_id1, num_pos1, blobs1 = iterator.get_next()
    return [image, image1], [image_id, image_id1], [num_pos, num_pos1], [blobs, blobs1]
def obtain_coco_data_hoicoco_24_atl(Pos_augment=15, Neg_select=30, augment_type=0, pattern_type=False, is_zero_shot=0, type=0):
    """Like obtain_coco_data_atl but fixed to the 24-verb V-COCO vocabulary:
    interleaves supervised batches (coco_generator2) with ATL
    weakly-supervised batches, yielding one of each per training step.

    Returns ([image, image1], [image_id, image_id1], [num_pos, num_pos1],
    [blobs, blobs1]); index 0 of each pair is the supervised sample,
    index 1 the ATL sample.
    """
    # default
    verb_num = 24
    g_func = coco_generator2

    def generator3(Pos_augment, Neg_select, augment_type, pattern_type, is_zero_shot):
        # Pair each supervised sample with one ATL sample and yield both
        # as a single flat 8-tuple.
        buffer = [[] for i in range(4)]
        import time
        st = time.time()
        count_time = 0
        avg_time = 0
        semi_func = coco_generator_atl(Pos_augment, Neg_select, augment_type, pattern_type, is_zero_shot, type)
        # semi is atl. a weak-supervised manner.
        for im_orig, image_id, num_pos, blobs in g_func(Pos_augment, Neg_select, augment_type, pattern_type,
                                                        is_zero_shot):
            buffer[0].append(im_orig)
            buffer[1].append(image_id)
            buffer[2].append(num_pos)
            buffer[3].append(blobs)
            # Pull the matching weakly-supervised sample for this step.
            im_orig, image_id, num_pos, blobs = next(semi_func)
            buffer[0].append(im_orig)
            buffer[1].append(image_id)
            buffer[2].append(num_pos)
            buffer[3].append(blobs)
            yield buffer[0][0], buffer[1][0], buffer[2][0], buffer[3][0], buffer[0][1], buffer[1][1], buffer[2][1], \
                  buffer[3][1],
            buffer = [[] for i in range(4)]

    dataset = tf.data.Dataset.from_generator(
        partial(generator3, Pos_augment, Neg_select, augment_type, pattern_type, is_zero_shot),
        output_types=(tf.float32, tf.int32, tf.int32, {
            'H_boxes': tf.float32,
            'Hsp_boxes': tf.float32,
            'O_boxes': tf.float32,
            'gt_class_sp': tf.float32,
            'gt_class_HO': tf.float32,
            'gt_class_H': tf.float32,
            'gt_class_C': tf.float32,
            'Mask_sp': tf.float32,
            'Mask_HO': tf.float32,
            'Mask_H': tf.float32,
            'sp': tf.float32,
        }, tf.float32, tf.int32, tf.int32, {
            'H_boxes': tf.float32,
            'Hsp_boxes': tf.float32,
            'O_boxes': tf.float32,
            'gt_class_sp': tf.float32,
            'gt_class_HO': tf.float32,
            'gt_class_H': tf.float32,
            'gt_class_C': tf.float32,
            'Mask_sp': tf.float32,
            'Mask_HO': tf.float32,
            'Mask_H': tf.float32,
            'sp': tf.float32,
        }), output_shapes=(tf.TensorShape([1, None, None, 3]), tf.TensorShape([]), tf.TensorShape([]),
                           {
                               'H_boxes': tf.TensorShape([None, 5]),
                               'Hsp_boxes': tf.TensorShape([None, 5]),
                               'O_boxes': tf.TensorShape([None, 5]),
                               'gt_class_sp': tf.TensorShape([None, verb_num]),
                               'gt_class_HO': tf.TensorShape([None, verb_num]),
                               'gt_class_H': tf.TensorShape([None, verb_num]),
                               'gt_class_C': tf.TensorShape([None, 222]),
                               'Mask_sp': tf.TensorShape([None, verb_num]),
                               'Mask_HO': tf.TensorShape([None, verb_num]),
                               'Mask_H': tf.TensorShape([None, verb_num]),
                               'sp': tf.TensorShape([None, 64, 64, 2]),
                           }, tf.TensorShape([1, None, None, 3]), tf.TensorShape([]), tf.TensorShape([]),
                           {
                               'H_boxes': tf.TensorShape([None, 5]),
                               'Hsp_boxes': tf.TensorShape([None, 5]),
                               'O_boxes': tf.TensorShape([None, 5]),
                               'gt_class_sp': tf.TensorShape([None, verb_num]),
                               'gt_class_HO': tf.TensorShape([None, verb_num]),
                               'gt_class_H': tf.TensorShape([None, verb_num]),
                               'gt_class_C': tf.TensorShape([None, 222]),
                               'Mask_sp': tf.TensorShape([None, verb_num]),
                               'Mask_HO': tf.TensorShape([None, verb_num]),
                               'Mask_H': tf.TensorShape([None, verb_num]),
                               'sp': tf.TensorShape([None, 64, 64, 2]),
                           }))
    dataset = dataset.prefetch(100)
    iterator = dataset.make_one_shot_iterator()
    image, image_id, num_pos, blobs, image1, image_id1, num_pos1, blobs1 = iterator.get_next()
    return [image, image1], [image_id, image_id1], [num_pos, num_pos1], [blobs, blobs1]
def get_epoch_iters(model_name):
    """Return the iterations-per-epoch for a model, keyed on zero-shot markers
    in its name.

    Names containing 'zsnrare' or 'zs_' use 20000 iterations, 'zsrare' uses
    40000, and everything else uses the full default of 43273. Markers are
    checked in this order, so a name with several markers takes the first
    match (same precedence as the original elif chain).
    """
    # `in` is the idiomatic (and equivalent) form of str.__contains__.
    if 'zsnrare' in model_name:
        return 20000
    elif 'zs_' in model_name:
        return 20000
    elif 'zsrare' in model_name:
        return 40000
    return 43273
def obtain_data_vcl_hico(Pos_augment=15, Neg_select=60, augment_type=0, with_pose=False, zero_shot_type=0, isalign=False,
                         epoch=0):
    """Build the VCL-style HICO training pipeline: every step packs TWO
    consecutive images into one batch (boxes/labels/patterns concatenated),
    and ``split_idx`` is used at the end to split the tensors back into the
    per-image pair.
    """
    # we do not use pose, thus we remove it.
    with open(cfg.DATA_DIR + '/' + 'Trainval_GT_HICO.pkl', "rb") as f:
        Trainval_GT = pickle.load(f, encoding='latin1')
    with open(cfg.DATA_DIR + '/' + 'Trainval_Neg_HICO.pkl', "rb") as f:
        Trainval_N = pickle.load(f, encoding='latin1')
    g_func = generator2

    def generator3(Trainval_GT, Trainval_N, Pos_augment, Neg_select, augment_type):
        # Buffer two consecutive samples, then emit them as one composed batch.
        buffer = [[] for i in range(7)]
        import time
        st = time.time()
        count_time = 0
        avg_time = 0
        for im_orig, image_id, num_pos, Human_augmented, Object_augmented, action_HO, Pattern in g_func(
                Trainval_GT, Trainval_N, Pos_augment, Neg_select, augment_type, with_pose, zero_shot_type,
                isalign, epoch):
            buffer[0].append(im_orig)
            buffer[1].append(image_id)
            buffer[2].append(num_pos)
            buffer[3].append(Human_augmented)
            buffer[4].append(Object_augmented)
            buffer[5].append(action_HO)
            buffer[6].append(Pattern)
            if len(buffer[0]) > 1:
                if len(buffer[3][0]) < len(buffer[3][1]):
                    # make sure the second batch is less: swap so the larger
                    # sample of the pair always comes first.
                    for i in range(len(buffer)):
                        tmp = buffer[i][0]
                        buffer[i][0] = buffer[i][1]
                        buffer[i][1] = tmp
                # Boundary between the two samples after concatenation
                # (row count of the first sample's label matrix).
                split_idx = len(buffer[5][0])
                # Concatenate boxes/labels/patterns of the two samples.
                # NOTE(review): `+ buffer[-1:]` appends the raw Pattern list as
                # a trailing 8th element that is never yielded — looks vestigial.
                buffer = buffer[:3] + [np.concatenate(item, axis=0) for item in buffer[3:]] + buffer[-1:]
                yield buffer[0][0], buffer[0][1], buffer[1], buffer[2], buffer[3], buffer[4], buffer[5], buffer[
                    6], split_idx
                buffer = [[] for i in range(7)]

    if with_pose:
        pattern_channel = 3
    else:
        pattern_channel = 2
    dataset = tf.data.Dataset.from_generator(
        partial(generator3, Trainval_GT, Trainval_N, Pos_augment, Neg_select, augment_type),
        output_types=(
            tf.float32, tf.float32, tf.int32, tf.int64, tf.float32, tf.float32, tf.float32, tf.float32, tf.int32),
        output_shapes=(
            tf.TensorShape([1, None, None, 3]),
            tf.TensorShape([1, None, None, 3]),
            tf.TensorShape([2, ]),
            tf.TensorShape([2, ]),
            tf.TensorShape([None, 5]),
            tf.TensorShape([None, 5]),
            tf.TensorShape([None, 600]),
            tf.TensorShape([None, 64, 64, pattern_channel]),
            tf.TensorShape([])
        )
    )
    dataset = dataset.prefetch(100)
    iterator = dataset.make_one_shot_iterator()
    image, image2, image_id, num_pos, Human_augmented, Object_augmented, action_HO, sp, split_idx = iterator.get_next()
    # Split each concatenated tensor back into the two underlying samples.
    return [image, image2], image_id, num_pos, [Human_augmented[:split_idx], Human_augmented[split_idx:]], \
           [Object_augmented[:split_idx], Object_augmented[split_idx:]], \
           [action_HO[:split_idx], action_HO[split_idx:]], \
           [sp[:split_idx], sp[split_idx:]]
def Augmented_HO_Neg_HICO_inner(GT, negs, shape, Pos_augment, Neg_select, with_pose):
    """Build augmented positive human/object pairs plus sampled negatives for
    one HICO image.

    GT holds the ground truth for the image: image id at GT[0], action
    labels at GT[1] (fed to Generate_action_HICO), human box at GT[2],
    object box at GT[3], and — when with_pose — a pose at GT[5].
    Pos_augment < 0 disables positives entirely; Neg_select caps how many
    negatives are drawn from `negs` (None/0 means no negatives).
    Returns (Pattern, Human_augmented, Object_augmented, action_HO, num_pos).
    """
    image_id = GT[0]
    Human = GT[2]
    Object = GT[3]
    pose_list = []
    if Pos_augment < 0:
        # No positives requested: start from empty batches.
        action_HO = np.empty([0, 600])
        Human_augmented = np.empty([0, 5])
        Object_augmented = np.empty([0, 5])
        num_pos = 0
    else:
        action_HO_ = Generate_action_HICO(GT[1])
        action_HO = action_HO_
        Human_augmented = Augmented_box(Human, shape, image_id, Pos_augment)
        Object_augmented = Augmented_box(Object, shape, image_id, Pos_augment)
        # Truncate both lists to the shorter one so they stay paired.
        Human_augmented = Human_augmented[:min(len(Human_augmented), len(Object_augmented))]
        Object_augmented = Object_augmented[:min(len(Human_augmented), len(Object_augmented))]
        num_pos = len(Human_augmented)
        # Repeat the label row so every augmented pair carries the same labels.
        for i in range(num_pos - 1):
            action_HO = np.concatenate((action_HO, action_HO_), axis=0)
        if with_pose: pose_list = [GT[5]] * num_pos
    num_pos_neg = len(Human_augmented)
    if with_pose:
        pattern_channel = 3
    else:
        pattern_channel = 2
    # NOTE(review): get_pattern in this file always builds 2-channel patterns;
    # the pattern_channel=3 reshape below presumably relies on a pose-aware
    # variant being in effect — confirm before using with_pose=True.
    Pattern = get_pattern(Human_augmented, Object_augmented, num_pos_neg, pose_list, shape, with_pose)
    if negs is not None and Neg_select > 0:
        if len(negs) < Neg_select:
            # Fewer negatives than requested: take them all, in order.
            Neg_select = len(negs)
            List = range(Neg_select)
        else:
            List = random.sample(range(len(negs)), Neg_select)
        _Human_augmented, _Object_augmented, _action_HO, _Pattern = get_neg_items(List, negs, shape, with_pose)
        Human_augmented = np.concatenate([Human_augmented, _Human_augmented], axis=0)
        Object_augmented = np.concatenate([Object_augmented, _Object_augmented], axis=0)
        action_HO = np.concatenate([action_HO, _action_HO], axis=0)
        Pattern = np.concatenate([Pattern, _Pattern], axis=0)
        num_pos_neg = len(Human_augmented)
    Pattern = Pattern.reshape(num_pos_neg, 64, 64, pattern_channel)
    Human_augmented = Human_augmented.reshape(num_pos_neg, 5)
    Object_augmented = Object_augmented.reshape(num_pos_neg, 5)
    action_HO = action_HO.reshape(num_pos_neg, 600)
    return Pattern, Human_augmented, Object_augmented, action_HO, num_pos
def get_pattern(Human_augmented, Object_augmented, num_pos_neg, pose_list, shape, with_pose):
    """Build the (num_pos_neg, 64, 64, 2) interaction-pattern tensor for the
    given human/object box pairs.

    Boxes are [batch_idx, x1, y1, x2, y2]; the leading batch index is
    stripped before calling Get_next_sp. pose_list/shape/with_pose are
    accepted for interface compatibility but unused here — the pattern
    always has 2 channels.
    """
    pattern_channel = 2
    # Collect the per-pair patterns and concatenate ONCE at the end; the
    # original np.concatenate inside the loop was quadratic in num_pos_neg.
    patterns = [
        Get_next_sp(Human_augmented[i][1:], Object_augmented[i][1:]).reshape(1, 64, 64, pattern_channel)
        for i in range(num_pos_neg)
    ]
    if not patterns:
        # Same empty result (shape and dtype) as the original seed array.
        return np.empty((0, 64, 64, pattern_channel), dtype=np.float32)
    return np.concatenate(patterns, axis=0)
def get_neg_items(neg_select_list, negs, shape, with_pose):
    """Collect the selected negative human/object pairs into batch arrays.

    neg_select_list: indices into `negs`. Each Neg record holds the action
    id at [1], the human box at [2], the object box at [3], and — when
    with_pose — the pose at [7].
    Returns (Human_augmented, Object_augmented, action_HO, Pattern) with one
    row per selected negative; box rows are [0, x1, y1, x2, y2] (batch
    index fixed at 0).
    """
    human_rows = []
    object_rows = []
    action_rows = []
    pose_list = []
    for idx in neg_select_list:
        Neg = negs[idx]
        if with_pose:
            pose_list.append(Neg[7])
        human_rows.append(np.array([0, Neg[2][0], Neg[2][1], Neg[2][2], Neg[2][3]]).reshape(1, 5))
        object_rows.append(np.array([0, Neg[3][0], Neg[3][1], Neg[3][2], Neg[3][3]]).reshape(1, 5))
        action_rows.append(Generate_action_HICO([Neg[1]]))
    # Concatenate ONCE instead of inside the loop (the original was quadratic).
    # The empty float seed keeps the result dtype identical to the original
    # incremental concatenation, even for integer box coordinates.
    Human_augmented = np.concatenate([np.empty([0, 5])] + human_rows, axis=0)
    Object_augmented = np.concatenate([np.empty([0, 5])] + object_rows, axis=0)
    action_HO = np.concatenate([np.empty([0, 600])] + action_rows, axis=0)
    num_pos_neg = len(Human_augmented)
    Pattern = get_pattern(Human_augmented, Object_augmented, num_pos_neg, pose_list, shape, with_pose)
    return Human_augmented, Object_augmented, action_HO, Pattern
| 49.442618
| 163
| 0.532989
| 20,492
| 153,371
| 3.761419
| 0.04026
| 0.040478
| 0.03573
| 0.004567
| 0.870406
| 0.848882
| 0.829993
| 0.81074
| 0.79028
| 0.774738
| 0
| 0.089126
| 0.318404
| 153,371
| 3,101
| 164
| 49.458562
| 0.648211
| 0.102066
| 0
| 0.699747
| 0
| 0
| 0.043074
| 0.013681
| 0
| 0
| 0
| 0
| 0.002534
| 1
| 0.025338
| false
| 0.002111
| 0.016047
| 0
| 0.061233
| 0.009291
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0f0e142729a14317a69e8d385afb46bde4284923
| 61
|
py
|
Python
|
ContactHands/contact_hands_two_stream/evaluation/__init__.py
|
seoyoon130/Graduation_Project
|
9082cb93fb4f73c3a1577f63e906e6eb7f147dc4
|
[
"Apache-2.0"
] | 26
|
2020-10-20T01:58:26.000Z
|
2022-02-24T11:48:10.000Z
|
ContactHands/contact_hands_two_stream/evaluation/__init__.py
|
seoyoon130/Graduation_Project
|
9082cb93fb4f73c3a1577f63e906e6eb7f147dc4
|
[
"Apache-2.0"
] | 5
|
2020-10-21T05:39:08.000Z
|
2021-09-17T13:57:29.000Z
|
contact_hands_two_stream/evaluation/__init__.py
|
cvlab-stonybrook/ContactHands
|
6aba9a5f098b50529e589b7835264df9264844e9
|
[
"MIT"
] | 1
|
2022-02-24T11:48:14.000Z
|
2022-02-24T11:48:14.000Z
|
from .evaluator_ourdata import PascalVOCContactHandsEvaluator
| 61
| 61
| 0.934426
| 5
| 61
| 11.2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.04918
| 61
| 1
| 61
| 61
| 0.965517
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0f0f1ff11d8e7394f9595e6cacdd63bb00caad24
| 368
|
py
|
Python
|
unsup_spatial_pred/__init__.py
|
alaflaquiere/unsupervised-spatial-predictor
|
3c8aa02dc20782d31d1df791dd5e92dce275aec2
|
[
"MIT"
] | null | null | null |
unsup_spatial_pred/__init__.py
|
alaflaquiere/unsupervised-spatial-predictor
|
3c8aa02dc20782d31d1df791dd5e92dce275aec2
|
[
"MIT"
] | null | null | null |
unsup_spatial_pred/__init__.py
|
alaflaquiere/unsupervised-spatial-predictor
|
3c8aa02dc20782d31d1df791dd5e92dce275aec2
|
[
"MIT"
] | null | null | null |
from unsup_spatial_pred.utils.data_utils import get_dataloader, load_regular_grid
from unsup_spatial_pred.network.siamese_network import SiameseSMPredictor
from unsup_spatial_pred.analyze.evaluator import Evaluator
from unsup_spatial_pred.analyze.live_visualizer import save_embedding, start_display_server
from unsup_spatial_pred.train.training import run_experiment
| 61.333333
| 91
| 0.907609
| 52
| 368
| 6.038462
| 0.538462
| 0.143312
| 0.254777
| 0.318471
| 0.171975
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.059783
| 368
| 5
| 92
| 73.6
| 0.907514
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0f36515b28109285aae7c23e030b46c1cbf8a95e
| 151
|
py
|
Python
|
src/rendering/__init__.py
|
CorentinBrtx/image-stitching
|
7ee7eda3bd8717dd9996eacb9d58eb4ed1e6ad80
|
[
"MIT"
] | 9
|
2022-01-25T14:59:57.000Z
|
2022-03-24T13:25:23.000Z
|
src/rendering/__init__.py
|
CorentinBrtx/image-stitching
|
7ee7eda3bd8717dd9996eacb9d58eb4ed1e6ad80
|
[
"MIT"
] | null | null | null |
src/rendering/__init__.py
|
CorentinBrtx/image-stitching
|
7ee7eda3bd8717dd9996eacb9d58eb4ed1e6ad80
|
[
"MIT"
] | null | null | null |
from .gain_compensation import set_gain_compensations
from .multiband_blending import multi_band_blending
from .simple_blending import simple_blending
| 37.75
| 53
| 0.900662
| 20
| 151
| 6.4
| 0.55
| 0.21875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.07947
| 151
| 3
| 54
| 50.333333
| 0.920863
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0f4d4816c48653d27898204dea4f719c0e7b0d70
| 18,887
|
py
|
Python
|
steprocker.py
|
RavnikM/Py-TMCM-1110-Steprocker
|
7e6e833514b0da38c0ef4c2420246c0b940e2f47
|
[
"MIT"
] | null | null | null |
steprocker.py
|
RavnikM/Py-TMCM-1110-Steprocker
|
7e6e833514b0da38c0ef4c2420246c0b940e2f47
|
[
"MIT"
] | null | null | null |
steprocker.py
|
RavnikM/Py-TMCM-1110-Steprocker
|
7e6e833514b0da38c0ef4c2420246c0b940e2f47
|
[
"MIT"
] | null | null | null |
import serial
import struct
from threading import Lock
class Steprocker():
def __init__(self,port):
self.lock = Lock()
self.serial = serial.Serial(port=port,
baudrate=9600,
bytesize=serial.EIGHTBITS,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
timeout=None) # behavior of read(x) -> wait forever / until requested number of bytes are received
def __del__(self):
if self.serial.isOpen():
self.serial.close()
def rs232_checksum(self,the_bytes):
value = b'%02X' % (sum(the_bytes) & 0xFF)
return struct.pack(">B", int(value,16))
def get_maximum_positioning_speed(self):
# Fiksni del
# x01 - Target address (1 = board number)
# x06 - Instruction number (6 = GAP(Get axis parameter))
# x04 - Type (4 = Maximum positioning speed)
# x00 - Motor / Bank
# x00 - Value byte3 (not used)
# x00 - Value byte2 (not used)
# x00 - Value byte1 (not used)
# x00 - Value byte0 (not used)
try:
self.lock.acquire()
packet = b'\x01\x06\x04\x00\x00\x00\x00\x00'
packet = packet + self.rs232_checksum(packet)
self.serial.write(packet)
reply = self.serial.read(9)
return struct.unpack('>i', reply[4:8])[0]
finally:
self.lock.release()
def set_maximum_positioning_speed(self, value):
# Fiksni del
# x01 - Target address (1 = board number)
# x05 - Instruction number (5 = SAP(Set axis parameter))
# x04 - Type (4 = Maximum positioning speed)
# x00 - Motor / Bank
# Variabilni del
# Value byte3
# Value byte2
# Value byte1
# Value byte0
try:
self.lock.acquire()
fixed_part = b'\x01\x05\x04\x00'
uvalue_bytes = struct.pack(">i", value)
packet = fixed_part + uvalue_bytes
packet = packet + self.rs232_checksum(packet)
self.serial.write(packet)
reply = self.serial.read(9)
if reply[2] == 100:
return True
else:
return False
finally:
self.lock.release()
def get_maximum_acceleration(self):
# Fiksni del
# x01 - Target address (1 = board number)
# x06 - Instruction number (6 = GAP(Get axis parameter))
# x05 - Type (5 = Maximum acceleration)
# x00 - Motor / Bank
# x00 - Value byte3 (not used)
# x00 - Value byte2 (not used)
# x00 - Value byte1 (not used)
# x00 - Value byte0 (not used)
try:
self.lock.acquire()
packet = b'\x01\x06\x05\x00\x00\x00\x00\x00'
packet = packet + self.rs232_checksum(packet)
self.serial.write(packet)
reply = self.serial.read(9)
return struct.unpack('>i', reply[4:8])[0]
finally:
self.lock.release()
def set_maximum_acceleration(self, value):
# Fiksni del
# x01 - Target address (1 = board number)
# x05 - Instruction number (5 = SAP(Set axis parameter))
# x05 - Type (5 = Maximum acceleration)
# x00 - Motor / Bank
# Variabilni del
# Value byte3
# Value byte2
# Value byte1
# Value byte0
try:
self.lock.acquire()
fixed_part = b'\x01\x05\x05\x00'
uvalue_bytes = struct.pack(">i", value)
packet = fixed_part + uvalue_bytes
packet = packet + self.rs232_checksum(packet)
self.serial.write(packet)
reply = self.serial.read(9)
if reply[2] == 100:
return True
else:
return False
finally:
self.lock.release()
def get_actual_position(self):
# Fiksni del
# x01 - Target address (1 = board number)
# x06 - Instruction number (6 = GAP(Get axis parameter))
# x01 - Type (1 = Actual position)
# x00 - Motor / Bank
# x00 - Value byte3 (not used)
# x00 - Value byte2 (not used)
# x00 - Value byte1 (not used)
# x00 - Value byte0 (not used)
try:
self.lock.acquire()
packet = b'\x01\x06\x01\x00\x00\x00\x00\x00'
packet = packet + self.rs232_checksum(packet)
self.serial.write(packet)
reply = self.serial.read(9)
return struct.unpack('>i', reply[4:8])[0]
finally:
self.lock.release()
def set_actual_position(self, value):
# Fiksni del
# x01 - Target address (1 = board number)
# x05 - Instruction number (5 = SAP(Set axis parameter))
# x01 - Type (1 = Actual position)
# x00 - Motor / Bank
# Variabilni del
# Value byte3
# Value byte2
# Value byte1
# Value byte0
try:
self.lock.acquire()
fixed_part = b'\x01\x05\x01\x00'
uvalue_bytes = struct.pack(">i", value)
packet = fixed_part + uvalue_bytes
packet = packet + self.rs232_checksum(packet)
self.serial.write(packet)
reply = self.serial.read(9)
if reply[2] == 100:
return True
else:
return False
finally:
self.lock.release()
def get_target_position(self):
# Fiksni del
# x01 - Target address (1 = board number)
# x06 - Instruction number (6 = GAP(Get axis parameter))
# x00 - Type (1 = Target position)
# x00 - Motor / Bank
# Variabilni del
# x00 - Value byte3 (not used)
# x00 - Value byte2 (not used)
# x00 - Value byte1 (not used)
# x00 - Value byte0 (not used)
try:
self.lock.acquire()
packet = b'\x01\x06\x00\x00\x00\x00\x00\x00'
packet = packet + self.rs232_checksum(packet)
self.serial.write(packet)
reply = self.serial.read(9)
return struct.unpack('>i', reply[4:8])[0]
finally:
self.lock.release()
def set_target_position(self, value):
# Fiksni del
# x01 - Target address (1 = board number)
# x05 - Instruction number (5 = SAP(Set axis parameter))
# x00 - Type (0 = Target position)
# x00 - Motor / Bank
# Variabilni del
# Value byte3
# Value byte2
# Value byte1
# Value byte0
try:
self.lock.acquire()
fixed_part = b'\x01\x05\x00\x00'
uvalue_bytes = struct.pack(">i", value)
packet = fixed_part + uvalue_bytes
packet = packet + self.rs232_checksum(packet)
self.serial.write(packet)
reply = self.serial.read(9)
if reply[2] == 100:
return True
else:
return False
finally:
self.lock.release()
def get_run_current(self):
# Fiksni del
# x01 - Target address 1 = board number
# x06 - Instruction number 5 = GAP(Get axis parameter)
# x06 - Type 6 = run currnet
# x00 - Motor / Bank
# Variabilni del
# x00 - Value byte3 (not used)
# x00 - Value byte2 (not used)
# x00 - Value byte1 (not used)
# x00 - Value byte0 (not used)
try:
self.lock.acquire()
packet = b'\x01\x06\x06\x00\x00\x00\x00\x00'
packet = packet + self.rs232_checksum(packet)
self.serial.write(packet)
reply = self.serial.read(9)
return reply[7]
finally:
self.lock.release()
def set_run_current(self, value):
# Fiksni del
# x01 - Target address (1 = board number)
# x05 - Instruction number (5 = SAP(Set axis parameter))
# x06 - Type (6 = run currnet)
# x00 - Motor / Bank
# Variabilni del
# x00 - Value byte3 - not used
# x00 - Value byte2 - not used
# x00 - Value byte1 - not used
# Value byte0 - parameter value 0..255
try:
self.lock.acquire()
fixed_part = b'\x01\x05\x06\x00\x00\x00\x00'
uvalue_bytes = struct.pack("B", value)
packet = fixed_part + uvalue_bytes
packet = packet + self.rs232_checksum(packet)
self.serial.write(packet)
reply = self.serial.read(9)
if reply[2] == 100:
return True
else:
return False
finally:
self.lock.release()
def get_standby_current(self):
# Fiksni del
# x01 - Target address (1 = board number)
# x06 - Instruction number (6 = GAP(Get axis parameter))
# x07 - Type (7 = standby currnet)
# x00 - Motor / Bank
# Variabilni del
# x00 - Value byte3 - not used
# x00 - Value byte2 - not used
# x00 - Value byte1 - not used
# x00 - Value byte0 - not used
try:
self.lock.acquire()
packet = b'\x01\x06\x07\x00\x00\x00\x00\x00'
packet = packet + self.rs232_checksum(packet)
self.serial.write(packet)
reply = self.serial.read(9)
return reply[7]
finally:
self.lock.release()
def set_standby_current(self, value):
# Fiksni del
# x01 - Target address (1 = board number)
# x05 - Instruction number (5 = SAP(Set axis parameter))
# x07 - Type (7 = standby currnet)
# x00 - Motor / Bank
# Variabilni del
# x00 - Value byte3 - not used
# x00 - Value byte2 - not used
# x00 - Value byte1 - not used
# Value - Value byte0 - parameter value 0..255
try:
self.lock.acquire()
fixed_part = b'\x01\x05\x07\x00\x00\x00\x00'
uvalue_bytes = struct.pack("B", value)
packet = fixed_part + uvalue_bytes
packet = packet + self.rs232_checksum(packet)
self.serial.write(packet)
reply = self.serial.read(9)
if reply[2] == 100:
return True
else:
return False
finally:
self.lock.release()
def get_micro_steps(self, return_str=False):
# microsteps0..8
# Microstep resolutions per full step:
# 0 fullstep
# 1 halfstep
# 2 4 microsteps
# 3 8 microsteps
# 4 16 microsteps
# 5 32 microsteps
# 6 64 microsteps
# 7 128 microsteps
# 8 256 microsteps
# Fiksni del
# x01 - Target address (1 = board number)
# x06 - Instruction number (6 = GAP(Get axis parameter))
# x8C - Type (140 = get microsteps)
# x00 - Motor / Bank
# Variabilni del
# x00 - Value byte3 - not used
# x00 - Value byte2 - not used
# x00 - Value byte1 - not used
# x00 - Value byte0 - not used
try:
self.lock.acquire()
packet = b'\x01\x06\x8C\x00\x00\x00\x00\x00'
packet = packet + self.rs232_checksum(packet)
self.serial.write(packet)
reply = self.serial.read(9)
if reply[7] == 0:
if not return_str:
return 1
else:
return 'full-steps'
elif reply[7] == 1:
if not return_str:
return 2
else:
return 'half-steps'
elif reply[7] == 2:
if not return_str:
return 4
else:
return '4-microsteps'
elif reply[7] == 3:
if not return_str:
return 8
else:
return '8-microsteps'
elif reply[7] == 4:
if not return_str:
return 16
else:
return '16-microsteps'
elif reply[7] == 5:
if not return_str:
return 32
else:
return '32-microsteps'
elif reply[7] == 6:
if not return_str:
return 64
else:
return '64-microsteps'
elif reply[7] == 7:
if not return_str:
return 128
else:
return '128-microsteps'
elif reply[7] == 8:
if not return_str:
return 256
else:
return '256-microsteps'
finally:
self.lock.release()
def set_micro_steps(self, value):
# microsteps 0..8
# Microstep resolutions per full step:
# 0 - fullstep
# 1 - halfstep
# 2 - 4 microsteps
# 3 - 8 microsteps
# 4 - 16 microsteps
# 5 - 32 microsteps
# 6 - 64 microsteps
# 7 - 128 microsteps
# 8 - 256 microsteps
if value == 1:
true_value = 0
elif value == 2:
true_value = 1
elif value == 4:
true_value = 2
elif value == 8:
true_value = 3
elif value == 16:
true_value = 4
elif value == 32:
true_value = 5
elif value == 64:
true_value = 6
elif value == 128:
true_value = 7
elif value == 256:
true_value = 8
# Fiksni del
# x01 - Target address 1 = board number
# x05 - Instruction number (5 = SAP(Set axis parameter))
# x8C - Type 140 = set microsteps
# x00 - Motor / Bank
# Variabilni del
# x00 - Value byte3 - not used
# x00 - Value byte2 - not used
# x00 - Value byte1 - not used
# Value - Value byte0 - parameter value 0..8
try:
self.lock.acquire()
fixed_part = b'\x01\x05\x8C\x00\x00\x00\x00'
uvalue_bytes = struct.pack("B", true_value)
packet = fixed_part + uvalue_bytes
packet = packet + self.rs232_checksum(packet)
self.serial.write(packet)
reply = self.serial.read(9)
if reply[2] == 100:
return True
else:
return False
finally:
self.lock.release()
def get_target_position_reached(self):
# Fiksni del
# x01 - Target address (1 = board number)
# x06 - Instruction number (6 = GAP(Get axis parameter))
# x08 - Type (8 = Target position reached flag)
# x00 - Motor / Bank
# Variabilni del
# x00 - Value byte3 - not used
# x00 - Value byte2 - not used
# x00 - Value byte1 - not used
# x00 - Value byte0 - not used
try:
self.lock.acquire()
packet = b'\x01\x06\x08\x00\x00\x00\x00\x00'
packet = packet + self.rs232_checksum(packet)
self.serial.write(packet)
reply = self.serial.read(9)
if reply[7] == 1:
return True
elif reply[7] == 0:
return False
finally:
self.lock.release()
def motor_stop(self):
# Fiksni del
# x01 - Target address (1 = board number)
# x03 - Instruction number (3 = MST(Motor Stop))
# x00 - Type (0 - not used)
# x00 - Motor / Bank
# Variabilni del
# x00 - Value byte3 - not used
# x00 - Value byte2 - not used
# x00 - Value byte1 - not used
# x00 - Value byte0 - not used
try:
self.lock.acquire()
packet = b'\x01\x03\x00\x00\x00\x00\x00\x00'
packet = packet + self.rs232_checksum(packet)
self.serial.write(packet)
reply = self.serial.read(9)
if reply[2] == 100:
return True
else:
return False
finally:
self.lock.release()
def movetopos_abs(self, microsteps, calculate_to_fullsteps=False):
    """Move the motor to an absolute position (MVP ABS command).

    Args:
        microsteps: Target position, in microsteps by default.
        calculate_to_fullsteps: When True, treat ``microsteps`` as full
            steps and convert it using the device's current microstep
            resolution (``get_micro_steps()``).

    Returns:
        bool: True when the controller acknowledges the command
        (reply status byte == 100), False otherwise.
    """
    # Convert BEFORE taking the lock: get_micro_steps() talks to the
    # device itself.  In the old code this call sat inside the try
    # block but before lock.acquire(), so an exception here made the
    # finally clause release a lock that was never acquired.
    if calculate_to_fullsteps:
        microsteps = microsteps * self.get_micro_steps()
    # Fixed part of the request:
    # x01 - Target address (1 = board number)
    # x04 - Instruction number (4 = MVP (Move to Position))
    # x00 - Type (0 = ABSolute)
    # x00 - Motor / Bank
    # Variable part: target position as big-endian signed 32-bit int.
    with self.lock:
        fixed_part = b'\x01\x04\x00\x00'
        usteps_bytes = struct.pack(">i", microsteps)
        packet = fixed_part + usteps_bytes
        packet = packet + self.rs232_checksum(packet)
        self.serial.write(packet)
        reply = self.serial.read(9)
        return reply[2] == 100
def movetopos_rel(self, microsteps, calculate_to_fullsteps=False):
    """Move the motor by a relative offset (MVP REL command).

    Args:
        microsteps: Signed offset from the current position, in
            microsteps by default.
        calculate_to_fullsteps: When True, treat ``microsteps`` as full
            steps and convert it using the device's current microstep
            resolution (``get_micro_steps()``).

    Returns:
        bool: True when the controller acknowledges the command
        (reply status byte == 100), False otherwise.
    """
    # Convert BEFORE taking the lock: get_micro_steps() talks to the
    # device itself.  In the old code this call sat inside the try
    # block but before lock.acquire(), so an exception here made the
    # finally clause release a lock that was never acquired.
    if calculate_to_fullsteps:
        microsteps = microsteps * self.get_micro_steps()
    # Fixed part of the request:
    # x01 - Target address (1 = board number)
    # x04 - Instruction number (4 = MVP (Move to Position))
    # x01 - Type (1 = Relative)
    # x00 - Motor / Bank
    # Variable part: position offset as big-endian signed 32-bit int.
    with self.lock:
        fixed_part = b'\x01\x04\x01\x00'
        usteps_bytes = struct.pack(">i", microsteps)
        packet = fixed_part + usteps_bytes
        packet = packet + self.rs232_checksum(packet)
        self.serial.write(packet)
        reply = self.serial.read(9)
        return reply[2] == 100
| 33.726786
| 127
| 0.488219
| 2,028
| 18,887
| 4.480276
| 0.073965
| 0.032357
| 0.034669
| 0.054479
| 0.858684
| 0.835021
| 0.831279
| 0.825116
| 0.825116
| 0.813119
| 0
| 0.079542
| 0.42156
| 18,887
| 560
| 128
| 33.726786
| 0.752128
| 0.278816
| 0
| 0.67619
| 0
| 0
| 0.047271
| 0.028922
| 0
| 0
| 0.000311
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.009524
| 0
| 0.228571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0f73d88b9937826c02dab99cd4c7276831c1e11d
| 147
|
py
|
Python
|
testapp/wagtail_wordpress_importer/test/__init__.py
|
nickmoreton/wagtail_wordpress_importer
|
fbe6b60ae624edac3f42a62ce30af4a0c548b4ed
|
[
"MIT"
] | null | null | null |
testapp/wagtail_wordpress_importer/test/__init__.py
|
nickmoreton/wagtail_wordpress_importer
|
fbe6b60ae624edac3f42a62ce30af4a0c548b4ed
|
[
"MIT"
] | null | null | null |
testapp/wagtail_wordpress_importer/test/__init__.py
|
nickmoreton/wagtail_wordpress_importer
|
fbe6b60ae624edac3f42a62ce30af4a0c548b4ed
|
[
"MIT"
] | null | null | null |
from .api_fetcher_tests import *
from .base_importer_command_tests import *
from .import_delete_commands_tests import *
from .utils_tests import *
| 29.4
| 43
| 0.836735
| 21
| 147
| 5.428571
| 0.52381
| 0.385965
| 0.394737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108844
| 147
| 4
| 44
| 36.75
| 0.870229
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
7e1e3187a0e1016eb15ae2703d1db86e3b2ac079
| 10,078
|
py
|
Python
|
autox/autoxserver.py
|
fanghy06/AutoX
|
0bad349ef1b047152e2608760fd5d197128be723
|
[
"Apache-2.0"
] | 499
|
2021-07-27T02:57:58.000Z
|
2022-03-28T12:08:27.000Z
|
autox/autoxserver.py
|
fanghy06/AutoX
|
0bad349ef1b047152e2608760fd5d197128be723
|
[
"Apache-2.0"
] | 9
|
2021-08-03T15:14:56.000Z
|
2022-03-11T07:06:06.000Z
|
autox/autoxserver.py
|
fanghy06/AutoX
|
0bad349ef1b047152e2608760fd5d197128be723
|
[
"Apache-2.0"
] | 87
|
2021-07-27T01:13:02.000Z
|
2022-03-29T02:14:09.000Z
|
from autox.autox_server.ensemble import ensemble
from autox.autox_server.feature_engineer import fe_count, fe_onehot, fe_shift, fe_time_diff
from autox.autox_server.feature_engineer import fe_kv, fe_stat_for_same_prefix, fe_frequency
from autox.autox_server.feature_engineer import fe_time_count, fe_window_count, fe_time_rolling_count
from autox.autox_server.feature_engineer import fe_window2, fe_txt
from autox.autox_server.join_table import join_table
from autox.autox_server.model import lgb_with_fe, lgb_for_feature_selection
from autox.autox_server.model import model_util
from autox.autox_server.pre_process import process_1, process_2, process_3
from autox.autox_server.read_data import read_data
from autox.autox_server.util import log, load_obj
from autox.autox_server.util import merge_table, save_obj
class AutoXServer():
    """End-to-end AutoML pipeline server.

    fit() reads the training tables, runs preprocessing / table joins /
    feature engineering, trains LightGBM models and builds an ensemble;
    predict() replays the same pipeline on test data and returns the
    ensemble prediction.  State is kept in G_df_dict / G_data_info /
    G_hist and can be persisted with save_server() / load_server().
    """

    def __init__(self, is_train, server_name, data_info_path=None, train_set_path=None):
        """Create a server.

        Args:
            is_train: True for a training server (both paths required),
                False for an inference server (both paths must be None;
                state comes from fit() or load_server()).
            server_name: Name used for logging and for pickle filenames.
            data_info_path: Path to the data-info description (train only).
            train_set_path: Path to the training set (train only).
        """
        if is_train:
            assert (data_info_path is not None and train_set_path is not None)
        else:
            assert (data_info_path is None and train_set_path is None)
        self.is_train = is_train
        self.data_info_path = data_info_path
        self.train_set_path = train_set_path
        self.server_name = server_name

    def _run_pipeline(self, is_train, remain_time, data_name):
        """Shared preprocess -> join -> feature-engineering -> model ->
        ensemble pipeline used verbatim by both fit() and predict().

        Every stage consumes and returns the remaining time budget.
        """
        lgb_para_dict_1 = model_util.lgb_para_dict_1
        lgb_para_dict_2 = model_util.lgb_para_dict_2
        params_1 = model_util.params_1
        params_2 = model_util.params_2
        # Preprocessing and table joins.
        remain_time = process_1.preprocess(self.G_df_dict, self.G_data_info, self.G_hist, is_train, remain_time)
        remain_time = join_table.join_simple_tables(self.G_df_dict, self.G_data_info, self.G_hist, is_train, remain_time)
        remain_time = process_2.preprocess_2(self.G_df_dict, self.G_data_info, self.G_hist, is_train, remain_time)
        remain_time = join_table.join_indirect_1_to_M_tables(self.G_df_dict, self.G_data_info, self.G_hist, is_train=is_train, remain_time=remain_time)
        remain_time = join_table.preprocess_after_join_indirect_tables(self.G_df_dict, self.G_data_info, self.G_hist, is_train=is_train, remain_time=remain_time)
        remain_time = join_table.join_1_to_M_tables(self.G_df_dict, self.G_data_info, self.G_hist, is_train=is_train, remain_time=remain_time)
        remain_time = process_3.preprocess_3(self.G_df_dict, self.G_data_info, self.G_hist, is_train, remain_time)
        # Feature engineering stages.
        remain_time = fe_kv.fe_kv(self.G_df_dict, self.G_data_info, self.G_hist, is_train, remain_time, False)
        remain_time = fe_stat_for_same_prefix.fe_stat_for_same_prefix(self.G_df_dict, self.G_data_info, self.G_hist, is_train, remain_time, False)
        remain_time = fe_frequency.fe_frequency(self.G_df_dict, self.G_data_info, self.G_hist, is_train, remain_time, False)
        remain_time = fe_count.fe_count(self.G_df_dict, self.G_data_info, self.G_hist, is_train, remain_time, False)
        remain_time = fe_shift.fe_shift(self.G_df_dict, self.G_data_info, self.G_hist, is_train, remain_time, False)
        remain_time = fe_time_diff.fe_time_diff(self.G_df_dict, self.G_data_info, self.G_hist, is_train, remain_time, False)
        remain_time = fe_time_count.fe_time_count(self.G_df_dict, self.G_data_info, self.G_hist, is_train, remain_time, False)
        remain_time = fe_window_count.fe_window_count(self.G_df_dict, self.G_data_info, self.G_hist, is_train, remain_time, False)
        remain_time = fe_time_rolling_count.fe_time_rolling_count(self.G_df_dict, self.G_data_info, self.G_hist, is_train, remain_time, False)
        remain_time = fe_window2.fe_window2(self.G_df_dict, self.G_data_info, self.G_hist, is_train, remain_time, False)
        remain_time = fe_onehot.fe_onehot(self.G_df_dict, self.G_data_info, self.G_hist, is_train, remain_time)
        remain_time = fe_txt.fe_txt(self.G_df_dict, self.G_data_info, self.G_hist, is_train, remain_time)
        remain_time = merge_table(self.G_df_dict, self.G_data_info, self.G_hist, is_train, remain_time)
        # Feature selection, two LightGBM runs, then ensembling.
        exp_name = 'feature_selection'
        remain_time = lgb_for_feature_selection.lgb_for_feature_selection(self.G_df_dict, self.G_data_info, self.G_hist, is_train, remain_time, params_1, lgb_para_dict_1, data_name, exp_name)
        exp_name_1 = 'fe_lgb'
        remain_time = lgb_with_fe.lgb_with_fe(self.G_df_dict, self.G_data_info, self.G_hist, is_train, remain_time, params_1, lgb_para_dict_1, data_name, exp_name_1)
        exp_name_2 = 'fe_lgb_2'
        remain_time = lgb_with_fe.lgb_with_fe(self.G_df_dict, self.G_data_info, self.G_hist, is_train, remain_time, params_2, lgb_para_dict_2, data_name, exp_name_2)
        _ = ensemble.ensemble(self.G_df_dict, self.G_data_info, self.G_hist, is_train, remain_time, top_k=2)

    def fit(self):
        """Read the training data and run the full training pipeline."""
        data_name = self.server_name
        log("data name: {}".format(data_name))
        self.G_hist = {}
        self.G_hist['val_auc'] = {}
        self.G_hist['predict'] = {}
        self.G_hist['delete_column'] = {}
        phase = 'train'
        log("*** phase: {}".format(phase))
        is_train = (phase == 'train')
        self.G_df_dict, self.G_data_info, remain_time = read_data.read_data(data_info_path=self.data_info_path,
                                                                            train_set_path=self.train_set_path, is_train=is_train, debug=False)
        self._run_pipeline(is_train, remain_time, data_name)

    def predict(self, df=None, test_set_path=None):
        """Run the inference pipeline and return the ensemble prediction.

        Exactly one of ``df`` and ``test_set_path`` must be provided.

        NOTE(review): ``df`` is validated by the assert below but never
        forwarded to read_data — only ``test_set_path`` is used; confirm
        whether in-memory DataFrame input is actually supported.
        """
        assert ((df is None and test_set_path is not None) or (df is not None and test_set_path is None))
        data_name = self.server_name
        phase = 'test'
        log("*** phase: {}".format(phase))
        remain_time = 1e10
        is_train = (phase == 'train')
        self.G_df_dict, self.G_data_info, remain_time = read_data.read_data(data_info=self.G_data_info, test_set_path=test_set_path, df_dict=self.G_df_dict,
                                                                            is_train=is_train, debug=False, remain_time=remain_time)
        self._run_pipeline(is_train, remain_time, data_name)
        sub = self.G_hist['predict']['ensemble']
        sub.index = range(len(sub))
        return sub

    def save_server(self, path):
        """Pickle the server state (tables, data info, history) to *path*."""
        data_name = self.server_name
        save_obj(self.G_df_dict, path + f'/{data_name}_G_df_dict.pkl')
        save_obj(self.G_data_info, path + f'/{data_name}_G_data_info.pkl')
        save_obj(self.G_hist, path + f'/{data_name}_G_hist.pkl')

    def load_server(self, path):
        """Restore server state previously written by save_server()."""
        data_name = self.server_name
        self.G_df_dict = load_obj(path + f'/{data_name}_G_df_dict.pkl')
        self.G_data_info = load_obj(path + f'/{data_name}_G_data_info.pkl')
        self.G_hist = load_obj(path + f'/{data_name}_G_hist.pkl')
| 65.869281
| 191
| 0.743798
| 1,768
| 10,078
| 3.76414
| 0.054864
| 0.120962
| 0.07438
| 0.087603
| 0.864162
| 0.832757
| 0.798948
| 0.787979
| 0.728174
| 0.728174
| 0
| 0.007727
| 0.165311
| 10,078
| 152
| 192
| 66.302632
| 0.783405
| 0
| 0
| 0.59322
| 0
| 0
| 0.031359
| 0.015282
| 0
| 0
| 0
| 0
| 0.025424
| 1
| 0.042373
| false
| 0
| 0.101695
| 0
| 0.161017
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
7e2d4fd2ac241c2c37224631a6c00435bed89865
| 993
|
py
|
Python
|
tests/dfa2re/test_dfa2re.py
|
xinqin23/ShapeIt
|
4c643f71bca3b1acc388c688b0e8ffb59109be03
|
[
"MIT"
] | null | null | null |
tests/dfa2re/test_dfa2re.py
|
xinqin23/ShapeIt
|
4c643f71bca3b1acc388c688b0e8ffb59109be03
|
[
"MIT"
] | null | null | null |
tests/dfa2re/test_dfa2re.py
|
xinqin23/ShapeIt
|
4c643f71bca3b1acc388c688b0e8ffb59109be03
|
[
"MIT"
] | null | null | null |
import pytest
import networkx as nx
from shapeit.shape_it import ShapeIt
def test_dfa2re_0():
    """Two-state automaton where the accepting state has two back
    edges (labels '0' and '1'); checks the regex from start state 0."""
    automaton = nx.MultiDiGraph()
    automaton.add_node(0, initial=True, accepting=False)
    automaton.add_node(1, initial=False, accepting=True)
    automaton.add_edge(0, 1, label='0')
    automaton.add_edge(1, 0, label='0')
    automaton.add_edge(1, 0, label='1')
    converter = ShapeIt()
    regex = converter.dfa2re(automaton, 0)
    assert regex == '((0)).(((0 + 1)).(0))*'
def test_dfa2re_1():
    """Two-state cycle on label '0' with a non-accepting start state;
    checks the regex from start state 0."""
    automaton = nx.MultiDiGraph()
    automaton.add_node(0, initial=True, accepting=False)
    automaton.add_node(1, initial=False, accepting=True)
    automaton.add_edge(0, 1, label='0')
    automaton.add_edge(1, 0, label='0')
    converter = ShapeIt()
    regex = converter.dfa2re(automaton, 0)
    assert regex == '((0)).((0).(0))*'
def test_dfa2re_2():
    """Same two-state cycle as test_dfa2re_1 but with the START state
    accepting — the regex must therefore include the empty word (eps)."""
    automaton = nx.MultiDiGraph()
    automaton.add_node(0, initial=True, accepting=True)
    automaton.add_node(1, initial=False, accepting=False)
    automaton.add_edge(0, 1, label='0')
    automaton.add_edge(1, 0, label='0')
    converter = ShapeIt()
    regex = converter.dfa2re(automaton, 0)
    assert regex == '(eps + ((0)).((0).(0))*.((0)))'
| 24.825
| 48
| 0.657603
| 169
| 993
| 3.745562
| 0.16568
| 0.123223
| 0.110585
| 0.075829
| 0.805687
| 0.805687
| 0.805687
| 0.755134
| 0.728278
| 0.728278
| 0
| 0.058207
| 0.134945
| 993
| 40
| 49
| 24.825
| 0.678696
| 0
| 0
| 0.612903
| 0
| 0
| 0.075453
| 0.023139
| 0
| 0
| 0
| 0
| 0.096774
| 1
| 0.096774
| false
| 0
| 0.096774
| 0
| 0.193548
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0e521a825cb19d628f10c01686a3109c63584d9c
| 6,384
|
py
|
Python
|
tests/test_menu_items.py
|
codingedward/book-a-meal-api
|
36756abc225bf7e8306330f2c3e223dc32af7869
|
[
"MIT"
] | null | null | null |
tests/test_menu_items.py
|
codingedward/book-a-meal-api
|
36756abc225bf7e8306330f2c3e223dc32af7869
|
[
"MIT"
] | null | null | null |
tests/test_menu_items.py
|
codingedward/book-a-meal-api
|
36756abc225bf7e8306330f2c3e223dc32af7869
|
[
"MIT"
] | 2
|
2018-10-01T17:45:19.000Z
|
2020-12-07T13:48:25.000Z
|
import json
from app import create_app, db
from app.models import User, UserType
from .base import BaseTest
class MenuItemTests(BaseTest):
    """Integration tests for the /menu-items endpoints.

    Relies on BaseTest for authentication setup (setUpAuth populating
    admin_headers / user_headers) and for the data_with / data_without /
    to_dict helpers — confirm their exact contracts in tests/base.py.
    Several tests hard-code category/menu-item ids (1, 3), so they
    depend on the creation order within each test.
    """

    def setUp(self):
        """Create a fresh app, test client, schema and auth tokens."""
        self.app = create_app(config_name='testing')
        self.client = self.app.test_client()
        with self.app.app_context():
            db.create_all()
        self.setUpAuth()

    def data(self):
        """Return a valid menu-item JSON payload.

        Building the payload also creates the meal and menu rows that
        the new menu item references.
        """
        return json.dumps({
            'quantity': 30,
            'meal_id': self.create_meal()['meal']['id'],
            'menu_id': self.create_menu()['menu']['id'],
        })

    def test_can_create_menu_item(self):
        """Admin can create a menu item from a fully valid payload."""
        res = self.client.post(
            'api/v1/menu-items', data=self.data(), headers=self.admin_headers)
        self.assertEqual(res.status_code, 201)
        self.assertIn(b'Successfully saved menu item', res.data)

    def test_cannot_create_menu_item_without_quantity(self):
        """Missing 'quantity' field is rejected with a 400."""
        res = self.client.post(
            'api/v1/menu-items',
            data=self.data_without(['quantity']),
            headers=self.admin_headers)
        self.assertEqual(res.status_code, 400)
        self.assertIn(b'quantity field is required', res.data)

    def test_cannot_create_menu_item_without_menu_id(self):
        """Missing 'menu_id' field is rejected with a 400."""
        res = self.client.post(
            'api/v1/menu-items',
            data=self.data_without(['menu_id']),
            headers=self.admin_headers)
        self.assertEqual(res.status_code, 400)
        self.assertIn(b'id field is required', res.data)

    def test_cannot_create_menu_item_without_meal_id(self):
        """Missing 'meal_id' field is rejected with a 400."""
        res = self.client.post(
            'api/v1/menu-items',
            data=self.data_without(['meal_id']),
            headers=self.admin_headers)
        self.assertEqual(res.status_code, 400)
        self.assertIn(b'id field is required', res.data)

    def test_cannot_create_menu_item_with_nonexistant_meal_id(self):
        """A meal_id not present in the database is rejected."""
        res = self.client.post(
            'api/v1/menu-items',
            data=self.data_with({
                'meal_id': 300
            }),
            headers=self.admin_headers)
        self.assertEqual(res.status_code, 400)
        self.assertIn(b'The selected meal id is invalid', res.data)

    def test_cannot_create_menu_item_with_nonexistant_menu_id(self):
        """A menu_id not present in the database is rejected."""
        res = self.client.post(
            'api/v1/menu-items',
            data=self.data_with({
                'menu_id': 300
            }),
            headers=self.admin_headers)
        self.assertEqual(res.status_code, 400)
        self.assertIn(b'The selected menu id is invalid', res.data)

    def test_can_update_menu_item(self):
        """Admin can point an existing menu item at a different meal."""
        menu_item = self.create_menu_item(self.data())
        meal_id = self.create_meal(name='beef')['meal']['id']
        res = self.client.put(
            'api/v1/menu-items/{}'.format(menu_item['menu_item']['id']),
            data=json.dumps({
                'meal_id': meal_id
            }),
            headers=self.admin_headers)
        self.assertEqual(res.status_code, 200)
        self.assertIn(b'Menu item successfully updated', res.data)

    def test_cannot_update_menu_item_without_being_unique(self):
        """Updating one item to duplicate another's (meal, menu) pair fails."""
        # create a menu item
        menu_item = self.create_menu_item(self.data())['menu_item']
        meal_id = menu_item['meal']['id']
        menu_id = menu_item['menu']['id']
        # create another menu item
        new_meal_id = self.create_meal(name='beef')['meal']['id']
        new_menu_item = self.create_menu_item(
            json.dumps({
                'quantity': 30,
                'menu_id': menu_id,
                'meal_id': new_meal_id,
            }))
        # try to update the first one with the second's values
        res = self.client.put(
            'api/v1/menu-items/{}'.format(new_menu_item['menu_item']['id']),
            data=json.dumps({
                'meal_id': meal_id,
                'menu_id': 2,
            }),
            headers=self.admin_headers)
        self.assertEqual(res.status_code, 400)
        self.assertIn(b'is invalid', res.data)

    def test_can_get_menu_item(self):
        """A regular (non-admin) user can read a single menu item."""
        menu_item = self.create_menu_item(self.data())
        res = self.client.get(
            'api/v1/menu-items/{}'.format(menu_item['menu_item']['id']),
            headers=self.user_headers)
        self.assertEqual(res.status_code, 200)
        self.assertIn(b'successfully retrieved', res.data)

    def test_can_get_many_menu_items_history(self):
        """Listing works with the history=1 query flag."""
        self.create_menu_item(self.data())
        res = self.client.get(
            'api/v1/menu-items?history=1', headers=self.user_headers)
        self.assertEqual(res.status_code, 200)
        self.assertIn(b'Successfully retrieved', res.data)

    def test_can_get_many_menu_items(self):
        """A regular user can list menu items."""
        self.create_menu_item(self.data())
        res = self.client.get('api/v1/menu-items', headers=self.user_headers)
        self.assertEqual(res.status_code, 200)
        self.assertIn(b'Successfully retrieved', res.data)

    def test_can_delete_menu_item(self):
        """Admin can delete an existing menu item."""
        menu_item = self.create_menu_item(self.data())
        res = self.client.delete(
            'api/v1/menu-items/{}'.format(menu_item['menu_item']['id']),
            headers=self.admin_headers)
        self.assertEqual(res.status_code, 200)
        self.assertIn(b'Menu item successfully deleted', res.data)

    def create_menu_item(self, data):
        """POST a menu item, assert success and return the parsed reply."""
        res = self.client.post(
            'api/v1/menu-items', data=data, headers=self.admin_headers)
        self.assertEqual(res.status_code, 201)
        self.assertIn(b'Successfully saved menu item', res.data)
        return self.to_dict(res)

    def create_meal(self, name='ugali'):
        """POST a meal fixture, assert success and return the parsed reply."""
        res = self.client.post(
            'api/v1/meals',
            data=json.dumps({
                'name': name,
                'cost': 30,
            }),
            headers=self.admin_headers)
        self.assertEqual(res.status_code, 201)
        self.assertIn(b'Successfully saved meal', res.data)
        return self.to_dict(res)

    def create_menu(self, name='Lunch'):
        """POST a menu fixture, assert success and return the parsed reply."""
        res = self.client.post(
            'api/v1/menus',
            data=json.dumps({
                'name': name,
            }),
            headers=self.admin_headers)
        self.assertEqual(res.status_code, 201)
        self.assertIn(b'Successfully saved menu', res.data)
        return self.to_dict(res)

    def tearDown(self):
        """Drop all tables so each test starts from an empty database."""
        with self.app.app_context():
            db.drop_all()
| 37.333333
| 78
| 0.60213
| 824
| 6,384
| 4.450243
| 0.115291
| 0.087265
| 0.052359
| 0.102263
| 0.818653
| 0.787565
| 0.755113
| 0.737388
| 0.714208
| 0.645487
| 0
| 0.015924
| 0.272086
| 6,384
| 170
| 79
| 37.552941
| 0.773187
| 0.015038
| 0
| 0.530612
| 0
| 0
| 0.136856
| 0.004297
| 0
| 0
| 0
| 0
| 0.204082
| 1
| 0.122449
| false
| 0
| 0.027211
| 0.006803
| 0.183673
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0e6736307dc21e4437a017717108fb9cd2e908df
| 41
|
py
|
Python
|
bowling/sort/selection/__init__.py
|
necromuralist/Bowling-For-Data
|
8fb2bff206bf419812f96a5ad243e1d82959a00a
|
[
"MIT"
] | null | null | null |
bowling/sort/selection/__init__.py
|
necromuralist/Bowling-For-Data
|
8fb2bff206bf419812f96a5ad243e1d82959a00a
|
[
"MIT"
] | null | null | null |
bowling/sort/selection/__init__.py
|
necromuralist/Bowling-For-Data
|
8fb2bff206bf419812f96a5ad243e1d82959a00a
|
[
"MIT"
] | null | null | null |
from .selection import selection_counter
| 20.5
| 40
| 0.878049
| 5
| 41
| 7
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.097561
| 41
| 1
| 41
| 41
| 0.945946
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
adfbe28608e10d8964372bec46988193eac2c970
| 250
|
py
|
Python
|
Talent and Weapon Level-Up Material Drops/src/algorithm/__init__.py
|
RaylaKurosaki1503/Genshin_Impact-Projects
|
3828663914f2f74da9747cd9ffd983ef313ae0c5
|
[
"MIT"
] | null | null | null |
Talent and Weapon Level-Up Material Drops/src/algorithm/__init__.py
|
RaylaKurosaki1503/Genshin_Impact-Projects
|
3828663914f2f74da9747cd9ffd983ef313ae0c5
|
[
"MIT"
] | null | null | null |
Talent and Weapon Level-Up Material Drops/src/algorithm/__init__.py
|
RaylaKurosaki1503/Genshin_Impact-Projects
|
3828663914f2f74da9747cd9ffd983ef313ae0c5
|
[
"MIT"
] | null | null | null |
"""
Author: Rayla Kurosaki
File: __init__.py
Description: The initialization file for the package algorithm.
"""
from algorithm.phase1_get_data import *
from algorithm.phase2_print_to_file import *
from algorithm.phase3_print_to_workbook import *
| 20.833333
| 63
| 0.812
| 34
| 250
| 5.617647
| 0.647059
| 0.204188
| 0.198953
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013636
| 0.12
| 250
| 11
| 64
| 22.727273
| 0.854545
| 0.424
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0.666667
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
|
0
| 6
|
70c1ef8a5b5fef704533a23848def370b4801ae2
| 5,305
|
py
|
Python
|
tests/test_single_category_update.py
|
PatrickCmd/Yummy-Recipe-RestAPI
|
8911678be501d233e39f1b5c5a46aa3e82e5c844
|
[
"MIT"
] | null | null | null |
tests/test_single_category_update.py
|
PatrickCmd/Yummy-Recipe-RestAPI
|
8911678be501d233e39f1b5c5a46aa3e82e5c844
|
[
"MIT"
] | 41
|
2017-11-07T00:39:02.000Z
|
2019-10-21T15:09:58.000Z
|
tests/test_single_category_update.py
|
PatrickCmd/Yummy-Recipe-RestAPI
|
8911678be501d233e39f1b5c5a46aa3e82e5c844
|
[
"MIT"
] | 3
|
2017-11-18T16:03:34.000Z
|
2017-12-20T19:49:59.000Z
|
# tests/test_single_category_update.py
import unittest
import json
import uuid
import time
from api import db
from api.models import User, RecipeCategory
from tests.register_login import RegisterLogin
class TestUpdateSingleCategoriesBlueprint(RegisterLogin):
    """Tests for updating a single recipe category
    (PUT /recipe_category/<id>).

    RegisterLogin (tests/register_login.py) provides register_user,
    login_user and create_category.  The register/login/create-category
    preamble was previously duplicated verbatim in every test; it now
    lives in _register_and_create_category().
    """

    def _register_and_create_category(self):
        """Register and log in the fixture user, create the 'Breakfast'
        category (id 1) and return the Bearer-token headers."""
        self.register_user(
            "Patrick", "Walukagga",
            "pwalukagga@gmail.com", "telnetcmd123"
        )
        # registered user login
        rep_login = self.login_user("pwalukagga@gmail.com", "telnetcmd123")
        # valid token
        headers = dict(
            Authorization='Bearer ' + json.loads(
                rep_login.data.decode()
            )['auth_token']
        )
        self.create_category("Breakfast",
                             "How to make breakfast",
                             headers)
        return headers

    def test_update_single_recipe_category(self):
        """
        Test for update single recipe category
        """
        with self.client:
            headers = self._register_and_create_category()
            category_data = json.dumps({"name": "Lunchfast",
                                        "description":
                                        "How to make lunchfast"})
            response = self.client.put('/recipe_category/1',
                                       headers=headers,
                                       data=category_data)
            self.assertEqual(response.status_code, 200)
            self.assertIn('Recipe Category updated',
                          str(response.data))
            self.assertNotIn('How to make breakfast',
                             str(response.data))
            # update recipe category not in database
            response = self.client.put('/recipe_category/3',
                                       headers=headers,
                                       data=category_data)
            self.assertEqual(response.status_code, 404)
            self.assertIn('No category found',
                          str(response.data))
            self.assertNotIn('How to make lunchfast',
                             str(response.data))

    def test_update_single_recipe_category_id_not_number(self):
        """
        Test for update single recipe category id not number
        """
        with self.client:
            headers = self._register_and_create_category()
            category_data = json.dumps({"name": "Lunchfast",
                                        "description":
                                        "How to make lunchfast"})
            # a non-integer id in the URL must be rejected
            response = self.client.put('/recipe_category/a',
                                       headers=headers,
                                       data=category_data)
            self.assertEqual(response.status_code, 400)
            self.assertIn('Category ID must be an integer',
                          str(response.data))
            self.assertIn('fail', str(response.data))

    def test_update_single_recipe_category_with_one_field(self):
        """
        Test for update single recipe category with one field
        """
        with self.client:
            headers = self._register_and_create_category()
            # update only the name ...
            category_data = json.dumps({"name": "Lunchfast"})
            response = self.client.put('/recipe_category/1',
                                       headers=headers,
                                       data=category_data)
            self.assertEqual(response.status_code, 200)
            self.assertIn('Recipe Category updated',
                          str(response.data))
            # ... then only the description
            category_data = json.dumps({ "description":
                                        "How to make lunchfast"})
            response = self.client.put('/recipe_category/1',
                                       headers=headers,
                                       data=category_data)
            self.assertEqual(response.status_code, 200)
            self.assertIn('Recipe Category updated',
                          str(response.data))
# Allow running this test module directly (python <file>) in addition
# to test-runner discovery.
if __name__ == '__main__':
    unittest.main()
| 42.103175
| 79
| 0.481433
| 436
| 5,305
| 5.704128
| 0.197248
| 0.084439
| 0.028951
| 0.062726
| 0.825895
| 0.825895
| 0.798552
| 0.766787
| 0.710092
| 0.671492
| 0
| 0.012346
| 0.435061
| 5,305
| 125
| 80
| 42.44
| 0.817484
| 0.061074
| 0
| 0.69697
| 0
| 0
| 0.158238
| 0
| 0
| 0
| 0
| 0
| 0.131313
| 1
| 0.030303
| false
| 0
| 0.070707
| 0
| 0.111111
| 0.010101
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
70c2f075a6dbedbe2aebe6fb9502f022768e876b
| 119
|
py
|
Python
|
detection/data/util.py
|
stanford-policylab/surveilling-surveillance
|
bbb9a147927a6342eecfe07ffa756b3acdb63f35
|
[
"MIT"
] | 8
|
2021-05-21T03:38:52.000Z
|
2021-11-21T08:32:41.000Z
|
detection/data/util.py
|
stanford-policylab/surveilling-surveillance
|
bbb9a147927a6342eecfe07ffa756b3acdb63f35
|
[
"MIT"
] | null | null | null |
detection/data/util.py
|
stanford-policylab/surveilling-surveillance
|
bbb9a147927a6342eecfe07ffa756b3acdb63f35
|
[
"MIT"
] | 1
|
2021-06-13T21:49:14.000Z
|
2021-06-13T21:49:14.000Z
|
from pathlib import Path, PosixPath
def _is_path(file_path):
return isinstance(file_path, (str, PosixPath))
| 17
| 54
| 0.731092
| 16
| 119
| 5.1875
| 0.6875
| 0.192771
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.184874
| 119
| 6
| 55
| 19.833333
| 0.85567
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
70c62e8e855d3b712588b926c684c8ff454e3bf3
| 60
|
py
|
Python
|
package-eg-test.py
|
thinmarwin/python-exercises
|
2d8ccdf9b0fcf73802b161ca31dd0428e92bbc66
|
[
"MIT"
] | null | null | null |
package-eg-test.py
|
thinmarwin/python-exercises
|
2d8ccdf9b0fcf73802b161ca31dd0428e92bbc66
|
[
"MIT"
] | null | null | null |
package-eg-test.py
|
thinmarwin/python-exercises
|
2d8ccdf9b0fcf73802b161ca31dd0428e92bbc66
|
[
"MIT"
] | null | null | null |
import package_example.ex41

# Bug fix: the call previously used `package-example.ex41.convert()`.
# A hyphen is not valid in a Python identifier, so that line parsed as
# a subtraction between two undefined names and raised NameError at
# runtime.  The module is imported as `package_example` (underscore).
package_example.ex41.convert()
| 15
| 30
| 0.833333
| 8
| 60
| 6.125
| 0.625
| 0.571429
| 0.734694
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.071429
| 0.066667
| 60
| 3
| 31
| 20
| 0.803571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
70d0d010999cf9fc72930b2583309f9127a9f629
| 99
|
py
|
Python
|
custom_django_auth_backend/logging.py
|
Chiorufarewerin/custom-django-auth-backend
|
89aa2330933472f8cfb13ad3c488d4347b6db128
|
[
"MIT"
] | null | null | null |
custom_django_auth_backend/logging.py
|
Chiorufarewerin/custom-django-auth-backend
|
89aa2330933472f8cfb13ad3c488d4347b6db128
|
[
"MIT"
] | null | null | null |
custom_django_auth_backend/logging.py
|
Chiorufarewerin/custom-django-auth-backend
|
89aa2330933472f8cfb13ad3c488d4347b6db128
|
[
"MIT"
] | null | null | null |
from .settings import LOGGER_NAME
from .utils import get_logger
# Module-wide logger, created once at import time via the package's
# get_logger helper using the configured LOGGER_NAME.
logger = get_logger(LOGGER_NAME)
| 16.5
| 33
| 0.818182
| 15
| 99
| 5.133333
| 0.466667
| 0.25974
| 0.38961
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.131313
| 99
| 5
| 34
| 19.8
| 0.895349
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
cb5e57b476ba9567514018000954b065d84a62f5
| 34
|
py
|
Python
|
digital_signature/__init__.py
|
syamamura0x00/RedRIbbon
|
9a035d3e64b70eee7519cccad9d9735e509d5a2f
|
[
"Apache-2.0"
] | null | null | null |
digital_signature/__init__.py
|
syamamura0x00/RedRIbbon
|
9a035d3e64b70eee7519cccad9d9735e509d5a2f
|
[
"Apache-2.0"
] | null | null | null |
digital_signature/__init__.py
|
syamamura0x00/RedRIbbon
|
9a035d3e64b70eee7519cccad9d9735e509d5a2f
|
[
"Apache-2.0"
] | null | null | null |
from .digital_signature import *
| 11.333333
| 32
| 0.794118
| 4
| 34
| 6.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.147059
| 34
| 2
| 33
| 17
| 0.896552
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
cb8a427f9a37f405511471601f02b428dc29f6f9
| 96
|
py
|
Python
|
suricate/api/main/__init__.py
|
marco-buttu/suricate
|
89cb406ffde7a67db2ac8594d3e9c371924c57bb
|
[
"BSD-3-Clause"
] | 1
|
2020-03-26T15:27:42.000Z
|
2020-03-26T15:27:42.000Z
|
suricate/api/main/__init__.py
|
marco-buttu/suricate
|
89cb406ffde7a67db2ac8594d3e9c371924c57bb
|
[
"BSD-3-Clause"
] | 75
|
2019-08-19T14:21:08.000Z
|
2020-03-26T11:24:12.000Z
|
suricate/api/main/__init__.py
|
marco-buttu/suricate
|
89cb406ffde7a67db2ac8594d3e9c371924c57bb
|
[
"BSD-3-Clause"
] | null | null | null |
# Flask blueprint wiring for the "main" section of the API.
from flask import Blueprint
# Blueprint object that the view functions attach their routes to.
main = Blueprint('main', __name__)
# Imported for side effects only: registers routes on `main`.
# NOTE(review): deliberately placed after the Blueprint is created --
# presumably `views` imports `main`, so moving this import up would create
# a circular import. Confirm before reordering.
from suricate.api import views
| 16
| 34
| 0.78125
| 13
| 96
| 5.461538
| 0.692308
| 0.366197
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.145833
| 96
| 5
| 35
| 19.2
| 0.865854
| 0
| 0
| 0
| 0
| 0
| 0.041667
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0.666667
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
|
0
| 6
|
cba41a7cf1c04aed8dfd66206d9c9b49393f1262
| 160
|
py
|
Python
|
lambdas/Frontend-Lambda/create_token.py
|
david-fisher/320-F19-Track-I
|
cddff29ad9f79a794928eb29d44bc9f53f46f3fd
|
[
"BSD-3-Clause"
] | 8
|
2019-09-04T14:18:30.000Z
|
2020-02-04T18:06:50.000Z
|
lambdas/Frontend-Lambda/create_token.py
|
david-fisher/320-F19-Track-I
|
cddff29ad9f79a794928eb29d44bc9f53f46f3fd
|
[
"BSD-3-Clause"
] | 103
|
2019-09-19T18:15:25.000Z
|
2020-05-05T01:39:40.000Z
|
lambdas/Frontend-Lambda/create_token.py
|
david-fisher/320-F19-Track-I
|
cddff29ad9f79a794928eb29d44bc9f53f46f3fd
|
[
"BSD-3-Clause"
] | 2
|
2020-01-17T18:46:46.000Z
|
2020-05-04T15:53:34.000Z
|
import random  # kept: part of the module's original import surface
import secrets
import string

# Token alphabet: [A-Z0-9a-z], same character set as the original implementation.
_ALPHABET = string.ascii_uppercase + string.digits + string.ascii_lowercase


def rand_token(N=64):
    """Return a random alphanumeric token of length *N* (default 64).

    Characters are drawn uniformly from [A-Za-z0-9] using the ``secrets``
    module. The previous implementation used ``random.choices``, which is a
    predictable (non-cryptographic) PRNG -- unsafe for tokens that act as
    credentials. ``secrets.choice`` uses the OS CSPRNG instead.

    Parameters:
        N (int): desired token length; N=0 yields the empty string.

    Returns:
        str: the generated token.
    """
    return ''.join(secrets.choice(_ALPHABET) for _ in range(N))
| 26.666667
| 104
| 0.74375
| 23
| 160
| 5.043478
| 0.695652
| 0.189655
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014493
| 0.1375
| 160
| 6
| 105
| 26.666667
| 0.826087
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.5
| 0.25
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
cbb305385a5c951c924dac8441e15d42b33ad71b
| 46
|
py
|
Python
|
Modulo_5/semana_1/crear_arreglos/arange.py
|
AutodidactaMx/cocid_python
|
11628f465ff362807a692c79ede26bf30dd8e26a
|
[
"MIT"
] | null | null | null |
Modulo_5/semana_1/crear_arreglos/arange.py
|
AutodidactaMx/cocid_python
|
11628f465ff362807a692c79ede26bf30dd8e26a
|
[
"MIT"
] | null | null | null |
Modulo_5/semana_1/crear_arreglos/arange.py
|
AutodidactaMx/cocid_python
|
11628f465ff362807a692c79ede26bf30dd8e26a
|
[
"MIT"
] | 1
|
2022-03-04T00:57:18.000Z
|
2022-03-04T00:57:18.000Z
|
# Demo of numpy.arange: integers from 1 up to (but excluding) 50, step 3.
import numpy as np

values = np.arange(start=1, stop=50, step=3)
print(values)
| 9.2
| 24
| 0.695652
| 10
| 46
| 3.2
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.102564
| 0.152174
| 46
| 4
| 25
| 11.5
| 0.717949
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 6
|
cbbc3bb7486135cb35b5a26282d6f9a051675662
| 134
|
py
|
Python
|
python/code_challenges/insertion-sort/tests/test_insertion_sort.py
|
YAHIAQOUS/data-structures-and-algorithms
|
73ed1d7f6bb0f60af3cafbac468122c4a035f348
|
[
"MIT"
] | null | null | null |
python/code_challenges/insertion-sort/tests/test_insertion_sort.py
|
YAHIAQOUS/data-structures-and-algorithms
|
73ed1d7f6bb0f60af3cafbac468122c4a035f348
|
[
"MIT"
] | 8
|
2021-08-14T14:46:14.000Z
|
2021-09-13T20:30:29.000Z
|
python/code_challenges/insertion-sort/tests/test_insertion_sort.py
|
YAHIAQOUS/data-structures-and-algorithms
|
73ed1d7f6bb0f60af3cafbac468122c4a035f348
|
[
"MIT"
] | 1
|
2021-12-05T13:25:31.000Z
|
2021-12-05T13:25:31.000Z
|
from insertion_sort import insertion_sort
def test_insertion_sort():
    """insertion_sort orders a small unsorted list of ints ascending."""
    sample = [8, 4, 23, 42, 16, 15]
    expected = [4, 8, 15, 16, 23, 42]
    assert insertion_sort(sample) == expected
| 26.8
| 63
| 0.746269
| 25
| 134
| 3.8
| 0.52
| 0.547368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.165289
| 0.097015
| 134
| 4
| 64
| 33.5
| 0.619835
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
cbd0908e3f5b3b0101f7b0384d46765a6096579a
| 48
|
py
|
Python
|
multiply.py
|
AdvaitShiralkar/calculatorpython
|
4c9de99c217f2210f574f9c329af887ce728b3dd
|
[
"MIT"
] | null | null | null |
multiply.py
|
AdvaitShiralkar/calculatorpython
|
4c9de99c217f2210f574f9c329af887ce728b3dd
|
[
"MIT"
] | null | null | null |
multiply.py
|
AdvaitShiralkar/calculatorpython
|
4c9de99c217f2210f574f9c329af887ce728b3dd
|
[
"MIT"
] | null | null | null |
def multiply_2(number):
    """Return *number* multiplied by two."""
    doubled = 2 * number
    return doubled
| 16
| 24
| 0.666667
| 7
| 48
| 4.428571
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.055556
| 0.25
| 48
| 2
| 25
| 24
| 0.805556
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.