hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
89f4b89220e6c9020137c93fa4b93f91dde4a3bd
| 487
|
py
|
Python
|
push_notifications/ser.py
|
omss24/django-push-notifications
|
d3863262addc235a021eeeb08a0c17ecc28df668
|
[
"MIT"
] | null | null | null |
push_notifications/ser.py
|
omss24/django-push-notifications
|
d3863262addc235a021eeeb08a0c17ecc28df668
|
[
"MIT"
] | null | null | null |
push_notifications/ser.py
|
omss24/django-push-notifications
|
d3863262addc235a021eeeb08a0c17ecc28df668
|
[
"MIT"
] | null | null | null |
from rest_framework import serializers
from push_notifications.models import Device, GCMDevice, APNSDevice
from consultant.ser import ConsultantTokenSer
class DeviceSer(serializers.ModelSerializer):
user = ConsultantTokenSer()
class Meta:
model = Device
class GCMDeviceSer(serializers.ModelSerializer):
user = ConsultantTokenSer()
class Meta:
model = GCMDevice
class APNSDeviceSer(serializers.ModelSerializer):
user = ConsultantTokenSer()
class Meta:
model = APNSDevice
| 24.35
| 67
| 0.811088
| 48
| 487
| 8.1875
| 0.458333
| 0.234097
| 0.229008
| 0.366412
| 0.473282
| 0.473282
| 0.473282
| 0
| 0
| 0
| 0
| 0
| 0.125257
| 487
| 20
| 68
| 24.35
| 0.922535
| 0
| 0
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.2
| 0
| 0.8
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
d6097551cb5520f6c583c8de98cbd7f8032487b8
| 2,053
|
py
|
Python
|
python/rikai/spark/sql/generated/RikaiModelSchemaVisitor.py
|
chunyang/rikai
|
9eb7f0da68e6c1e27e74de324afdbd8b375df0c3
|
[
"Apache-2.0"
] | null | null | null |
python/rikai/spark/sql/generated/RikaiModelSchemaVisitor.py
|
chunyang/rikai
|
9eb7f0da68e6c1e27e74de324afdbd8b375df0c3
|
[
"Apache-2.0"
] | null | null | null |
python/rikai/spark/sql/generated/RikaiModelSchemaVisitor.py
|
chunyang/rikai
|
9eb7f0da68e6c1e27e74de324afdbd8b375df0c3
|
[
"Apache-2.0"
] | null | null | null |
# Generated from src/main/antlr4/org/apache/spark/sql/ml/parser/RikaiModelSchema.g4 by ANTLR 4.7.2
from antlr4 import *
if __name__ is not None and "." in __name__:
from .RikaiModelSchemaParser import RikaiModelSchemaParser
else:
from RikaiModelSchemaParser import RikaiModelSchemaParser
# This class defines a complete generic visitor for a parse tree produced by RikaiModelSchemaParser.
class RikaiModelSchemaVisitor(ParseTreeVisitor):
# Visit a parse tree produced by RikaiModelSchemaParser#schema.
def visitSchema(self, ctx:RikaiModelSchemaParser.SchemaContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by RikaiModelSchemaParser#unquotedIdentifier.
def visitUnquotedIdentifier(self, ctx:RikaiModelSchemaParser.UnquotedIdentifierContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by RikaiModelSchemaParser#structType.
def visitStructType(self, ctx:RikaiModelSchemaParser.StructTypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by RikaiModelSchemaParser#arrayType.
def visitArrayType(self, ctx:RikaiModelSchemaParser.ArrayTypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by RikaiModelSchemaParser#nestedStructType.
def visitNestedStructType(self, ctx:RikaiModelSchemaParser.NestedStructTypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by RikaiModelSchemaParser#nestedArrayType.
def visitNestedArrayType(self, ctx:RikaiModelSchemaParser.NestedArrayTypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by RikaiModelSchemaParser#plainFieldType.
def visitPlainFieldType(self, ctx:RikaiModelSchemaParser.PlainFieldTypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by RikaiModelSchemaParser#structField.
def visitStructField(self, ctx:RikaiModelSchemaParser.StructFieldContext):
return self.visitChildren(ctx)
del RikaiModelSchemaParser
| 38.735849
| 100
| 0.791525
| 201
| 2,053
| 8.044776
| 0.358209
| 0.033395
| 0.055659
| 0.100186
| 0.371058
| 0.371058
| 0.345083
| 0.316017
| 0.316017
| 0.316017
| 0
| 0.003436
| 0.149537
| 2,053
| 53
| 101
| 38.735849
| 0.92268
| 0.357526
| 0
| 0.347826
| 1
| 0
| 0.000769
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.347826
| false
| 0
| 0.130435
| 0.347826
| 0.869565
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 4
|
d625e5e5ddd3743f74d898ae58c9e4710b27e274
| 5,586
|
py
|
Python
|
autumn/__init__.py
|
monash-emu/AuTuMN
|
fa3b81ef54cf561e0e7364a48f4ff96585dc3310
|
[
"BSD-2-Clause-FreeBSD"
] | 14
|
2020-03-11T06:15:30.000Z
|
2022-03-09T03:38:35.000Z
|
autumn/__init__.py
|
monash-emu/AuTuMN
|
fa3b81ef54cf561e0e7364a48f4ff96585dc3310
|
[
"BSD-2-Clause-FreeBSD"
] | 96
|
2020-01-29T05:10:29.000Z
|
2022-03-31T01:48:46.000Z
|
autumn/__init__.py
|
monash-emu/AuTuMN
|
fa3b81ef54cf561e0e7364a48f4ff96585dc3310
|
[
"BSD-2-Clause-FreeBSD"
] | 10
|
2020-04-24T00:38:00.000Z
|
2021-08-19T16:19:03.000Z
|
import os
import warnings
# Ignore future warnings they're annoying.
warnings.simplefilter(action="ignore", category=FutureWarning)
# Ensure NumPy only uses 1 thread for matrix multiplication,
# because NumPy is stupid and tries to use heaps of threads,
# which is quite wasteful and makes our models run way more slowly.
# https://stackoverflow.com/questions/30791550/limit-number-of-threads-in-numpy
os.environ["OMP_NUM_THREADS"] = "1"
from autumn.settings import Models, Region
from autumn.tools.registry import register_project
# TB projects
register_project(
Models.TB, Region.MARSHALL_ISLANDS, "autumn.projects.tuberculosis.marshall_islands.project"
)
register_project(Models.TB, Region.PHILIPPINES, "autumn.projects.tuberculosis.philippines.project")
# Example projects
register_project(Models.EXAMPLE, Region.PHILIPPINES, "autumn.projects.example.philippines.project")
register_project(Models.EXAMPLE, Region.VICTORIA_2020, "autumn.projects.example.victoria.project")
# COVID: Victoria state-wide super-model
register_project(
Models.COVID_19,
Region.VICTORIA_2020,
"autumn.projects.covid_19.victoria.victoria_2020.project",
)
register_project(
Models.COVID_19,
Region.VICTORIA_2021,
"autumn.projects.covid_19.victoria.victoria_2021.project",
)
register_project(
Models.COVID_19,
Region.NORTH_EAST_METRO,
"autumn.projects.covid_19.victoria.north_east_metro.project",
)
register_project(
Models.COVID_19,
Region.SOUTH_EAST_METRO,
"autumn.projects.covid_19.victoria.south_east_metro.project",
)
register_project(
Models.COVID_19,
Region.WEST_METRO,
"autumn.projects.covid_19.victoria.west_metro.project",
)
register_project(
Models.COVID_19,
Region.BARWON_SOUTH_WEST,
"autumn.projects.covid_19.victoria.barwon_south_west.project",
)
register_project(
Models.COVID_19,
Region.GIPPSLAND,
"autumn.projects.covid_19.victoria.gippsland.project",
)
register_project(
Models.COVID_19,
Region.GRAMPIANS,
"autumn.projects.covid_19.victoria.grampians.project",
)
register_project(
Models.COVID_19,
Region.HUME,
"autumn.projects.covid_19.victoria.hume.project",
)
register_project(
Models.COVID_19,
Region.LODDON_MALLEE,
"autumn.projects.covid_19.victoria.loddon_mallee.project",
)
# COVID: European mixing optmization
register_project(
Models.COVID_19,
Region.BELGIUM,
"autumn.projects.covid_19.mixing_optimisation.regions.belgium.project",
)
register_project(
Models.COVID_19,
Region.SPAIN,
"autumn.projects.covid_19.mixing_optimisation.regions.spain.project",
)
register_project(
Models.COVID_19,
Region.SWEDEN,
"autumn.projects.covid_19.mixing_optimisation.regions.sweden.project",
)
register_project(
Models.COVID_19,
Region.UNITED_KINGDOM,
"autumn.projects.covid_19.mixing_optimisation.regions.united_kingdom.project",
)
register_project(
Models.COVID_19,
Region.ITALY,
"autumn.projects.covid_19.mixing_optimisation.regions.italy.project",
)
register_project(
Models.COVID_19,
Region.FRANCE,
"autumn.projects.covid_19.mixing_optimisation.regions.france.project",
)
# COVID: Philippines project
register_project(
Models.COVID_19, Region.CALABARZON, "autumn.projects.covid_19.philippines.calabarzon.project"
)
register_project(
Models.COVID_19,
Region.CENTRAL_VISAYAS,
"autumn.projects.covid_19.philippines.central_visayas.project",
)
register_project(
Models.COVID_19, Region.DAVAO_CITY, "autumn.projects.covid_19.philippines.davao_city.project"
)
register_project(
Models.COVID_19,
Region.DAVAO_REGION,
"autumn.projects.covid_19.philippines.davao_region.project",
)
register_project(
Models.COVID_19, Region.MANILA, "autumn.projects.covid_19.philippines.manila.project"
)
register_project(
Models.COVID_19, Region.PHILIPPINES, "autumn.projects.covid_19.philippines.philippines.project"
)
# COVID: Malaysia project
register_project(Models.COVID_19, Region.JOHOR, "autumn.projects.covid_19.malaysia.johor.project")
register_project(
Models.COVID_19, Region.KUALA_LUMPUR, "autumn.projects.covid_19.malaysia.kuala_lumpur.project"
)
register_project(
Models.COVID_19, Region.MALAYSIA, "autumn.projects.covid_19.malaysia.malaysia.project"
)
register_project(Models.COVID_19, Region.PENANG, "autumn.projects.covid_19.malaysia.penang.project")
register_project(Models.COVID_19, Region.SABAH, "autumn.projects.covid_19.malaysia.sabah.project")
register_project(
Models.COVID_19, Region.SELANGOR, "autumn.projects.covid_19.malaysia.selangor.project"
)
# Nepal
register_project(Models.COVID_19, Region.NEPAL, "autumn.projects.covid_19.nepal.project")
# Sri Lanka
register_project(
Models.COVID_19, Region.SRI_LANKA, "autumn.projects.covid_19.sri_lanka.sri_lanka.project"
)
# Indonesia & Bali
register_project(Models.COVID_19, Region.BALI, "autumn.projects.covid_19.indonesia.bali.project")
register_project(
Models.COVID_19, Region.INDONESIA, "autumn.projects.covid_19.indonesia.indonesia.project"
)
# Vietnam
register_project(
Models.COVID_19, Region.VIETNAM, "autumn.projects.covid_19.vietnam.vietnam.project"
)
register_project(
Models.COVID_19,
Region.HO_CHI_MINH_CITY,
"autumn.projects.covid_19.vietnam.ho_chi_minh_city.project",
)
register_project(Models.COVID_19, Region.MYANMAR, "autumn.projects.covid_19.myanmar.project")
# COVID: Victoria project
# FIXME: Parameter validation issues
# register_project(Models.COVID_19, Region.VICTORIA, "autumn.projects.covid_19.victoria.project")
| 28.943005
| 100
| 0.786609
| 710
| 5,586
| 5.959155
| 0.177465
| 0.119121
| 0.198535
| 0.221224
| 0.668164
| 0.482628
| 0.399433
| 0.090995
| 0.023635
| 0
| 0
| 0.034828
| 0.105621
| 5,586
| 192
| 101
| 29.09375
| 0.81205
| 0.116899
| 0
| 0.342657
| 0
| 0
| 0.431217
| 0.42674
| 0
| 0
| 0
| 0.005208
| 0
| 1
| 0
| true
| 0
| 0.027972
| 0
| 0.027972
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
c384e185a028dbe0d164fb2a713c781e03517dfc
| 146
|
py
|
Python
|
exercise/traceback_test.py
|
progzc/PythonDemo
|
0515fee3511bc132bfddf480014f61ce52080616
|
[
"Apache-2.0"
] | null | null | null |
exercise/traceback_test.py
|
progzc/PythonDemo
|
0515fee3511bc132bfddf480014f61ce52080616
|
[
"Apache-2.0"
] | null | null | null |
exercise/traceback_test.py
|
progzc/PythonDemo
|
0515fee3511bc132bfddf480014f61ce52080616
|
[
"Apache-2.0"
] | null | null | null |
import traceback
# 使用traceback模块打印异常信息
# 注意顺序的随机性
try:
print('-------------------------')
print(1 / 0)
except:
traceback.print_exc()
| 14.6
| 38
| 0.554795
| 13
| 146
| 6.153846
| 0.769231
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.016667
| 0.178082
| 146
| 9
| 39
| 16.222222
| 0.65
| 0.191781
| 0
| 0
| 0
| 0
| 0.217391
| 0.217391
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.166667
| 0
| 0.166667
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 4
|
c3a24d5a1423176a71ca5d88b9915517f13181f5
| 34,574
|
py
|
Python
|
tests/test_gmm.py
|
EricKightley/sparseklearn
|
d5d1f42c0572972ea3f4702734f82066ae7270e3
|
[
"MIT"
] | 3
|
2018-02-08T08:35:54.000Z
|
2020-02-19T21:50:28.000Z
|
tests/test_gmm.py
|
EricKightley/sparseklearn
|
d5d1f42c0572972ea3f4702734f82066ae7270e3
|
[
"MIT"
] | 1
|
2020-07-07T05:23:52.000Z
|
2020-07-08T13:57:48.000Z
|
tests/test_gmm.py
|
EricKightley/sparseklearn
|
d5d1f42c0572972ea3f4702734f82066ae7270e3
|
[
"MIT"
] | 1
|
2019-10-07T03:56:41.000Z
|
2019-10-07T03:56:41.000Z
|
import unittest
import numpy as np
from sklearn.mixture.gaussian_mixture import _compute_precision_cholesky
from sklearn.mixture.gaussian_mixture import _estimate_log_gaussian_prob
from sklearn.mixture.gaussian_mixture import _estimate_gaussian_parameters
from sklearn.mixture import GaussianMixture as GMSKL
from sparseklearn import GaussianMixture
from tests import DataGenerator
class TestGaussianMixture(unittest.TestCase):
def assertArrayEqual(self, x, y):
self.assertTrue(np.allclose(x, y, rtol=1e-6))
def setUp(self):
self.td = DataGenerator()
#gmm = GaussianMixture(n_components = 3,
# num_feat_full = 5, num_feat_comp = 3, num_feat_shared = 1,
# num_samp = 4, transform = 'dct',
# D_indices = self.td.D_indices, mask = self.td.mask)
#self.gmm = gmm
def test_fit_sparsifier(self):
gmm = GaussianMixture(n_components = 3,
num_feat_full = 5, num_feat_comp = 3, num_feat_shared = 1,
num_samp = 4, transform = 'dct',
D_indices = self.td.D_indices, mask = self.td.mask)
gmm.fit_sparsifier(X = self.td.X)
self.assertArrayEqual(self.td.RHDX, gmm.RHDX)
self.assertArrayEqual(self.td.mask, gmm.mask)
self.assertEqual(self.td.N, gmm.num_samp)
self.assertEqual(self.td.Q, gmm.num_feat_comp)
self.assertEqual(self.td.P, gmm.num_feat_full)
def instantiate_standard_gmm(self, random_state):
gmm = GaussianMixture(n_components = self.td.K,
num_feat_full = self.td.P,
num_feat_comp = self.td.Q,
num_feat_shared = self.td.Qs,
num_samp = self.td.N,
transform = self.td.transform,
D_indices = self.td.D_indices,
mask = self.td.mask,
random_state = random_state)
return gmm
###########################################################################
###########################################################################
##### E-STEP ######
###########################################################################
###########################################################################
def test_pairwise_mahalanobis_distances(self):
""" pairwise_mahalanobis_distances is a Sparsifier function, but a use
case here made me suspect that it's wrong. To confirm this I'm putting
a test here first. Will need to make a new one (and probably ammend
existing ones that are currently passing but shouldn't be) in that
test suite once I am convinced it's the culprit. """
cov_type = 'spherical'
rs = np.random.RandomState(10)
gmm = GaussianMixture(n_components = 3, num_feat_full = 5,
num_feat_comp = 3, num_feat_shared = 2, num_samp = 4, transform = None,
mask = None, D_indices = None, covariance_type = cov_type,
random_state = rs)
gmm.fit_sparsifier(X = self.td.X)
means = rs.rand(gmm.n_components, gmm.num_feat_full)
covariances = rs.rand(gmm.n_components)
mahadist_test = gmm.pairwise_mahalanobis_distances(means, covariances,
cov_type)**2
#undo the rescaling due to compression
mahadist_test *= gmm.num_feat_comp/gmm.num_feat_full
mahadist_true = np.zeros_like(mahadist_test)
for data_ind in range(gmm.num_samp):
for comp_ind in range(gmm.n_components):
mahadist_true[data_ind, comp_ind] = 1/covariances[comp_ind] * \
np.linalg.norm(gmm.RHDX[data_ind] -
means[comp_ind][gmm.mask[data_ind]])**2
self.assertArrayEqual(mahadist_test, mahadist_true)
def test_hand_computation_of_log_prob_vs_sklearn(self):
""" Something seems wrong with my mahadist computation. Before digging
further into the C library to find the error, I want to make sure that
the results I think it should give are right. One way to gather
evidence in favor of this conclusion is to use the result in the
computation of the log probability (this is what led me here in the
first place). This test does so, and consequently doesn't actually
test any of the code in gmm.py. For this to work the mask must be
entirely shared. """
cov_type = 'spherical'
rs = np.random.RandomState(10)
gmm = GaussianMixture(n_components = 3, num_feat_full = 5,
num_feat_comp = 3, num_feat_shared = 3, num_samp = 4, transform = None,
mask = None, D_indices = None, covariance_type = cov_type,
random_state = rs)
gmm.fit_sparsifier(X = self.td.X)
means = rs.rand(gmm.n_components, gmm.num_feat_full)
covariances = rs.rand(gmm.n_components)
precisions = _compute_precision_cholesky(covariances, cov_type)
# this is where we need the mask to be shared, so that all mask rows
# equal mask[0]
masked_means = means[:, gmm.mask[0]]
log_prob_true = _estimate_log_gaussian_prob(gmm.RHDX, masked_means,
precisions, cov_type)
log_prob_test = np.zeros((gmm.num_samp, gmm.n_components))
for data_ind in range(gmm.num_samp):
for comp_ind in range(gmm.n_components):
test_const = gmm.num_feat_comp * np.log(2*np.pi)
test_logdet = gmm.num_feat_comp * np.log(covariances[comp_ind])
test_mahadist = 1/covariances[comp_ind] * \
np.linalg.norm(gmm.RHDX[data_ind] -
means[comp_ind][gmm.mask[data_ind]])**2
log_prob_test[data_ind, comp_ind] = -.5*(test_const + \
test_logdet + test_mahadist)
self.assertArrayEqual(log_prob_test, log_prob_true)
def test__compute_logdet_array_spherical(self):
""" Test spherical logdet under compression on an example
computed here. Redundant with test__compute_logdet_array below but was
implemented to confirm that test is correct. """
cov_type = 'spherical'
rs = np.random.RandomState(10)
gmm = GaussianMixture(n_components = 3, num_feat_full = 5,
num_feat_comp = 3, num_feat_shared = 2, num_samp = 4, transform = None,
mask = None, D_indices = None, covariance_type = cov_type,
random_state = rs)
gmm.fit_sparsifier(X = self.td.X)
means = rs.rand(gmm.n_components, gmm.num_feat_full)
covariances = rs.rand(gmm.n_components)
logdet_test = gmm._compute_logdet_array(covariances, 'spherical')
logdet_true = gmm.num_feat_comp * np.log(covariances)
logdet_true = np.tile(logdet_true, (gmm.num_samp, 1))
self.assertArrayEqual(logdet_test, logdet_true)
def test__compute_logdet_array(self):
""" Test spherical and diagonal on hard-coded results. """
gmm = GaussianMixture(n_components = 3,
num_feat_full = 5, num_feat_comp = 3, num_feat_shared = 1,
num_samp = 4, transform = 'dct',
D_indices = self.td.D_indices, mask = self.td.mask)
logdet_spherical = gmm._compute_logdet_array(self.td.spherical_covariances, 'spherical')
logdet_diag = gmm._compute_logdet_array(self.td.diagonal_covariances, 'diag')
self.assertArrayEqual(self.td.correct_logdet_spherical, logdet_spherical)
self.assertArrayEqual(self.td.correct_logdet_diag, logdet_diag)
def test__compute_log_prob_spherical_no_compression(self):
""" Compare the log_prob computation to that of sklearn with no
compression. Implemented as a precursor to testing it with
compression, to follow. Spherical covariances. """
cov_type = 'spherical'
gmm = GaussianMixture(n_components = 3, num_feat_full = 5,
num_feat_comp = 5, num_feat_shared = 5, num_samp = 4, transform = None,
mask = None, D_indices = None, covariance_type = cov_type)
gmm.fit_sparsifier(X = self.td.X)
means = np.random.rand(gmm.n_components, gmm.num_feat_comp)
covariances = np.random.rand(gmm.n_components)
log_prob_test = gmm._compute_log_prob(means, covariances, cov_type)
precisions = _compute_precision_cholesky(covariances, cov_type)
log_prob_true = _estimate_log_gaussian_prob(self.td.X, means, precisions, cov_type)
self.assertArrayEqual(log_prob_test, log_prob_true)
def test__compute_log_prob_spherical_shared_compression(self):
""" Compare the log_prob computation to that of sklearn with
shared compression. Spherical covariances. """
cov_type = 'spherical'
rs = np.random.RandomState(10)
gmm = GaussianMixture(n_components = 3, num_feat_full = 5,
num_feat_comp = 3, num_feat_shared = 3, num_samp = 4, transform = None,
mask = None, D_indices = None, covariance_type = cov_type,
random_state = rs)
gmm.fit_sparsifier(X = self.td.X)
means = rs.rand(gmm.n_components, gmm.num_feat_full)
covariances = rs.rand(gmm.n_components)
log_prob_test = gmm._compute_log_prob(means, covariances, cov_type)
log_prob_true = np.zeros((gmm.num_samp, gmm.n_components))
for data_ind in range(gmm.num_samp):
for comp_ind in range(gmm.n_components):
true_const = gmm.num_feat_comp * np.log(2*np.pi)
true_logdet = gmm.num_feat_comp * np.log(covariances[comp_ind])
true_mahadist = 1/covariances[comp_ind] * \
np.linalg.norm(gmm.RHDX[data_ind] -
means[comp_ind][gmm.mask[data_ind]])**2
log_prob_true[data_ind, comp_ind] = -.5*(true_const + \
true_logdet + true_mahadist)
self.assertArrayEqual(log_prob_test, log_prob_true)
def test__compute_log_prob_diagonal_no_compression(self):
""" Compare the log_prob computation to that of sklearn with no
compression. Implemented as a precursor to testing it with
compression, to follow. Diagonal covariances. """
cov_type = 'diag'
gmm = GaussianMixture(n_components = 3, num_feat_full = 5,
num_feat_comp = 5, num_feat_shared = 5, num_samp = 4, transform = None,
mask = None, D_indices = None, covariance_type = cov_type)
gmm.fit_sparsifier(X = self.td.X)
means = np.random.rand(gmm.n_components, gmm.num_feat_comp)
covariances = np.random.rand(gmm.n_components, gmm.num_feat_comp)
log_prob_test = gmm._compute_log_prob(means, covariances, cov_type)
precisions = _compute_precision_cholesky(covariances, cov_type)
log_prob_true = _estimate_log_gaussian_prob(self.td.X, means, precisions, cov_type)
self.assertArrayEqual(log_prob_test, log_prob_true)
def test__compute_log_prob(self):
""" This test should probably get implemented eventually. It corresponds
to testing the wrapper around fastLA.pairwise_mahalanobis_distances and
gmm._compute_logdet_array. Each of these has tests for spherical and
diagonal cases with sparsification.
Currently we have tests of:
- component functions in _compute_log_prob
includes diag and spherical on compressed data with
sparsification
- _compute_log_prob on dense data
tests against sklearn
I can't currently think of a way to implement a test for this that isn't
a trivial replication of those earlier tests.
"""
#TODO
def test__estimate_log_prob_resp_spherical_no_compression(self):
cov_type = 'spherical'
gmm = GaussianMixture(n_components = 3, num_feat_full = 5,
num_feat_comp = 5, num_feat_shared = 5, num_samp = 4, transform = None,
mask = None, D_indices = None, covariance_type = cov_type)
gmm.fit_sparsifier(X = self.td.X)
means = np.random.rand(gmm.n_components, gmm.num_feat_comp)
covariances = np.random.rand(gmm.n_components)
weights = np.random.rand(gmm.n_components)
weights /= weights.sum()
log_prob_test, log_resp_test, log_prob_norm_test = gmm._estimate_log_prob_resp(
weights, means, covariances, cov_type)
# find skl's values, pretty ugly to do.
precisions = _compute_precision_cholesky(covariances, cov_type)
gmm_skl = GMSKL(n_components = 3, covariance_type = cov_type)
gmm_skl.means_ = means
gmm_skl.precisions_cholesky_ = precisions
gmm_skl.weights_ = weights
gmm_skl.covariance_type_ = cov_type
log_prob_norm_true, log_resp_true = gmm_skl._estimate_log_prob_resp(self.td.X)
# if anything is bad later this overwrite with mean seems suspect:
log_prob_norm_true = log_prob_norm_true.mean()
# now get the log_prob from another function
log_prob_true = _estimate_log_gaussian_prob(self.td.X, means, precisions, cov_type)
# run the tests
self.assertArrayEqual(log_prob_test, log_prob_true)
self.assertArrayEqual(log_prob_norm_true, log_prob_norm_test)
self.assertArrayEqual(log_resp_true, log_resp_test)
def test__estimate_log_prob_resp_diagonal_no_compression(self):
cov_type = 'diag'
gmm = GaussianMixture(n_components = 3, num_feat_full = 5,
num_feat_comp = 5, num_feat_shared = 5, num_samp = 4, transform = None,
mask = None, D_indices = None, covariance_type = cov_type)
gmm.fit_sparsifier(X = self.td.X)
means = np.random.rand(gmm.n_components, gmm.num_feat_comp)
covariances = np.random.rand(gmm.n_components, gmm.num_feat_comp)
weights = np.random.rand(gmm.n_components)
weights /= weights.sum()
log_prob_test, log_resp_test, log_prob_norm_test = gmm._estimate_log_prob_resp(
weights, means, covariances, cov_type)
# find skl's values, pretty ugly to do.
precisions = _compute_precision_cholesky(covariances, cov_type)
gmm_skl = GMSKL(n_components = 3, covariance_type = cov_type)
gmm_skl.means_ = means
gmm_skl.precisions_cholesky_ = precisions
gmm_skl.weights_ = weights
gmm_skl.covariance_type_ = cov_type
log_prob_norm_true, log_resp_true = gmm_skl._estimate_log_prob_resp(self.td.X)
# if anything is bad later this overwrite with mean seems suspect:
log_prob_norm_true = log_prob_norm_true.mean()
# now get the log_prob from another function
log_prob_true = _estimate_log_gaussian_prob(self.td.X, means, precisions, cov_type)
# run the tests
self.assertArrayEqual(log_prob_test, log_prob_true)
self.assertArrayEqual(log_prob_norm_true, log_prob_norm_test)
self.assertArrayEqual(log_resp_true, log_resp_test)
# test failing
def test__estimate_log_prob_resp_spherical_shared_compression(self):
rs = np.random.RandomState(11)
cov_type = 'spherical'
gmm = GaussianMixture(n_components = 3, num_feat_full = 5,
num_feat_comp = 3, num_feat_shared = 3, num_samp = 4, transform = None,
mask = None, D_indices = None, covariance_type = cov_type,
random_state = rs)
gmm.fit_sparsifier(X = self.td.X)
means = rs.rand(gmm.n_components, gmm.num_feat_full)
covariances = rs.rand(gmm.n_components)
weights = rs.rand(gmm.n_components)
weights /= weights.sum()
log_prob_test, log_resp_test, log_prob_norm_test = gmm._estimate_log_prob_resp(
weights, means, covariances, cov_type)
# find skl's values, pretty ugly to do.
precisions = _compute_precision_cholesky(covariances, cov_type)
gmm_skl = GMSKL(n_components = 3, covariance_type = cov_type)
# we need the mask to be shared so that we can use mask[0] on all means
gmm_skl.means_ = means[:, gmm.mask[0]]
gmm_skl.precisions_cholesky_ = precisions
gmm_skl.weights_ = weights
gmm_skl.covariance_type_ = cov_type
log_prob_norm_true, log_resp_true = gmm_skl._estimate_log_prob_resp(gmm.RHDX)
# if anything is bad later this overwrite with mean seems suspect:
log_prob_norm_true = log_prob_norm_true.mean()
# now get the log_prob from another function
log_prob_true = _estimate_log_gaussian_prob(gmm.RHDX, gmm_skl.means_,
precisions, cov_type)
# run the tests
self.assertArrayEqual(log_prob_test, log_prob_true)
self.assertArrayEqual(log_prob_norm_true, log_prob_norm_test)
self.assertArrayEqual(log_resp_true, log_resp_test)
###########################################################################
###########################################################################
##### M-STEP ######
###########################################################################
###########################################################################
def test__estimate_gaussian_parameters_spherical_no_compression(self):
    """ Test _estimate_gaussian_parameters against sklearn's
    implementation. Spherical covariances, no compression.
    """
    cov_type = 'spherical'
    reg_covar = 1e-6
    # num_feat_comp == num_feat_shared == num_feat_full: no compression,
    # so the result should match sklearn exactly.
    gmm = GaussianMixture(n_components = 3, num_feat_full = 5,
        num_feat_comp = 5, num_feat_shared = 5, num_samp = 4, transform = None,
        mask = None, D_indices = None, covariance_type = cov_type,
        reg_covar = reg_covar)
    gmm.fit_sparsifier(X = self.td.X)
    # arbitrary (unnormalized) responsibilities are fine for this comparison
    resp = np.random.rand(gmm.num_samp, gmm.n_components)
    weights_test, means_test, covariances_test = gmm._estimate_gaussian_parameters(resp, cov_type)
    # skl reference values
    counts_true, means_true, covariances_true = _estimate_gaussian_parameters(
        self.td.X, resp, reg_covar, cov_type)
    # skl returns counts instead of weights.
    weights_true = counts_true / gmm.num_samp
    self.assertArrayEqual(weights_test, weights_true)
    self.assertArrayEqual(means_test, means_true)
    self.assertArrayEqual(covariances_test, covariances_true)
def test__estimate_gaussian_parameters_diagonal_no_compression(self):
    """ Test _estimate_gaussian_parameters against sklearn's
    implementation. Diagonal covariances, no compression.
    """
    cov_type = 'diag'
    reg_covar = 1e-6
    # num_feat_comp == num_feat_shared == num_feat_full: no compression,
    # so the result should match sklearn exactly.
    gmm = GaussianMixture(n_components = 3, num_feat_full = 5,
        num_feat_comp = 5, num_feat_shared = 5, num_samp = 4, transform = None,
        mask = None, D_indices = None, covariance_type = cov_type,
        reg_covar = reg_covar)
    gmm.fit_sparsifier(X = self.td.X)
    # arbitrary (unnormalized) responsibilities are fine for this comparison
    resp = np.random.rand(gmm.num_samp, gmm.n_components)
    weights_test, means_test, covariances_test = gmm._estimate_gaussian_parameters(resp, cov_type)
    # skl reference values
    counts_true, means_true, covariances_true = _estimate_gaussian_parameters(
        self.td.X, resp, reg_covar, cov_type)
    # skl returns counts instead of weights.
    weights_true = counts_true / gmm.num_samp
    self.assertArrayEqual(weights_test, weights_true)
    self.assertArrayEqual(means_test, means_true)
    self.assertArrayEqual(covariances_test, covariances_true)
def test__estimate_gaussian_means_and_covariances_diagonal_no_compression(self):
    """ Test _estimate_gaussian_means_and_covariances against hard-coded
    example. Should be redundant with test__estimate_gaussian_parameters_*
    tests above, which test against sklearn's results. """
    X = np.array([[0,1,0,0],
                  [1,0,0,0],
                  [0,0,1,2]], dtype = np.float64)
    # full sharing (num_feat_* all equal 4) and reg_covar=0 so the expected
    # values below can be computed exactly by hand
    gmm = GaussianMixture(n_components = 2, num_feat_full = 4, num_feat_comp = 4,
        num_feat_shared = 4, num_samp = 3, transform = None,
        D_indices = None, mask = None, reg_covar = 0)
    gmm.fit_sparsifier(X = X)
    # note: columns should sum to 1, but don't have to because the weighted
    # means computation has to renormalize anyway to account for the mask
    resp = np.array([[.6, .3],
                     [.4, .2],
                     [ 0, .5]], dtype = np.float64)
    means, covariances = gmm._estimate_gaussian_means_and_covariances(resp, 'diag')
    # hand-computed weighted means and diagonal covariances of X under resp
    correct_means = np.array([[ .4, .6, .0, .0],
                              [ .2, .3, .5, 1.]], dtype=np.float64)
    correct_covariances = np.array([[.24, .24, 0, 0],
                                    [.16, .21, .25, 1]], dtype=np.float64)
    self.assertArrayEqual(correct_means, means)
    self.assertArrayEqual(correct_covariances, covariances)
def test__estimate_gaussian_weights(self):
    """ Placeholder. Weight estimation is already exercised by the
    test__estimate_gaussian_parameters_* cases above; a dedicated test is
    only needed if we want extra coverage of the compressed case.
    """
    #TODO
    return 1
###########################################################################
###########################################################################
##### Initialization ######
###########################################################################
###########################################################################
def test__initialize_means_case1(self):
    """ means_init given as a single 2D array is returned verbatim. """
    rng = np.random.RandomState(12)
    expected_means = rng.rand(self.td.K, self.td.P)
    model = GaussianMixture(
        n_components=self.td.K, num_feat_full=self.td.P,
        num_feat_comp=self.td.Q, num_feat_shared=self.td.Qs,
        num_samp=self.td.N, transform=self.td.transform,
        D_indices=self.td.D_indices, mask=self.td.mask,
        means_init=expected_means, random_state=rng)
    model.fit_sparsifier(X=self.td.X)
    self.assertArrayEqual(model._initialize_means(), expected_means)
def test__initialize_means_case2(self):
    """ means_init given as a 3D array (one set per initialization):
    consecutive calls should walk through the sets in order.
    """
    rng = np.random.RandomState(12)
    n_init = 3
    all_means = rng.rand(n_init, self.td.K, self.td.P)
    model = GaussianMixture(
        n_components=self.td.K, num_feat_full=self.td.P,
        num_feat_comp=self.td.Q, num_feat_shared=self.td.Qs,
        num_samp=self.td.N, transform=self.td.transform,
        D_indices=self.td.D_indices, mask=self.td.mask,
        means_init=all_means, n_init=n_init, random_state=rng)
    model.fit_sparsifier(X=self.td.X)
    model._initialize_means()  # discard the first initialization
    # the second call should recover the second set of means
    self.assertArrayEqual(model._initialize_means(), all_means[1])
def test__initialize_means_case3(self):
    """ means_init is None with init_params='kmpp'.
    Only the shape of the initialized means is checked.
    """
    rng = np.random.RandomState(12)
    model = GaussianMixture(
        n_components=self.td.K, num_feat_full=self.td.P,
        num_feat_comp=self.td.Q, num_feat_shared=self.td.Qs,
        num_samp=self.td.N, transform=self.td.transform,
        D_indices=self.td.D_indices, mask=self.td.mask,
        means_init=None, init_params='kmpp', random_state=rng)
    model.fit_sparsifier(X=self.td.X)
    expected_shape = np.array([self.td.K, self.td.P])
    self.assertArrayEqual(model._initialize_means().shape, expected_shape)
def test__initialize_means_case4(self):
    """ means_init is None with init_params='random'.
    Only the shape of the initialized means is checked.
    """
    rng = np.random.RandomState(12)
    model = GaussianMixture(
        n_components=self.td.K, num_feat_full=self.td.P,
        num_feat_comp=self.td.Q, num_feat_shared=self.td.Qs,
        num_samp=self.td.N, transform=self.td.transform,
        D_indices=self.td.D_indices, mask=self.td.mask,
        means_init=None, init_params='random', random_state=rng)
    model.fit_sparsifier(X=self.td.X)
    expected_shape = np.array([self.td.K, self.td.P])
    self.assertArrayEqual(model._initialize_means().shape, expected_shape)
def test__initialize_covariances_case1(self):
    """ Spherical covariance, single init: the provided covariances
    are returned unchanged. """
    rng = np.random.RandomState(12)
    means_given = rng.rand(self.td.K, self.td.P)
    covariances_given = rng.rand(self.td.K)
    model = GaussianMixture(
        n_components=self.td.K, num_feat_full=self.td.P,
        num_feat_comp=self.td.Q, num_feat_shared=self.td.Qs,
        num_samp=self.td.N, transform=self.td.transform,
        D_indices=self.td.D_indices, mask=self.td.mask,
        means_init=means_given, covariances_init=covariances_given,
        covariance_type='spherical', random_state=rng)
    model.fit_sparsifier(X=self.td.X)
    means_init = model._initialize_means()
    self.assertArrayEqual(model._initialize_covariances(means_init),
                          covariances_given)
def test__initialize_covariances_case2(self):
    """ Diagonal covariance, single init: the provided covariances
    are returned unchanged. """
    rng = np.random.RandomState(12)
    means_given = rng.rand(self.td.K, self.td.P)
    covariances_given = rng.rand(self.td.K, self.td.P)
    model = GaussianMixture(
        n_components=self.td.K, num_feat_full=self.td.P,
        num_feat_comp=self.td.Q, num_feat_shared=self.td.Qs,
        num_samp=self.td.N, transform=self.td.transform,
        D_indices=self.td.D_indices, mask=self.td.mask,
        means_init=means_given, covariances_init=covariances_given,
        covariance_type='diag', random_state=rng)
    model.fit_sparsifier(X=self.td.X)
    means_init = model._initialize_means()
    self.assertArrayEqual(model._initialize_covariances(means_init),
                          covariances_given)
def test__initialize_covariances_case3(self):
    """ No covariances given: only the shape of the generated
    covariances is checked. """
    rng = np.random.RandomState(12)
    means_given = rng.rand(self.td.K, self.td.P)
    model = GaussianMixture(
        n_components=self.td.K, num_feat_full=self.td.P,
        num_feat_comp=self.td.Q, num_feat_shared=self.td.Qs,
        num_samp=self.td.N, transform=self.td.transform,
        D_indices=self.td.D_indices, mask=self.td.mask,
        means_init=means_given, covariances_init=None,
        covariance_type='diag', random_state=rng)
    model.fit_sparsifier(X=self.td.X)
    means_init = model._initialize_means()
    covariances = model._initialize_covariances(means_init)
    expected_shape = np.array((self.td.K, self.td.P))
    self.assertArrayEqual(covariances.shape, expected_shape)
def test__initialize_covariances_case4(self):
    """ Diagonal covariance, multi-init: cycling the means should
    cycle the covariances in lockstep. """
    rng = np.random.RandomState(12)
    n_init = 3
    all_means = rng.rand(n_init, self.td.K, self.td.P)
    all_covariances = rng.rand(n_init, self.td.K, self.td.P)
    model = GaussianMixture(
        n_components=self.td.K, num_feat_full=self.td.P,
        num_feat_comp=self.td.Q, num_feat_shared=self.td.Qs,
        num_samp=self.td.N, transform=self.td.transform,
        D_indices=self.td.D_indices, mask=self.td.mask,
        means_init=all_means, covariances_init=all_covariances,
        covariance_type='diag', n_init=n_init, random_state=rng)
    model.fit_sparsifier(X=self.td.X)
    model._initialize_means()  # discard first init to cycle covariances
    means_init = model._initialize_means()
    self.assertArrayEqual(model._initialize_covariances(means_init),
                          all_covariances[1])
def test__initialize_weights_case3(self):
    """ Multi-init: weights_init is 2D (one normalized row per
    initialization); cycling the means cycles the weights too. """
    rng = np.random.RandomState(12)
    n_init = 3
    all_means = rng.rand(n_init, self.td.K, self.td.P)
    all_weights = rng.rand(n_init, self.td.K)
    # normalize each row so the weights of every init sum to 1
    all_weights /= all_weights.sum(axis=1)[:, np.newaxis]
    model = GaussianMixture(
        n_components=self.td.K, num_feat_full=self.td.P,
        num_feat_comp=self.td.Q, num_feat_shared=self.td.Qs,
        num_samp=self.td.N, transform=self.td.transform,
        D_indices=self.td.D_indices, mask=self.td.mask,
        means_init=all_means, weights_init=all_weights,
        n_init=n_init, covariance_type='diag', random_state=rng)
    model.fit_sparsifier(X=self.td.X)
    model._initialize_means()  # discard first init to cycle
    means_init = model._initialize_means()
    self.assertArrayEqual(model._initialize_weights(means_init),
                          all_weights[1])
def test__init_resp_from_means(self):
    """ _init_resp_from_means should one-hot-assign every sample to a
    single component given the initial means self.td.U.

    Bug fix: the original test computed resp_test and resp_correct but
    never compared them, so it passed vacuously no matter what
    _init_resp_from_means returned.
    """
    gmm = GaussianMixture(n_components=3,
        num_feat_full=5, num_feat_comp=3, num_feat_shared=1,
        num_samp=4, transform='dct',
        D_indices=self.td.D_indices, mask=self.td.mask)
    gmm.fit_sparsifier(X=self.td.X)
    resp_test = gmm._init_resp_from_means(self.td.U)
    # hard-coded expected assignment (one-hot rows, one per sample)
    resp_correct = np.array([[0, 1, 0],
                             [1, 0, 0],
                             [0, 1, 0],
                             [0, 1, 0]], dtype=int)
    # the assertion that was missing from the original test body
    self.assertArrayEqual(resp_test, resp_correct)
def test__initialize_parameters(self):
    """ Smoke test: _initialize_parameters should run without raising. """
    model = GaussianMixture(
        n_components=3, num_feat_full=5, num_feat_comp=3,
        num_feat_shared=1, num_samp=4, transform='dct',
        D_indices=self.td.D_indices, mask=self.td.mask,
        init_params='random', means_init=None)
    # note: fit from the preconditioned data HDX, unlike the other tests
    model.fit_sparsifier(HDX=self.td.HDX)
    model._initialize_parameters()
###########################################################################
###########################################################################
##### Fit ######
###########################################################################
###########################################################################
def test_fit(self):
    """ Regression test for a case where a covariance collapses to 0:
    reg_covar must floor every covariance entry. """
    reg_covar = 1e-6
    np.random.seed(0)  # fixed seed so the random init is reproducible
    X = np.array([[0, 1, 0, 0],
                  [1, 0, 0, 0],
                  [0, 0, 1, 2]], dtype=np.float64)
    model = GaussianMixture(
        n_components=2, covariance_type='diag',
        num_feat_full=4, num_feat_comp=2, num_feat_shared=1,
        num_samp=3, transform=None, D_indices=None, mask=None,
        reg_covar=reg_covar, init_params='random', max_iter=5)
    model.fit(X=X)
    expected_means = np.array([[0, 0, 1, 0],
                               [1, 0, 0, 0]], dtype=np.float64)
    # every covariance entry should have been floored at reg_covar
    expected_covariances = np.full_like(expected_means, reg_covar)
    self.assertArrayEqual(model.means_, expected_means)
    self.assertArrayEqual(model.covariances_, expected_covariances)
# Run the full test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
| 51.220741
| 102
| 0.586857
| 4,189
| 34,574
| 4.531392
| 0.0783
| 0.049942
| 0.023759
| 0.042777
| 0.7662
| 0.740333
| 0.714256
| 0.697345
| 0.693868
| 0.688284
| 0
| 0.010458
| 0.297536
| 34,574
| 674
| 103
| 51.296736
| 0.771112
| 0.135362
| 0
| 0.720081
| 0
| 0
| 0.006442
| 0
| 0
| 0
| 0
| 0.002967
| 0.087221
| 1
| 0.06288
| false
| 0
| 0.016227
| 0
| 0.085193
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
c3c29a2e5b1177c7e147cfa8bdfedded391635e1
| 337
|
py
|
Python
|
CangJie/utils/testing.py
|
bigdata-ustc/CangJie
|
a3264082fa0432d257b5c4722b14c55f9092a411
|
[
"MIT"
] | 2
|
2020-03-04T02:27:29.000Z
|
2020-05-22T04:07:24.000Z
|
CangJie/utils/testing.py
|
bigdata-ustc/CangJie
|
a3264082fa0432d257b5c4722b14c55f9092a411
|
[
"MIT"
] | null | null | null |
CangJie/utils/testing.py
|
bigdata-ustc/CangJie
|
a3264082fa0432d257b5c4722b14c55f9092a411
|
[
"MIT"
] | 1
|
2022-03-12T00:31:59.000Z
|
2022-03-12T00:31:59.000Z
|
# coding: utf-8
# 2020/1/2 @ tongshiwei
import random
from CangJie import CHI_CHAR
__all__ = ["pseudo_sentence"]
def _pseudo_sentence(length):
    """Build one pseudo sentence of *length* characters drawn at random
    (with replacement) from CHI_CHAR."""
    return "".join(random.choice(CHI_CHAR) for _ in range(length))
def pseudo_sentence(num, max_length):
    """Return *num* pseudo sentences, each 1..max_length characters long."""
    sentences = []
    for _ in range(num):
        sentences.append(_pseudo_sentence(random.randint(1, max_length)))
    return sentences
| 21.0625
| 80
| 0.735905
| 49
| 337
| 4.734694
| 0.55102
| 0.241379
| 0.146552
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.027682
| 0.142433
| 337
| 15
| 81
| 22.466667
| 0.775087
| 0.103858
| 0
| 0
| 0
| 0
| 0.050167
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| false
| 0
| 0.285714
| 0.285714
| 0.857143
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 4
|
7f0dc60b40dd08456791043a9d3a4291a01edf3b
| 143
|
py
|
Python
|
tanuki/history/urls.py
|
addisonmaupin/capstone2020
|
cf8c8e7336aa9866859349838e4f42bc6831679c
|
[
"MIT"
] | null | null | null |
tanuki/history/urls.py
|
addisonmaupin/capstone2020
|
cf8c8e7336aa9866859349838e4f42bc6831679c
|
[
"MIT"
] | 9
|
2021-03-19T14:50:48.000Z
|
2022-03-12T00:47:25.000Z
|
tanuki/history/urls.py
|
pabsromo/capstone2020
|
cf8c8e7336aa9866859349838e4f42bc6831679c
|
[
"MIT"
] | null | null | null |
from django.urls import path
from . import views

# URL namespace used when reversing, e.g. reverse('history:history').
app_name = 'history'

urlpatterns = [
    # GET /history/ -> views.history
    path('history/', views.history, name='history'),
]
| 15.888889
| 52
| 0.692308
| 18
| 143
| 5.444444
| 0.555556
| 0.22449
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.167832
| 143
| 8
| 53
| 17.875
| 0.823529
| 0
| 0
| 0
| 0
| 0
| 0.153846
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
613cb859cc616b5297d4f1b783cb516f1faa4533
| 279
|
py
|
Python
|
tests/test_data.py
|
thoriuchi0531/tutti
|
d0fe202864edc9d257654743db6dc44a67a1d7ed
|
[
"MIT"
] | 1
|
2021-11-14T15:53:38.000Z
|
2021-11-14T15:53:38.000Z
|
tests/test_data.py
|
thoriuchi0531/fipie
|
d0fe202864edc9d257654743db6dc44a67a1d7ed
|
[
"MIT"
] | null | null | null |
tests/test_data.py
|
thoriuchi0531/fipie
|
d0fe202864edc9d257654743db6dc44a67a1d7ed
|
[
"MIT"
] | null | null | null |
import pandas as pd
from fipie.data import load_example_data
def test_load_example_data():
    """The bundled example dataset loads as a pandas DataFrame."""
    assert isinstance(load_example_data(), pd.DataFrame)
def test_instrument_size():
    """The example dataset contains exactly 7 instruments (columns)."""
    n_instruments = load_example_data().shape[1]
    assert n_instruments == 7
| 17.4375
| 41
| 0.724014
| 40
| 279
| 4.775
| 0.5
| 0.230366
| 0.314136
| 0.198953
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013333
| 0.193548
| 279
| 15
| 42
| 18.6
| 0.835556
| 0.046595
| 0
| 0.25
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25
| 1
| 0.25
| false
| 0
| 0.25
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
613fb08490d3b7f7630791e55f0173cb7d3c7a88
| 74,326
|
py
|
Python
|
osm_ro/httpserver.py
|
alfonsoegio/RO
|
9228f51e8a0abb2772c4b75a3462835fe336498a
|
[
"Apache-2.0"
] | null | null | null |
osm_ro/httpserver.py
|
alfonsoegio/RO
|
9228f51e8a0abb2772c4b75a3462835fe336498a
|
[
"Apache-2.0"
] | null | null | null |
osm_ro/httpserver.py
|
alfonsoegio/RO
|
9228f51e8a0abb2772c4b75a3462835fe336498a
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
##
# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# For those usages not covered by the Apache License, Version 2.0 please
# contact with: nfvlabs@tid.es
##
'''
HTTP server implementing the openmano API. It will answer to POST, PUT, GET methods in the appropriate URLs
and will use the nfvo.py module to run the appropriate method.
Every YAML/JSON file is checked against a schema in openmano_schemas.py module.
'''
__author__="Alfonso Tierno, Gerardo Garcia"
__date__ ="$17-sep-2014 09:07:15$"
import bottle
import yaml
import threading
import logging
from openmano_schemas import vnfd_schema_v01, vnfd_schema_v02, \
nsd_schema_v01, nsd_schema_v02, nsd_schema_v03, scenario_edit_schema, \
scenario_action_schema, instance_scenario_action_schema, instance_scenario_create_schema_v01, \
tenant_schema, tenant_edit_schema,\
datacenter_schema, datacenter_edit_schema, datacenter_action_schema, datacenter_associate_schema,\
object_schema, netmap_new_schema, netmap_edit_schema, sdn_controller_schema, sdn_controller_edit_schema, \
sdn_port_mapping_schema, sdn_external_port_schema
from .http_tools import errors as httperrors
from .http_tools.request_processing import (
format_out,
format_in,
filter_query_string
)
from .wim.http_handler import WimHandler
import nfvo
import utils
from db_base import db_base_Exception
from functools import wraps
# NOTE(review): 'global' at module scope is a no-op; these lines only
# document which names are rebound by httpserver.__init__ below.
global mydb
global url_base
global logger

# common prefix for every route registered in this module
url_base="/openmano"
# configured lazily by the first httpserver instance (see __init__)
logger = None
def log_to_logger(fn):
    '''
    Decorator: wrap a Bottle request handler so that an access-log line
    is emitted (via the module-level logger) after the request is handled.
    '''
    @wraps(fn)
    def wrapper(*args, **kwargs):
        response = fn(*args, **kwargs)
        # log after the handler runs so the final response status is known
        logger.info('FROM %s %s %s %s', bottle.request.remote_addr,
                    bottle.request.method,
                    bottle.request.url,
                    bottle.response.status)
        return response
    return wrapper
class httpserver(threading.Thread):
    """HTTP server thread exposing the openmano API through bottle.

    One instance serves either the regular or the admin API; the ``admin``
    flag only changes the thread name used in logs.
    """
    def __init__(self, db, admin=False, host='localhost', port=9090,
                 wim_persistence=None, wim_engine=None):
        global mydb
        global logger
        # lazy logger setup shared by every instance
        if not logger:
            logger = logging.getLogger('openmano.http')
        threading.Thread.__init__(self)
        self.host = host
        self.port = port  # port where the listen service must be started
        self.name = "http_admin" if admin else "http"
        mydb = db
        # sub-APIs with their own bottle apps, merged in run()
        self.handlers = [
            WimHandler(db, wim_persistence, wim_engine, url_base)
        ]
        # Ensure that when the main program exits the thread also exits.
        # Fix: the original set this twice, once via the deprecated
        # setDaemon(True); assigning .daemon once is the supported form.
        self.daemon = True

    def run(self, debug=False, quiet=True):
        """Thread body: install the access-log plugin, merge the handler
        apps into the default bottle app and start the (blocking) server."""
        bottle.install(log_to_logger)
        default_app = bottle.app()
        for handler in self.handlers:
            default_app.merge(handler.wsgi_app)
        bottle.run(host=self.host, port=self.port, debug=debug, quiet=quiet)
def run_bottle(db, host_='localhost', port_=9090):
    '''Launch the HTTP server in the main thread so it can be debugged.'''
    server = httpserver(db, host=host_, port=port_)
    server.run(debug=True)  # quiet=True
@bottle.route(url_base + '/', method='GET')
def http_get():
    '''API root; placeholder response.'''
    return 'works' #TODO: to be completed
@bottle.hook('after_request')
def enable_cors():
    '''Attach a permissive CORS header to every response.
    (Original note: not yet confirmed this is really needed.)'''
    bottle.response.headers['Access-Control-Allow-Origin'] = '*'
@bottle.route(url_base + '/version', method='GET')
def http_get_version():
    '''Report the openmano version string.'''
    return nfvo.get_version()
#
# VNFs
#
@bottle.route(url_base + '/tenants', method='GET')
def http_get_tenants():
    '''list tenants; supports filtering through the query string'''
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    # split the query string into SELECT/WHERE/LIMIT; only listed columns allowed
    select_,where_,limit_ = filter_query_string(bottle.request.query, None,
            ('uuid','name','description','created_at') )
    try:
        tenants = mydb.get_rows(FROM='nfvo_tenants', SELECT=select_,WHERE=where_,LIMIT=limit_)
        #change_keys_http2db(content, http2db_tenant, reverse=True)
        utils.convert_float_timestamp2str(tenants)
        data={'tenants' : tenants}
        return format_out(data)
    except bottle.HTTPError:
        raise  # already a well-formed HTTP error response
    except db_base_Exception as e:
        logger.error("http_get_tenants error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        # unexpected: log the full traceback and answer 500
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
@bottle.route(url_base + '/tenants/<tenant_id>', method='GET')
def http_get_tenant_id(tenant_id):
    '''get tenant details, can use both uuid or name'''
    #obtain data
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    try:
        from_ = 'nfvo_tenants'
        select_, where_, limit_ = filter_query_string(bottle.request.query, None,
                                                      ('uuid', 'name', 'description', 'created_at'))
        # decide whether tenant_id is a uuid or a human-readable name
        what = 'uuid' if utils.check_valid_uuid(tenant_id) else 'name'
        where_[what] = tenant_id
        tenants = mydb.get_rows(FROM=from_, SELECT=select_,WHERE=where_)
        #change_keys_http2db(content, http2db_tenant, reverse=True)
        # exactly one row must match: 404 for none, 400 for ambiguous names
        if len(tenants) == 0:
            bottle.abort(httperrors.Not_Found, "No tenant found with {}='{}'".format(what, tenant_id))
        elif len(tenants) > 1:
            bottle.abort(httperrors.Bad_Request, "More than one tenant found with {}='{}'".format(what, tenant_id))
        utils.convert_float_timestamp2str(tenants[0])
        data = {'tenant': tenants[0]}
        return format_out(data)
    except bottle.HTTPError:
        raise
    except db_base_Exception as e:
        logger.error("http_get_tenant_id error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
@bottle.route(url_base + '/tenants', method='POST')
def http_post_tenants():
    '''insert a tenant into the catalogue. '''
    #parse input data
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    # validate the request body against tenant_schema
    http_content,_ = format_in( tenant_schema )
    r = utils.remove_extra_items(http_content, tenant_schema)
    if r:
        logger.debug("Remove received extra items %s", str(r))
    try:
        data = nfvo.new_tenant(mydb, http_content['tenant'])
        # answer with the full details of the freshly created tenant
        return http_get_tenant_id(data)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_post_tenants error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
@bottle.route(url_base + '/tenants/<tenant_id>', method='PUT')
def http_edit_tenant_id(tenant_id):
    '''edit tenant details, can use both uuid or name'''
    #parse input data
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    # validate the request body against tenant_edit_schema
    http_content,_ = format_in( tenant_edit_schema )
    r = utils.remove_extra_items(http_content, tenant_edit_schema)
    if r:
        logger.debug("Remove received extra items %s", str(r))
    #obtain data, check that only one exist
    try:
        tenant = mydb.get_table_by_uuid_name('nfvo_tenants', tenant_id)
        #edit data
        tenant_id = tenant['uuid']
        where={'uuid': tenant['uuid']}
        mydb.update_rows('nfvo_tenants', http_content['tenant'], where)
        # answer with the updated tenant details
        return http_get_tenant_id(tenant_id)
    except bottle.HTTPError:
        raise
    except db_base_Exception as e:
        logger.error("http_edit_tenant_id error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
@bottle.route(url_base + '/tenants/<tenant_id>', method='DELETE')
def http_delete_tenant_id(tenant_id):
    '''delete a tenant from database, can use both uuid or name'''
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    try:
        data = nfvo.delete_tenant(mydb, tenant_id)
        return format_out({"result":"tenant " + data + " deleted"})
    except bottle.HTTPError:
        raise
    except db_base_Exception as e:
        logger.error("http_delete_tenant_id error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
@bottle.route(url_base + '/<tenant_id>/datacenters', method='GET')
def http_get_datacenters(tenant_id):
    '''list datacenters; tenant_id may be 'any' to list all of them'''
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    try:
        if tenant_id != 'any':
            #check valid tenant_id
            nfvo.check_tenant(mydb, tenant_id)
        select_,where_,limit_ = filter_query_string(bottle.request.query, None,
                ('uuid','name','vim_url','type','created_at') )
        if tenant_id != 'any':
            where_['nfvo_tenant_id'] = tenant_id
            # qualify 'created_at' with the datacenters alias ('d') since the
            # join below makes the bare column name ambiguous
            if 'created_at' in select_:
                select_[ select_.index('created_at') ] = 'd.created_at as created_at'
            if 'created_at' in where_:
                where_['d.created_at'] = where_.pop('created_at')
            datacenters = mydb.get_rows(FROM='datacenters as d join tenants_datacenters as td on d.uuid=td.datacenter_id',
                                        SELECT=select_,WHERE=where_,LIMIT=limit_)
        else:
            datacenters = mydb.get_rows(FROM='datacenters',
                                        SELECT=select_,WHERE=where_,LIMIT=limit_)
        #change_keys_http2db(content, http2db_tenant, reverse=True)
        utils.convert_float_timestamp2str(datacenters)
        data={'datacenters' : datacenters}
        return format_out(data)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_get_datacenters error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
@bottle.route(url_base + '/<tenant_id>/vim_accounts', method='GET')
@bottle.route(url_base + '/<tenant_id>/vim_accounts/<vim_account_id>', method='GET')
def http_get_vim_account(tenant_id, vim_account_id=None):
    '''get vim_account list/details, '''
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    try:
        select_ = ('uuid', 'name', 'dt.datacenter_id as vim_id', 'vim_tenant_name', 'vim_tenant_id', 'user', 'config',
                   'dt.created_at as created_at', 'passwd')
        where_ = {'nfvo_tenant_id': tenant_id}
        if vim_account_id:
            where_['dt.uuid'] = vim_account_id
        from_ = 'tenants_datacenters as td join datacenter_tenants as dt on dt.uuid=td.datacenter_tenant_id'
        vim_accounts = mydb.get_rows(SELECT=select_, FROM=from_, WHERE=where_)
        if len(vim_accounts) == 0 and vim_account_id:
            # BUGFIX: was HTTP_Not_Found, an undefined name that raised
            # NameError; use the httperrors constant like the rest of the module
            bottle.abort(httperrors.Not_Found, "No vim_account found for tenant {} and id '{}'".format(tenant_id,
                                                                                                      vim_account_id))
        for vim_account in vim_accounts:
            # never leak credentials in API responses
            if vim_account["passwd"]:
                vim_account["passwd"] = "******"
            if vim_account['config'] is not None:
                try:
                    # NOTE(review): yaml.load without an explicit Loader is
                    # unsafe on untrusted input; consider yaml.safe_load if the
                    # stored configs never use custom tags.
                    config_dict = yaml.load(vim_account['config'])
                    vim_account['config'] = config_dict
                    if vim_account['config'].get('admin_password'):
                        vim_account['config']['admin_password'] = "******"
                    if vim_account['config'].get('vcenter_password'):
                        vim_account['config']['vcenter_password'] = "******"
                    if vim_account['config'].get('nsx_password'):
                        vim_account['config']['nsx_password'] = "******"
                except Exception as e:
                    logger.error("Exception '%s' while trying to load config information", str(e))
        # change_keys_http2db(content, http2db_datacenter, reverse=True)
        #convert_datetime2str(vim_account)
        if vim_account_id:
            return format_out({"datacenter": vim_accounts[0]})
        else:
            return format_out({"datacenters": vim_accounts})
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_get_datacenter_id error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        # BUGFIX: was HTTP_Internal_Server_Error (undefined name)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
@bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>', method='GET')
def http_get_datacenter_id(tenant_id, datacenter_id):
    '''get datacenter details, can use both uuid or name'''
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    try:
        if tenant_id != 'any':
            #check valid tenant_id
            nfvo.check_tenant(mydb, tenant_id)
        #obtain data
        # decide whether datacenter_id is a uuid or a human-readable name
        what = 'uuid' if utils.check_valid_uuid(datacenter_id) else 'name'
        where_={}
        where_[what] = datacenter_id
        select_=['uuid', 'name','vim_url', 'vim_url_admin', 'type', 'd.config as config', 'description', 'd.created_at as created_at']
        if tenant_id != 'any':
            # restrict to datacenters attached to this tenant via the join table
            select_.append("datacenter_tenant_id")
            where_['td.nfvo_tenant_id']= tenant_id
            from_='datacenters as d join tenants_datacenters as td on d.uuid=td.datacenter_id'
        else:
            from_='datacenters as d'
        datacenters = mydb.get_rows(
            SELECT=select_,
            FROM=from_,
            WHERE=where_)
        # exactly one row must match: 404 for none, 400 for ambiguous names
        if len(datacenters)==0:
            bottle.abort( httperrors.Not_Found, "No datacenter found for tenant with {} '{}'".format(what, datacenter_id) )
        elif len(datacenters)>1:
            bottle.abort( httperrors.Bad_Request, "More than one datacenter found for tenant with {} '{}'".format(what, datacenter_id) )
        datacenter = datacenters[0]
        if tenant_id != 'any':
            #get vim tenant info
            vim_tenants = mydb.get_rows(
                SELECT=("vim_tenant_name", "vim_tenant_id", "user", "passwd", "config"),
                FROM="datacenter_tenants",
                WHERE={"uuid": datacenters[0]["datacenter_tenant_id"]},
                ORDER_BY=("created", ) )
            del datacenter["datacenter_tenant_id"]
            datacenter["vim_tenants"] = vim_tenants
            for vim_tenant in vim_tenants:
                # mask credentials before returning them over the API
                if vim_tenant["passwd"]:
                    vim_tenant["passwd"] = "******"
                if vim_tenant['config'] != None:
                    try:
                        # NOTE(review): yaml.load without an explicit Loader is
                        # unsafe on untrusted input; consider yaml.safe_load.
                        config_dict = yaml.load(vim_tenant['config'])
                        vim_tenant['config'] = config_dict
                        if vim_tenant['config'].get('admin_password'):
                            vim_tenant['config']['admin_password'] = "******"
                        if vim_tenant['config'].get('vcenter_password'):
                            vim_tenant['config']['vcenter_password'] = "******"
                        if vim_tenant['config'].get('nsx_password'):
                            vim_tenant['config']['nsx_password'] = "******"
                    except Exception as e:
                        logger.error("Exception '%s' while trying to load config information", str(e))
        # same masking for the datacenter-level config blob
        if datacenter['config'] != None:
            try:
                config_dict = yaml.load(datacenter['config'])
                datacenter['config'] = config_dict
                if datacenter['config'].get('admin_password'):
                    datacenter['config']['admin_password'] = "******"
                if datacenter['config'].get('vcenter_password'):
                    datacenter['config']['vcenter_password'] = "******"
                if datacenter['config'].get('nsx_password'):
                    datacenter['config']['nsx_password'] = "******"
            except Exception as e:
                logger.error("Exception '%s' while trying to load config information", str(e))
        #change_keys_http2db(content, http2db_datacenter, reverse=True)
        utils.convert_float_timestamp2str(datacenter)
        data={'datacenter' : datacenter}
        return format_out(data)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_get_datacenter_id error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
@bottle.route(url_base + '/datacenters', method='POST')
def http_post_datacenters():
    """Create a new datacenter entry in the catalogue from the request body."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr,
                 bottle.request.method, bottle.request.url)
    # validate and normalize the incoming payload
    http_content, _ = format_in(datacenter_schema, confidential_data=True)
    removed = utils.remove_extra_items(http_content, datacenter_schema)
    if removed:
        logger.debug("Remove received extra items %s", str(removed))
    try:
        new_id = nfvo.new_datacenter(mydb, http_content['datacenter'])
        return http_get_datacenter_id('any', new_id)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as err:
        logger.error("http_post_datacenters error {}: {}".format(err.http_code, str(err)))
        bottle.abort(err.http_code, str(err))
    except Exception as err:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(err).__name__ + ": " + str(err))
@bottle.route(url_base + '/datacenters/<datacenter_id_name>', method='PUT')
def http_edit_datacenter_id(datacenter_id_name):
    """Modify an existing datacenter; the identifier may be a uuid or a name."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr,
                 bottle.request.method, bottle.request.url)
    # validate and normalize the incoming payload
    http_content, _ = format_in(datacenter_edit_schema)
    removed = utils.remove_extra_items(http_content, datacenter_edit_schema)
    if removed:
        logger.debug("Remove received extra items %s", str(removed))
    try:
        datacenter_id = nfvo.edit_datacenter(mydb, datacenter_id_name, http_content['datacenter'])
        return http_get_datacenter_id('any', datacenter_id)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as err:
        logger.error("http_edit_datacenter_id error {}: {}".format(err.http_code, str(err)))
        bottle.abort(err.http_code, str(err))
    except Exception as err:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(err).__name__ + ": " + str(err))
@bottle.route(url_base + '/<tenant_id>/sdn_controllers', method='POST')
def http_post_sdn_controller(tenant_id):
    """Register a new sdn controller in the catalogue for the given tenant."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr,
                 bottle.request.method, bottle.request.url)
    http_content, _ = format_in(sdn_controller_schema)
    try:
        logger.debug("tenant_id: " + tenant_id)
        new_id = nfvo.sdn_controller_create(mydb, tenant_id, http_content['sdn_controller'])
        return format_out({"sdn_controller": nfvo.sdn_controller_list(mydb, tenant_id, new_id)})
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as err:
        logger.error("http_post_sdn_controller error {}: {}".format(err.http_code, str(err)))
        bottle.abort(err.http_code, str(err))
    except Exception as err:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(err).__name__ + ": " + str(err))
@bottle.route(url_base + '/<tenant_id>/sdn_controllers/<controller_id>', method='PUT')
def http_put_sdn_controller_update(tenant_id, controller_id):
    """Update an existing sdn controller and return its refreshed description.

    :param tenant_id: tenant owning the controller (uuid or name)
    :param controller_id: sdn controller to modify (uuid or name)
    """
    # parse input data
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    http_content, _ = format_in(sdn_controller_edit_schema)
    try:
        logger.debug("content: {}".format(http_content['sdn_controller']))
        nfvo.sdn_controller_update(mydb, tenant_id, controller_id, http_content['sdn_controller'])
        return format_out({"sdn_controller": nfvo.sdn_controller_list(mydb, tenant_id, controller_id)})
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        # fixed: log line was wrongly tagged as http_post_sdn_controller
        logger.error("http_put_sdn_controller_update error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
@bottle.route(url_base + '/<tenant_id>/sdn_controllers', method='GET')
def http_get_sdn_controller(tenant_id):
    """Return the list of sdn controllers owned by the tenant (uuid or name)."""
    try:
        logger.debug('FROM %s %s %s', bottle.request.remote_addr,
                     bottle.request.method, bottle.request.url)
        return format_out({'sdn_controllers': nfvo.sdn_controller_list(mydb, tenant_id)})
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as err:
        logger.error("http_get_sdn_controller error {}: {}".format(err.http_code, str(err)))
        bottle.abort(err.http_code, str(err))
    except Exception as err:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(err).__name__ + ": " + str(err))
@bottle.route(url_base + '/<tenant_id>/sdn_controllers/<controller_id>', method='GET')
def http_get_sdn_controller_id(tenant_id, controller_id):
    """Return the details of one sdn controller (uuid or name accepted)."""
    try:
        logger.debug('FROM %s %s %s', bottle.request.remote_addr,
                     bottle.request.method, bottle.request.url)
        controllers = nfvo.sdn_controller_list(mydb, tenant_id, controller_id)
        return format_out({"sdn_controllers": controllers})
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as err:
        logger.error("http_get_sdn_controller_id error {}: {}".format(err.http_code, str(err)))
        bottle.abort(err.http_code, str(err))
    except Exception as err:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(err).__name__ + ": " + str(err))
@bottle.route(url_base + '/<tenant_id>/sdn_controllers/<controller_id>', method='DELETE')
def http_delete_sdn_controller_id(tenant_id, controller_id):
    """Remove an sdn controller from the catalogue (uuid or name accepted)."""
    try:
        logger.debug('FROM %s %s %s', bottle.request.remote_addr,
                     bottle.request.method, bottle.request.url)
        outcome = nfvo.sdn_controller_delete(mydb, tenant_id, controller_id)
        return format_out(outcome)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as err:
        logger.error("http_delete_sdn_controller_id error {}: {}".format(err.http_code, str(err)))
        bottle.abort(err.http_code, str(err))
    except Exception as err:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(err).__name__ + ": " + str(err))
@bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>/sdn_mapping', method='POST')
def http_post_datacenter_sdn_port_mapping(tenant_id, datacenter_id):
    """Store the sdn port mapping of a datacenter taken from the request body."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr,
                 bottle.request.method, bottle.request.url)
    http_content, _ = format_in(sdn_port_mapping_schema)
    try:
        mapping = nfvo.datacenter_sdn_port_mapping_set(mydb, tenant_id, datacenter_id,
                                                       http_content['sdn_port_mapping'])
        return format_out({"sdn_port_mapping": mapping})
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as err:
        logger.error("http_post_datacenter_sdn_port_mapping error {}: {}".format(err.http_code, str(err)))
        bottle.abort(err.http_code, str(err))
    except Exception as err:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(err).__name__ + ": " + str(err))
@bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>/sdn_mapping', method='GET')
def http_get_datacenter_sdn_port_mapping(tenant_id, datacenter_id):
    """Return the sdn port mapping of a datacenter (uuid or name accepted)."""
    try:
        logger.debug('FROM %s %s %s', bottle.request.remote_addr,
                     bottle.request.method, bottle.request.url)
        mapping = nfvo.datacenter_sdn_port_mapping_list(mydb, tenant_id, datacenter_id)
        return format_out({"sdn_port_mapping": mapping})
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as err:
        logger.error("http_get_datacenter_sdn_port_mapping error {}: {}".format(err.http_code, str(err)))
        bottle.abort(err.http_code, str(err))
    except Exception as err:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(err).__name__ + ": " + str(err))
@bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>/sdn_mapping', method='DELETE')
def http_delete_datacenter_sdn_port_mapping(tenant_id, datacenter_id):
    """Delete the sdn port mapping of a datacenter (uuid or name accepted)."""
    try:
        logger.debug('FROM %s %s %s', bottle.request.remote_addr,
                     bottle.request.method, bottle.request.url)
        outcome = nfvo.datacenter_sdn_port_mapping_delete(mydb, tenant_id, datacenter_id)
        return format_out({"result": outcome})
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as err:
        logger.error("http_delete_datacenter_sdn_port_mapping error {}: {}".format(err.http_code, str(err)))
        bottle.abort(err.http_code, str(err))
    except Exception as err:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(err).__name__ + ": " + str(err))
@bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>/networks', method='GET') #deprecated
@bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>/netmaps', method='GET')
@bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>/netmaps/<netmap_id>', method='GET')
def http_getnetmap_datacenter_id(tenant_id, datacenter_id, netmap_id=None):
    '''get datacenter netmaps; both uuid and name are accepted for datacenter and netmap'''
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    #obtain data
    try:
        datacenter_dict = mydb.get_table_by_uuid_name('datacenters', datacenter_id, "datacenter")
        where_ = {"datacenter_id": datacenter_dict['uuid']}
        if netmap_id:
            # a netmap can be addressed by uuid or by name
            if utils.check_valid_uuid(netmap_id):
                where_["uuid"] = netmap_id
            else:
                where_["name"] = netmap_id
        netmaps = mydb.get_rows(FROM='datacenter_nets',
                                SELECT=('name', 'vim_net_id as vim_id', 'uuid', 'type', 'multipoint',
                                        'shared', 'description', 'created_at'),
                                WHERE=where_)
        utils.convert_float_timestamp2str(netmaps)
        utils.convert_str2boolean(netmaps, ('shared', 'multipoint'))
        if netmap_id and len(netmaps) == 1:
            data = {'netmap': netmaps[0]}
        elif netmap_id and len(netmaps) == 0:
            # fixed: dict.iteritems() does not exist in python3; items() behaves the same on
            # both major versions. bottle.abort raises HTTPError, so the former unreachable
            # 'return' after it was dropped.
            bottle.abort(httperrors.Not_Found, "No netmap found with " + " and ".join(
                map(lambda x: str(x[0]) + ": " + str(x[1]), where_.items())))
        else:
            data = {'netmaps': netmaps}
        return format_out(data)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_getnetwork_datacenter_id error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
@bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>/netmaps', method='DELETE')
@bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>/netmaps/<netmap_id>', method='DELETE')
def http_delnetmap_datacenter_id(tenant_id, datacenter_id, netmap_id=None):
    '''delete one netmap (by uuid or name), or every netmap of the datacenter when none is given'''
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    #obtain data
    try:
        datacenter_dict = mydb.get_table_by_uuid_name('datacenters', datacenter_id, "datacenter")
        where_ = {"datacenter_id": datacenter_dict['uuid']}
        if netmap_id:
            # a netmap can be addressed by uuid or by name
            if utils.check_valid_uuid(netmap_id):
                where_["uuid"] = netmap_id
            else:
                where_["name"] = netmap_id
        deleted = mydb.delete_row(FROM='datacenter_nets', WHERE=where_)
        if deleted == 0 and netmap_id:
            # fixed: dict.iteritems() does not exist in python3; items() behaves the same
            # on both major versions
            bottle.abort(httperrors.Not_Found, "No netmap found with " + " and ".join(
                map(lambda x: str(x[0]) + ": " + str(x[1]), where_.items())))
        if netmap_id:
            return format_out({"result": "netmap %s deleted" % netmap_id})
        else:
            return format_out({"result": "%d netmap deleted" % deleted})
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_delnetmap_datacenter_id error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
@bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>/netmaps/upload', method='POST')
def http_uploadnetmap_datacenter_id(tenant_id, datacenter_id):
    """Import every network of the datacenter as netmaps and return the list."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr,
                 bottle.request.method, bottle.request.url)
    try:
        netmaps = nfvo.datacenter_new_netmap(mydb, tenant_id, datacenter_id, None)
        utils.convert_float_timestamp2str(netmaps)
        utils.convert_str2boolean(netmaps, ('shared', 'multipoint'))
        return format_out({'netmaps': netmaps})
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as err:
        logger.error("http_uploadnetmap_datacenter_id error {}: {}".format(err.http_code, str(err)))
        bottle.abort(err.http_code, str(err))
    except Exception as err:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(err).__name__ + ": " + str(err))
@bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>/netmaps', method='POST')
def http_postnetmap_datacenter_id(tenant_id, datacenter_id):
    """Create a new netmap for the datacenter from the request body."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr,
                 bottle.request.method, bottle.request.url)
    # validate and normalize the incoming payload
    http_content, _ = format_in(netmap_new_schema)
    removed = utils.remove_extra_items(http_content, netmap_new_schema)
    if removed:
        logger.debug("Remove received extra items %s", str(removed))
    try:
        netmaps = nfvo.datacenter_new_netmap(mydb, tenant_id, datacenter_id, http_content)
        utils.convert_float_timestamp2str(netmaps)
        utils.convert_str2boolean(netmaps, ('shared', 'multipoint'))
        return format_out({'netmaps': netmaps})
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as err:
        logger.error("http_postnetmap_datacenter_id error {}: {}".format(err.http_code, str(err)))
        bottle.abort(err.http_code, str(err))
    except Exception as err:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(err).__name__ + ": " + str(err))
@bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>/netmaps/<netmap_id>', method='PUT')
def http_putnettmap_datacenter_id(tenant_id, datacenter_id, netmap_id):
    """Edit an existing netmap and return its refreshed description."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr,
                 bottle.request.method, bottle.request.url)
    # validate and normalize the incoming payload
    http_content, _ = format_in(netmap_edit_schema)
    removed = utils.remove_extra_items(http_content, netmap_edit_schema)
    if removed:
        logger.debug("Remove received extra items %s", str(removed))
    try:
        nfvo.datacenter_edit_netmap(mydb, tenant_id, datacenter_id, netmap_id, http_content)
        return http_getnetmap_datacenter_id(tenant_id, datacenter_id, netmap_id)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as err:
        logger.error("http_putnettmap_datacenter_id error {}: {}".format(err.http_code, str(err)))
        bottle.abort(err.http_code, str(err))
    except Exception as err:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(err).__name__ + ": " + str(err))
@bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>/action', method='POST')
def http_action_datacenter_id(tenant_id, datacenter_id):
    '''perform an action over datacenter, can use both uuid or name'''
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    # parse and validate input data
    http_content, _ = format_in(datacenter_action_schema)
    r = utils.remove_extra_items(http_content, datacenter_action_schema)
    if r:
        logger.debug("Remove received extra items %s", str(r))
    try:
        result = nfvo.datacenter_action(mydb, tenant_id, datacenter_id, http_content)
        if 'net-update' in http_content:
            # on net-update return the refreshed netmap list.
            # fixed: the mandatory tenant_id argument was missing, which raised a
            # TypeError whenever a 'net-update' action was requested
            return http_getnetmap_datacenter_id(tenant_id, datacenter_id)
        else:
            return format_out(result)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_action_datacenter_id error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
@bottle.route(url_base + '/datacenters/<datacenter_id>', method='DELETE')
def http_delete_datacenter_id(datacenter_id):
    """Remove a datacenter from the database (uuid or name accepted)."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr,
                 bottle.request.method, bottle.request.url)
    try:
        deleted_name = nfvo.delete_datacenter(mydb, datacenter_id)
        return format_out({"result": "datacenter '{}' deleted".format(deleted_name)})
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as err:
        logger.error("http_delete_datacenter_id error {}: {}".format(err.http_code, str(err)))
        bottle.abort(err.http_code, str(err))
    except Exception as err:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(err).__name__ + ": " + str(err))
@bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>', method='POST')
@bottle.route(url_base + '/<tenant_id>/vim_accounts', method='POST')
def http_associate_datacenters(tenant_id, datacenter_id=None):
    """Attach an existing datacenter to this tenant, creating a vim account."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr,
                 bottle.request.method, bottle.request.url)
    # validate and normalize the incoming payload
    http_content, _ = format_in(datacenter_associate_schema, confidential_data=True)
    removed = utils.remove_extra_items(http_content, datacenter_associate_schema)
    if removed:
        logger.debug("Remove received extra items %s", str(removed))
    try:
        vim_account_id = nfvo.create_vim_account(mydb, tenant_id, datacenter_id,
                                                 **http_content['datacenter'])
        return http_get_vim_account(tenant_id, vim_account_id)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as err:
        logger.error("http_associate_datacenters error {}: {}".format(err.http_code, str(err)))
        bottle.abort(err.http_code, str(err))
    except Exception as err:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(err).__name__ + ": " + str(err))
@bottle.route(url_base + '/<tenant_id>/vim_accounts/<vim_account_id>', method='PUT')
@bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>', method='PUT')
def http_vim_account_edit(tenant_id, vim_account_id=None, datacenter_id=None):
    """Modify the vim account that links this tenant to a datacenter."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr,
                 bottle.request.method, bottle.request.url)
    # validate and normalize the incoming payload
    http_content, _ = format_in(datacenter_associate_schema)
    removed = utils.remove_extra_items(http_content, datacenter_associate_schema)
    if removed:
        logger.debug("Remove received extra items %s", str(removed))
    try:
        vim_account_id = nfvo.edit_vim_account(mydb, tenant_id, vim_account_id, datacenter_id=datacenter_id,
                                               **http_content['datacenter'])
        return http_get_vim_account(tenant_id, vim_account_id)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as err:
        logger.error("http_vim_account_edit error {}: {}".format(err.http_code, str(err)))
        bottle.abort(err.http_code, str(err))
    except Exception as err:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(err).__name__ + ": " + str(err))
@bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>', method='DELETE')
@bottle.route(url_base + '/<tenant_id>/vim_accounts/<vim_account_id>', method='DELETE')
def http_deassociate_datacenters(tenant_id, datacenter_id=None, vim_account_id=None):
    """Detach a datacenter from this tenant by deleting the vim account."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr,
                 bottle.request.method, bottle.request.url)
    try:
        outcome = nfvo.delete_vim_account(mydb, tenant_id, vim_account_id, datacenter_id)
        return format_out({"result": outcome})
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as err:
        logger.error("http_deassociate_datacenters error {}: {}".format(err.http_code, str(err)))
        bottle.abort(err.http_code, str(err))
    except Exception as err:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(err).__name__ + ": " + str(err))
@bottle.route(url_base + '/<tenant_id>/vim/<datacenter_id>/network/<network_id>/attach', method='POST')
def http_post_vim_net_sdn_attach(tenant_id, datacenter_id, network_id):
    """Attach an external port to a vim network through the sdn controller."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr,
                 bottle.request.method, bottle.request.url)
    http_content, _ = format_in(sdn_external_port_schema)
    try:
        outcome = nfvo.vim_net_sdn_attach(mydb, tenant_id, datacenter_id, network_id, http_content)
        return format_out(outcome)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as err:
        logger.error("http_post_vim_net_sdn_attach error {}: {}".format(err.http_code, str(err)))
        bottle.abort(err.http_code, str(err))
    except Exception as err:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(err).__name__ + ": " + str(err))
@bottle.route(url_base + '/<tenant_id>/vim/<datacenter_id>/network/<network_id>/detach', method='DELETE')
@bottle.route(url_base + '/<tenant_id>/vim/<datacenter_id>/network/<network_id>/detach/<port_id>', method='DELETE')
def http_delete_vim_net_sdn_detach(tenant_id, datacenter_id, network_id, port_id=None):
    """Detach one external port (or all of them) from a vim network via sdn."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr,
                 bottle.request.method, bottle.request.url)
    try:
        outcome = nfvo.vim_net_sdn_detach(mydb, tenant_id, datacenter_id, network_id, port_id)
        return format_out(outcome)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as err:
        logger.error("http_delete_vim_net_sdn_detach error {}: {}".format(err.http_code, str(err)))
        bottle.abort(err.http_code, str(err))
    except Exception as err:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(err).__name__ + ": " + str(err))
@bottle.route(url_base + '/<tenant_id>/vim/<datacenter_id>/<item>', method='GET')
@bottle.route(url_base + '/<tenant_id>/vim/<datacenter_id>/<item>/<name>', method='GET')
def http_get_vim_items(tenant_id, datacenter_id, item, name=None):
    """List items of a vim (networks, images, ...) or get one by name/id."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr,
                 bottle.request.method, bottle.request.url)
    try:
        items = nfvo.vim_action_get(mydb, tenant_id, datacenter_id, item, name)
        return format_out(items)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as err:
        logger.error("http_get_vim_items error {}: {}".format(err.http_code, str(err)))
        bottle.abort(err.http_code, str(err))
    except Exception as err:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(err).__name__ + ": " + str(err))
@bottle.route(url_base + '/<tenant_id>/vim/<datacenter_id>/<item>/<name>', method='DELETE')
def http_del_vim_items(tenant_id, datacenter_id, item, name):
    """Delete one item (network, image, ...) directly at the vim."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr,
                 bottle.request.method, bottle.request.url)
    try:
        outcome = nfvo.vim_action_delete(mydb, tenant_id, datacenter_id, item, name)
        return format_out({"result": outcome})
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as err:
        logger.error("http_del_vim_items error {}: {}".format(err.http_code, str(err)))
        bottle.abort(err.http_code, str(err))
    except Exception as err:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(err).__name__ + ": " + str(err))
@bottle.route(url_base + '/<tenant_id>/vim/<datacenter_id>/<item>', method='POST')
def http_post_vim_items(tenant_id, datacenter_id, item):
    """Create one item (network, image, ...) directly at the vim."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr,
                 bottle.request.method, bottle.request.url)
    http_content, _ = format_in(object_schema)
    try:
        created = nfvo.vim_action_create(mydb, tenant_id, datacenter_id, item, http_content)
        return format_out(created)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as err:
        logger.error("http_post_vim_items error {}: {}".format(err.http_code, str(err)))
        bottle.abort(err.http_code, str(err))
    except Exception as err:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(err).__name__ + ": " + str(err))
@bottle.route(url_base + '/<tenant_id>/vnfs', method='GET')
def http_get_vnfs(tenant_id):
    """List the VNFs visible to the tenant (own plus public); 'any' skips the tenant filter."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr,
                 bottle.request.method, bottle.request.url)
    try:
        if tenant_id != 'any':
            # make sure the tenant exists before querying
            nfvo.check_tenant(mydb, tenant_id)
        select_, where_, limit_ = filter_query_string(bottle.request.query, None,
                ('uuid', 'name', 'osm_id', 'description', 'public', "tenant_id", "created_at") )
        if tenant_id != "any":
            # restrict to the tenant's own VNFs plus the public ones
            where_["OR"] = {"tenant_id": tenant_id, "public": True}
        vnfs = mydb.get_rows(FROM='vnfs', SELECT=select_, WHERE=where_, LIMIT=limit_)
        utils.convert_str2boolean(vnfs, ('public',))
        utils.convert_float_timestamp2str(vnfs)
        return format_out({'vnfs': vnfs})
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as err:
        logger.error("http_get_vnfs error {}: {}".format(err.http_code, str(err)))
        bottle.abort(err.http_code, str(err))
    except Exception as err:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(err).__name__ + ": " + str(err))
@bottle.route(url_base + '/<tenant_id>/vnfs/<vnf_id>', method='GET')
def http_get_vnf_id(tenant_id, vnf_id):
    """Return the details of one vnf (uuid or name accepted)."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr,
                 bottle.request.method, bottle.request.url)
    try:
        vnf = nfvo.get_vnf_id(mydb, tenant_id, vnf_id)
        utils.convert_str2boolean(vnf, ('public',))
        utils.convert_float_timestamp2str(vnf)
        return format_out(vnf)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as err:
        logger.error("http_get_vnf_id error {}: {}".format(err.http_code, str(err)))
        bottle.abort(err.http_code, str(err))
    except Exception as err:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(err).__name__ + ": " + str(err))
@bottle.route(url_base + '/<tenant_id>/vnfs', method='POST')
def http_post_vnfs(tenant_id):
    """Insert a vnf into the catalogue, creating flavors/images and filling the database tables.

    :param tenant_id: tenant that this vnf belongs to
    :return: formatted vnf description, as returned by http_get_vnf_id
    """
    logger.debug('FROM %s %s %s', bottle.request.remote_addr,
                 bottle.request.method, bottle.request.url)
    # parse input, selecting the descriptor schema from its "schema_version" field
    http_content, used_schema = format_in(vnfd_schema_v01, ("schema_version",), {"0.2": vnfd_schema_v02})
    removed = utils.remove_extra_items(http_content, used_schema)
    if removed:
        logger.debug("Remove received extra items %s", str(removed))
    try:
        if used_schema == vnfd_schema_v01:
            vnf_id = nfvo.new_vnf(mydb, tenant_id, http_content)
        elif used_schema == vnfd_schema_v02:
            vnf_id = nfvo.new_vnf_v02(mydb, tenant_id, http_content)
        else:
            logger.warning('Unexpected schema_version: %s', http_content.get("schema_version"))
            bottle.abort(httperrors.Bad_Request, "Invalid schema version")
        return http_get_vnf_id(tenant_id, vnf_id)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as err:
        logger.error("http_post_vnfs error {}: {}".format(err.http_code, str(err)))
        bottle.abort(err.http_code, str(err))
    except Exception as err:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(err).__name__ + ": " + str(err))
@bottle.route(url_base + '/v3/<tenant_id>/vnfd', method='POST')
def http_post_vnfs_v3(tenant_id):
    """
    Insert one or several VNFs in the catalog, following OSM IM
    :param tenant_id: tenant owner of the VNF
    :return: The detailed list of inserted VNFs, following the old format
    """
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    http_content, _ = format_in(None)
    try:
        vnfd_uuid_list = nfvo.new_vnfd_v3(mydb, tenant_id, http_content)
        vnfd_list = []
        # build the response reusing the old per-vnf detailed format
        for vnfd_uuid in vnfd_uuid_list:
            vnf = nfvo.get_vnf_id(mydb, tenant_id, vnfd_uuid)
            utils.convert_str2boolean(vnf, ('public',))
            utils.convert_float_timestamp2str(vnf)
            vnfd_list.append(vnf["vnf"])
        return format_out({"vnfd": vnfd_list})
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        # fixed: log line was wrongly tagged as http_post_vnfs
        logger.error("http_post_vnfs_v3 error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
@bottle.route(url_base + '/<tenant_id>/vnfs/<vnf_id>', method='DELETE')
def http_delete_vnf_id(tenant_id, vnf_id):
    """Delete a vnf from the database, removing its images and flavors at the VIM when appropriate."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr,
                 bottle.request.method, bottle.request.url)
    try:
        # tenant validation and the actual removal happen inside nfvo
        deleted_name = nfvo.delete_vnf(mydb, tenant_id, vnf_id)
        return format_out({"result": "VNF {} deleted".format(deleted_name)})
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as err:
        logger.error("http_delete_vnf_id error {}: {}".format(err.http_code, str(err)))
        bottle.abort(err.http_code, str(err))
    except Exception as err:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(err).__name__ + ": " + str(err))
#@bottle.route(url_base + '/<tenant_id>/hosts/topology', method='GET')
#@bottle.route(url_base + '/<tenant_id>/physicalview/Madrid-Alcantara', method='GET')
@bottle.route(url_base + '/<tenant_id>/physicalview/<datacenter>', method='GET')
def http_get_hosts(tenant_id, datacenter):
    """Return the host topology of the vim for the GUI physical view."""
    logger.debug('FROM %s %s %s', bottle.request.remote_addr,
                 bottle.request.method, bottle.request.url)
    try:
        if datacenter == 'treeview':
            data = nfvo.get_hosts(mydb, tenant_id)
        else:
            # openmano-gui is using a hardcoded value for the datacenter
            result, data = nfvo.get_hosts_info(mydb, tenant_id)  # , datacenter)
            if result < 0:
                bottle.abort(-result, data)
            else:
                utils.convert_float_timestamp2str(data)
        return format_out(data)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as err:
        logger.error("http_get_hosts error {}: {}".format(err.http_code, str(err)))
        bottle.abort(err.http_code, str(err))
    except Exception as err:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(err).__name__ + ": " + str(err))
@bottle.route(url_base + '/<path:path>', method='OPTIONS')
def http_options_deploy(path):
    """Answer the CORS preflight OPTIONS requests issued by the GUI."""
    #TODO: check correct path, and correct headers request
    logger.debug('FROM %s %s %s', bottle.request.remote_addr,
                 bottle.request.method, bottle.request.url)
    for header, value in (
            ('Access-Control-Allow-Methods', 'POST, GET, PUT, DELETE, OPTIONS'),
            ('Accept', 'application/yaml,application/json'),
            ('Content-Type', 'application/yaml,application/json'),
            ('Access-Control-Allow-Headers', 'content-type'),
            ('Access-Control-Allow-Origin', '*')):
        bottle.response.set_header(header, value)
    return
@bottle.route(url_base + '/<tenant_id>/topology/deploy', method='POST')
def http_post_deploy(tenant_id):
    '''Create a scenario from the posted topology and start an instance of it.

    The new instance is named after the scenario itself.
    '''
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    http_content, used_schema = format_in( nsd_schema_v01, ("schema_version",), {2: nsd_schema_v02})
    try:
        # Register the scenario first, then launch it under the same name.
        new_scenario_id = nfvo.new_scenario(mydb, tenant_id, http_content)
        scenario_name = http_content['name']
        instance = nfvo.start_scenario(mydb, tenant_id, new_scenario_id, scenario_name, scenario_name)
        return format_out(instance)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_post_deploy error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
@bottle.route(url_base + '/<tenant_id>/topology/verify', method='POST')
def http_post_verify(tenant_id):
    # Placeholder endpoint: topology verification is not implemented yet; the
    # route only logs the request and returns an empty 200 response.
    #TODO:
    # '''post topology verify'''
    # print "http_post_verify by tenant " + tenant_id + ' datacenter ' + datacenter
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    return
#
# SCENARIOS
#
@bottle.route(url_base + '/<tenant_id>/scenarios', method='POST')
def http_post_scenarios(tenant_id):
    '''Add a scenario into the catalogue, creating its internal structure in the OPENMANO DB.

    Accepts NSD schema versions 1, 2 ("0.2") and "0.3"; responds with the same
    representation a GET on the new scenario would return.
    '''
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    http_content, used_schema = format_in( nsd_schema_v01, ("schema_version",), {2: nsd_schema_v02, "0.3": nsd_schema_v03})
    try:
        if used_schema == nsd_schema_v01:
            scenario_id = nfvo.new_scenario(mydb, tenant_id, http_content)
        elif used_schema in (nsd_schema_v02, nsd_schema_v03):
            # v02 and v03 share the same entry point, parameterized by version.
            schema_version = "0.2" if used_schema == nsd_schema_v02 else "0.3"
            scenario_id = nfvo.new_scenario_v02(mydb, tenant_id, http_content, schema_version)
        else:
            logger.warning('Unexpected schema_version: %s', http_content.get("schema_version"))
            bottle.abort(httperrors.Bad_Request, "Invalid schema version")
        return http_get_scenario_id(tenant_id, scenario_id)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_post_scenarios error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
@bottle.route(url_base + '/v3/<tenant_id>/nsd', method='POST')
def http_post_nsds_v3(tenant_id):
    """
    Insert one or several NSDs in the catalog, following OSM IM
    :param tenant_id: tenant owner of the NSD
    :return: The detailed list of inserted NSDs, following the old format
    """
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    http_content, _ = format_in(None)
    try:
        inserted_uuids = nfvo.new_nsd_v3(mydb, tenant_id, http_content)
        # Re-read every inserted NSD so the response carries the full
        # (old-format) scenario details rather than just the uuids.
        descriptors = []
        for inserted_uuid in inserted_uuids:
            descriptor = mydb.get_scenario(inserted_uuid, tenant_id)
            utils.convert_float_timestamp2str(descriptor)
            descriptors.append(descriptor)
        return format_out({'nsd': descriptors})
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_post_nsds_v3 error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
@bottle.route(url_base + '/<tenant_id>/scenarios/<scenario_id>/action', method='POST')
def http_post_scenario_action(tenant_id, scenario_id):
    '''Take an action over a scenario.

    The request body must contain exactly one of:
      - "start"   : create and boot an instance
      - "deploy"  : equivalent to start
      - "reserve" : create the instance without booting VMs
      - "verify"  : reserve, then immediately delete the instance
    :raises bottle.HTTPError: Bad_Request when no known action key is present
        (previously this case silently returned an empty body).
    '''
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    # parse input data
    http_content, _ = format_in(scenario_action_schema)
    r = utils.remove_extra_items(http_content, scenario_action_schema)
    if r:
        logger.debug("Remove received extra items %s", str(r))
    try:
        # check valid tenant_id
        nfvo.check_tenant(mydb, tenant_id)
        # All four actions funnel into nfvo.start_scenario(); they differ only
        # in whether VMs are booted and whether the instance is kept afterwards.
        for action, startvms in (("start", True), ("deploy", True),
                                 ("reserve", False), ("verify", False)):
            if action not in http_content:
                continue
            params = http_content[action]
            data = nfvo.start_scenario(mydb, tenant_id, scenario_id,
                                       params['instance_name'],
                                       params.get('description', params['instance_name']),
                                       params.get('datacenter'), startvms=startvms)
            if action == "verify":
                # verify: the instance only exists to validate the scenario
                nfvo.delete_instance(mydb, tenant_id, data['uuid'])
                return format_out({"result": "Verify OK"})
            return format_out(data)
        # The schema should guarantee one action key; fail loudly if it did not.
        bottle.abort(httperrors.Bad_Request, "No valid action (start/deploy/reserve/verify) found in request body")
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_post_scenario_action error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
@bottle.route(url_base + '/<tenant_id>/scenarios', method='GET')
def http_get_scenarios(tenant_id):
    '''List the scenarios visible to a tenant ("any" lists all of them).'''
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    try:
        any_tenant = (tenant_id == "any")
        if not any_tenant:
            # check valid tenant_id
            nfvo.check_tenant(mydb, tenant_id)
        # build the query from the URL query string
        select_, where_, limit_ = filter_query_string(bottle.request.query, None,
                ('uuid', 'name', 'osm_id', 'description', 'tenant_id', 'created_at', 'public'))
        if not any_tenant:
            # a tenant sees its own scenarios plus the public ones
            where_["OR"] = {"tenant_id": tenant_id, "public": True}
        scenarios = mydb.get_rows(SELECT=select_, WHERE=where_, LIMIT=limit_, FROM='scenarios')
        utils.convert_float_timestamp2str(scenarios)
        utils.convert_str2boolean(scenarios, ('public',))
        return format_out({'scenarios': scenarios})
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_get_scenarios error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
@bottle.route(url_base + '/<tenant_id>/scenarios/<scenario_id>', method='GET')
def http_get_scenario_id(tenant_id, scenario_id):
    '''Get scenario details; scenario_id can be either a uuid or a name.

    :param tenant_id: tenant the scenario must be visible to, or "any"
    :param scenario_id: scenario uuid or name
    :return: formatted {'scenario': ...} response
    '''
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    try:
        #check valid tenant_id
        if tenant_id != "any":
            nfvo.check_tenant(mydb, tenant_id)
        #obtain data
        scenario = mydb.get_scenario(scenario_id, tenant_id)
        utils.convert_float_timestamp2str(scenario)
        data={'scenario' : scenario}
        return format_out(data)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        # fix: this log line previously carried the wrong handler name
        # ("http_get_scenarios"), which made error triage misleading
        logger.error("http_get_scenario_id error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
@bottle.route(url_base + '/<tenant_id>/scenarios/<scenario_id>', method='DELETE')
def http_delete_scenario_id(tenant_id, scenario_id):
    '''Delete a scenario from the database; scenario_id can be a uuid or a name.'''
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    try:
        if tenant_id != "any":
            # reject requests made on behalf of non-existent tenants
            nfvo.check_tenant(mydb, tenant_id)
        deleted_name = mydb.delete_scenario(scenario_id, tenant_id)
        return format_out({"result": "scenario " + deleted_name + " deleted"})
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_delete_scenario_id error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
@bottle.route(url_base + '/<tenant_id>/scenarios/<scenario_id>', method='PUT')
def http_put_scenario_id(tenant_id, scenario_id):
    '''Edit an existing scenario; responds with the updated scenario details.'''
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    edit_content, _ = format_in(scenario_edit_schema)
    try:
        nfvo.edit_scenario(mydb, tenant_id, scenario_id, edit_content)
        # respond with the same representation a GET on the scenario returns
        return http_get_scenario_id(tenant_id, scenario_id)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_put_scenario_id error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
@bottle.route(url_base + '/<tenant_id>/instances', method='POST')
def http_post_instances(tenant_id):
    '''Create an instance-scenario from the posted description.

    :param tenant_id: tenant that owns the instance, or "any"
    :return: formatted instance details
    '''
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    # parse input data
    http_content, used_schema = format_in(instance_scenario_create_schema_v01)
    r = utils.remove_extra_items(http_content, used_schema)
    # fix: test truthiness instead of "is not None" so an empty removal list
    # cannot produce a spurious warning (matches the other handlers)
    if r:
        logger.warning("http_post_instances: Warning: remove extra items %s", str(r))
    try:
        #check valid tenant_id
        if tenant_id != "any":
            nfvo.check_tenant(mydb, tenant_id)
        data = nfvo.create_instance(mydb, tenant_id, http_content["instance"])
        return format_out(data)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_post_instances error {}: {}".format(e.http_code, str(e)), exc_info=True)
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
#
# INSTANCES
#
@bottle.route(url_base + '/<tenant_id>/instances', method='GET')
def http_get_instances(tenant_id):
    '''Get the instance-scenario list of a tenant ("any" lists all of them).'''
    # fix: request logging was missing here while every other handler emits it
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    try:
        #check valid tenant_id
        if tenant_id != "any":
            nfvo.check_tenant(mydb, tenant_id)
        #obtain data
        s,w,l=filter_query_string(bottle.request.query, None, ('uuid', 'name', 'scenario_id', 'tenant_id', 'description', 'created_at'))
        if tenant_id != "any":
            w['tenant_id'] = tenant_id
        instances = mydb.get_rows(SELECT=s, WHERE=w, LIMIT=l, FROM='instance_scenarios')
        utils.convert_float_timestamp2str(instances)
        utils.convert_str2boolean(instances, ('public',) )
        data={'instances':instances}
        return format_out(data)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_get_instances error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
@bottle.route(url_base + '/<tenant_id>/instances/<instance_id>', method='GET')
def http_get_instance_id(tenant_id, instance_id):
    '''Get instance details; instance_id can be either a uuid or a name.'''
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    try:
        if tenant_id == "any":
            # "any" disables the tenant filter in the lookup below
            tenant_id = None
        else:
            # check valid tenant_id
            nfvo.check_tenant(mydb, tenant_id)
        instance = nfvo.get_instance_id(mydb, tenant_id, instance_id)
        # Workaround to SO, convert vnfs:vms:interfaces:ip_address from ";" separated list to report the first value
        for vnf in instance.get("vnfs", ()):
            for vm in vnf.get("vms", ()):
                for iface in vm.get("interfaces", ()):
                    ip = iface.get("ip_address")
                    if ip and ";" in ip:
                        iface["ip_address"] = ip.split(";", 1)[0]
        utils.convert_float_timestamp2str(instance)
        return format_out(instance)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_get_instance_id error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
@bottle.route(url_base + '/<tenant_id>/instances/<instance_id>', method='DELETE')
def http_delete_instance_id(tenant_id, instance_id):
    '''Delete an instance from the VIM and the database; uuid or name accepted.'''
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    try:
        if tenant_id == "any":
            # "any" disables the tenant filter in the deletion below
            tenant_id = None
        else:
            # check valid tenant_id
            nfvo.check_tenant(mydb, tenant_id)
        message = nfvo.delete_instance(mydb, tenant_id, instance_id)
        return format_out({"result": message})
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_delete_instance_id error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
@bottle.route(url_base + '/<tenant_id>/instances/<instance_id>/action', method='POST')
def http_post_instance_scenario_action(tenant_id, instance_id):
    """
    Take an action over a scenario instance.
    :param tenant_id: tenant where user belongs to
    :param instance_id: instance identity (uuid or name)
    :return: formatted action result
    """
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    # parse and sanitize the request body
    http_content, _ = format_in(instance_scenario_action_schema)
    extra = utils.remove_extra_items(http_content, instance_scenario_action_schema)
    if extra:
        logger.debug("Remove received extra items %s", str(extra))
    try:
        if tenant_id != "any":
            # check valid tenant_id
            nfvo.check_tenant(mydb, tenant_id)
        # resolve the instance (it may be given by name) to its uuid
        instance = mydb.get_instance_scenario(instance_id, tenant_id)
        data = nfvo.instance_action(mydb, tenant_id, instance["uuid"], http_content)
        return format_out(data)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_post_instance_scenario_action error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
@bottle.route(url_base + '/<tenant_id>/instances/<instance_id>/action', method='GET')
@bottle.route(url_base + '/<tenant_id>/instances/<instance_id>/action/<action_id>', method='GET')
def http_get_instance_scenario_action(tenant_id, instance_id, action_id=None):
    """
    List the actions done over an instance, or one action's details.
    :param tenant_id: tenant where user belongs to. Can be "any" to ignore
    :param instance_id: instance id, can be "any" to get actions of all instances
    :param action_id: optional action identifier; None lists every action
    :return: formatted action list or detail
    """
    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
    try:
        if tenant_id != "any":
            # check valid tenant_id
            nfvo.check_tenant(mydb, tenant_id)
        actions = nfvo.instance_action_get(mydb, tenant_id, instance_id, action_id)
        return format_out(actions)
    except bottle.HTTPError:
        raise
    except (nfvo.NfvoException, db_base_Exception) as e:
        logger.error("http_get_instance_scenario_action error {}: {}".format(e.http_code, str(e)))
        bottle.abort(e.http_code, str(e))
    except Exception as e:
        logger.error("Unexpected exception: ", exc_info=True)
        bottle.abort(httperrors.Internal_Server_Error, type(e).__name__ + ": " + str(e))
@bottle.error(400)
@bottle.error(401)
@bottle.error(404)
@bottle.error(403)
@bottle.error(405)
@bottle.error(406)
@bottle.error(409)
@bottle.error(503)
@bottle.error(500)
def error400(error):
    """Render every registered HTTP error as a structured body with code, type and description."""
    body = {
        "error": {
            "code": error.status_code,
            "type": error.status,
            "description": error.body,
        }
    }
    bottle.response.headers['Access-Control-Allow-Origin'] = '*'
    return format_out(body)
| 48.452412
| 148
| 0.665972
| 9,791
| 74,326
| 4.801757
| 0.049433
| 0.045093
| 0.027822
| 0.041732
| 0.805441
| 0.771451
| 0.731293
| 0.698877
| 0.671715
| 0.646191
| 0
| 0.003239
| 0.206496
| 74,326
| 1,533
| 149
| 48.484018
| 0.793906
| 0.105602
| 0
| 0.560504
| 0
| 0
| 0.164568
| 0.049895
| 0
| 0
| 0
| 0.001305
| 0
| 1
| 0.053782
| false
| 0.020168
| 0.010084
| 0.001681
| 0.121008
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
614274fe73e8e99a09619dde31362d02995e1050
| 393
|
py
|
Python
|
tools/w3af/w3af/core/data/parsers/__init__.py
|
sravani-m/Web-Application-Security-Framework
|
d9f71538f5cba6fe1d8eabcb26c557565472f6a6
|
[
"MIT"
] | 3
|
2019-04-09T22:59:33.000Z
|
2019-06-14T09:23:24.000Z
|
tools/w3af/w3af/core/data/parsers/__init__.py
|
sravani-m/Web-Application-Security-Framework
|
d9f71538f5cba6fe1d8eabcb26c557565472f6a6
|
[
"MIT"
] | null | null | null |
tools/w3af/w3af/core/data/parsers/__init__.py
|
sravani-m/Web-Application-Security-Framework
|
d9f71538f5cba6fe1d8eabcb26c557565472f6a6
|
[
"MIT"
] | null | null | null |
import re
#URL_RE = ('((http|https):[A-Za-z0-9/](([A-Za-z0-9$_.+!*(),;/?:@&~=-])|%'
# '[A-Fa-f0-9]{2})+(#([a-zA-Z0-9][a-zA-Z0-9$_.+!*(),;/?:@&~=%-]*))?)')
URL_RE = re.compile('((http|https)://([\w:@\-\./]*?)[^ \0\n\r\t"\'<>]*)', re.U)
RELATIVE_URL_RE = re.compile(
'((:?[/]{1,2}[\w\-~\.%]+)+\.\w{2,4}(((\?)([\w\-~\.%]*=[\w\-~\.%]*)){1}'
'((&)([\w\-~\.%]*=[\w\-~\.%]*))*)?)', re.U)
| 43.666667
| 79
| 0.320611
| 58
| 393
| 2.068966
| 0.37931
| 0.1
| 0.166667
| 0.2
| 0.208333
| 0.2
| 0.2
| 0.2
| 0
| 0
| 0
| 0.046832
| 0.076336
| 393
| 9
| 80
| 43.666667
| 0.283747
| 0.363868
| 0
| 0
| 0
| 0
| 0.592742
| 0.548387
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.2
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
61428cb1bd17c54034a7a91e33314c49c207e56a
| 1,125
|
py
|
Python
|
src/opendr/perception/object_detection_2d/__init__.py
|
makistsantekidis/opendr
|
07dee3b59d3487b9c5a93d6946317178a02c9890
|
[
"Apache-2.0"
] | 217
|
2020-04-10T16:39:36.000Z
|
2022-03-30T15:39:04.000Z
|
src/opendr/perception/object_detection_2d/__init__.py
|
makistsantekidis/opendr
|
07dee3b59d3487b9c5a93d6946317178a02c9890
|
[
"Apache-2.0"
] | 79
|
2021-06-23T10:40:10.000Z
|
2021-12-16T07:59:42.000Z
|
src/opendr/perception/object_detection_2d/__init__.py
|
makistsantekidis/opendr
|
07dee3b59d3487b9c5a93d6946317178a02c9890
|
[
"Apache-2.0"
] | 29
|
2021-12-16T09:26:13.000Z
|
2022-03-29T15:19:18.000Z
|
from opendr.perception.object_detection_2d.centernet.centernet_learner import CenterNetDetectorLearner
from opendr.perception.object_detection_2d.detr.detr_learner import DetrLearner
from opendr.perception.object_detection_2d.gem.gem_learner import GemLearner
from opendr.perception.object_detection_2d.retinaface.retinaface_learner import RetinaFaceLearner
from opendr.perception.object_detection_2d.ssd.ssd_learner import SingleShotDetectorLearner
from opendr.perception.object_detection_2d.yolov3.yolov3_learner import YOLOv3DetectorLearner
from opendr.perception.object_detection_2d.datasets.wider_person import WiderPersonDataset
from opendr.perception.object_detection_2d.datasets.wider_face import WiderFaceDataset
from opendr.perception.object_detection_2d.datasets import transforms
from opendr.perception.object_detection_2d.utils.vis_utils import draw_bounding_boxes
__all__ = ['CenterNetDetectorLearner', 'DetrLearner', 'GemLearner', 'RetinaFaceLearner',
'SingleShotDetectorLearner', 'YOLOv3DetectorLearner', 'WiderPersonDataset', 'WiderFaceDataset',
'transforms', 'draw_bounding_boxes']
| 66.176471
| 106
| 0.868444
| 123
| 1,125
| 7.642276
| 0.268293
| 0.106383
| 0.212766
| 0.276596
| 0.429787
| 0.429787
| 0.154255
| 0.106383
| 0
| 0
| 0
| 0.013359
| 0.068444
| 1,125
| 16
| 107
| 70.3125
| 0.883588
| 0
| 0
| 0
| 0
| 0
| 0.152
| 0.062222
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.769231
| 0
| 0.769231
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
614d42f47176cd3cfe1f24c3851962b199c67994
| 109
|
py
|
Python
|
Fujitsu/benchmarks/resnet/implementations/mxnet/3rdparty/tvm/python/tvm/relay/op/vision/_make.py
|
mengkai94/training_results_v0.6
|
43dc3e250f8da47b5f8833197d74cb8cf1004fc9
|
[
"Apache-2.0"
] | 64
|
2021-05-02T14:42:34.000Z
|
2021-05-06T01:35:03.000Z
|
python/tvm/relay/op/vision/_make.py
|
ganzhiliang/tvm
|
b076cad542524cb3744149d953c341b5815f6474
|
[
"Apache-2.0"
] | 23
|
2019-07-29T05:21:52.000Z
|
2020-08-31T18:51:42.000Z
|
python/tvm/relay/op/vision/_make.py
|
ganzhiliang/tvm
|
b076cad542524cb3744149d953c341b5815f6474
|
[
"Apache-2.0"
] | 51
|
2019-07-12T05:10:25.000Z
|
2021-07-28T16:19:06.000Z
|
"""Constructor APIs"""
from ...._ffi.function import _init_api
_init_api("relay.op.vision._make", __name__)
| 21.8
| 44
| 0.743119
| 15
| 109
| 4.733333
| 0.866667
| 0.197183
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.082569
| 109
| 4
| 45
| 27.25
| 0.71
| 0.146789
| 0
| 0
| 0
| 0
| 0.241379
| 0.241379
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
615cd29b421949f6404a4f2577a075608df673f2
| 236
|
py
|
Python
|
phyutil/phylib/scan/__init__.py
|
frib-high-level-controls/phyhlc
|
6486607e3aa0212054a12e9f2ad1a3ef15542f48
|
[
"BSD-3-Clause"
] | 1
|
2018-03-22T15:18:54.000Z
|
2018-03-22T15:18:54.000Z
|
phyutil/phylib/scan/__init__.py
|
frib-high-level-controls/phyhlc
|
6486607e3aa0212054a12e9f2ad1a3ef15542f48
|
[
"BSD-3-Clause"
] | null | null | null |
phyutil/phylib/scan/__init__.py
|
frib-high-level-controls/phyhlc
|
6486607e3aa0212054a12e9f2ad1a3ef15542f48
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Lazy load library form either local scan service or remote RESTful based scan server.
The library would be loaded on the fly according SCAN_SRV_URL variable.
Created on Apr 20, 2015
@author: shen
"""
from scanlib import ScanLib
| 21.454545
| 85
| 0.775424
| 38
| 236
| 4.763158
| 0.842105
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.030928
| 0.177966
| 236
| 10
| 86
| 23.6
| 0.902062
| 0.838983
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
616c8fd06283a13b04de9a1628d72b207df008b1
| 78
|
py
|
Python
|
tests/pack_test.py
|
ryuichi1208/CRUD-frame-flask
|
9b0c6453a276f4035c1acda2b548ff5fe7f6e4e8
|
[
"Apache-2.0"
] | 1
|
2019-08-18T08:21:26.000Z
|
2019-08-18T08:21:26.000Z
|
tests/pack_test.py
|
ryuichi1208/CRUD
|
9b0c6453a276f4035c1acda2b548ff5fe7f6e4e8
|
[
"Apache-2.0"
] | 6
|
2021-03-31T19:21:35.000Z
|
2022-03-11T23:56:16.000Z
|
tests/pack_test.py
|
ryuichi1208/CRUD
|
9b0c6453a276f4035c1acda2b548ff5fe7f6e4e8
|
[
"Apache-2.0"
] | null | null | null |
from src import *
create.print_pack()
rb.read_book_info("data/sample.json")
| 13
| 37
| 0.75641
| 13
| 78
| 4.307692
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.102564
| 78
| 5
| 38
| 15.6
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0.207792
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0.333333
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
618a002b89a23502680282c3e9d117347d37c250
| 69
|
py
|
Python
|
redwind/wsgi.py
|
kylewm/redwind
|
7ad807b5ab2dd74a8d470dbea9dd4baf5567d9c6
|
[
"BSD-2-Clause"
] | 35
|
2015-01-08T03:26:39.000Z
|
2020-09-16T00:42:17.000Z
|
redwind/wsgi.py
|
kylewm/redwind
|
7ad807b5ab2dd74a8d470dbea9dd4baf5567d9c6
|
[
"BSD-2-Clause"
] | 47
|
2015-01-05T23:22:08.000Z
|
2021-02-02T21:43:26.000Z
|
redwind/wsgi.py
|
kylewm/redwind
|
7ad807b5ab2dd74a8d470dbea9dd4baf5567d9c6
|
[
"BSD-2-Clause"
] | 10
|
2015-02-20T00:51:37.000Z
|
2022-01-11T10:59:32.000Z
|
from . import create_app
application = create_app('../redwind.cfg')
| 17.25
| 42
| 0.73913
| 9
| 69
| 5.444444
| 0.777778
| 0.367347
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.115942
| 69
| 3
| 43
| 23
| 0.803279
| 0
| 0
| 0
| 0
| 0
| 0.202899
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
4ef8a3824851f6b9f56ee4c14582a2a48239163a
| 121
|
py
|
Python
|
django_gotolong/gweight/admin.py
|
ParikhKadam/gotolong
|
839beb8aa37055a2078eaa289b8ae05b62e8905e
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] | 15
|
2019-12-06T16:19:45.000Z
|
2021-08-20T13:22:22.000Z
|
django_gotolong/gweight/admin.py
|
ParikhKadam/gotolong
|
839beb8aa37055a2078eaa289b8ae05b62e8905e
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] | 14
|
2020-12-08T10:45:05.000Z
|
2021-09-21T17:23:45.000Z
|
django_gotolong/gweight/admin.py
|
ParikhKadam/gotolong
|
839beb8aa37055a2078eaa289b8ae05b62e8905e
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] | 9
|
2020-01-01T03:04:29.000Z
|
2021-04-18T08:42:30.000Z
|
from django.contrib import admin
# Register your models here.
from .models import Gweight
admin.site.register(Gweight)
| 17.285714
| 32
| 0.801653
| 17
| 121
| 5.705882
| 0.647059
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.132231
| 121
| 6
| 33
| 20.166667
| 0.92381
| 0.214876
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
f62f593191a0a4b056b492ed4b0c18f0f7cec798
| 706
|
py
|
Python
|
core/models/managers/UsuarioManager.py
|
roimpacta/exemplos
|
cbfe7c81fc14932697c02eb63bec7d7e4a2c5d5a
|
[
"Apache-2.0"
] | null | null | null |
core/models/managers/UsuarioManager.py
|
roimpacta/exemplos
|
cbfe7c81fc14932697c02eb63bec7d7e4a2c5d5a
|
[
"Apache-2.0"
] | null | null | null |
core/models/managers/UsuarioManager.py
|
roimpacta/exemplos
|
cbfe7c81fc14932697c02eb63bec7d7e4a2c5d5a
|
[
"Apache-2.0"
] | null | null | null |
from django.contrib.auth.models import AbstractBaseUser, UserManager, BaseUserManager
from django.db import models
class UsuarioManager(BaseUserManager):
    # Custom Django user manager whose natural key is "ra" (registration
    # number) rather than a username.
    use_in_migrations = True
    def _create_user(self, ra, password, **extra_fields):
        # Shared creation helper: validates ra, hashes the password and saves.
        if not ra:
            raise ValueError('RA precisa ser preenchido')
        user = self.model(ra=ra, **extra_fields)
        user.set_password(password)
        user.save(using=self._db)
        return user
    def create_user(self, ra, password, **extra_fields):
        return self._create_user(ra, password, **extra_fields)
    def create_superuser(self, ra, password, **extra_fields):
        # NOTE(review): identical to create_user — no is_staff/is_superuser
        # flags are forced here; confirm the user model does not expect them.
        return self._create_user(ra, password, **extra_fields)
| 35.3
| 85
| 0.701133
| 88
| 706
| 5.420455
| 0.420455
| 0.138365
| 0.157233
| 0.220126
| 0.383648
| 0.383648
| 0.383648
| 0.383648
| 0.27673
| 0.27673
| 0
| 0
| 0.201133
| 706
| 19
| 86
| 37.157895
| 0.845745
| 0
| 0
| 0.133333
| 0
| 0
| 0.035411
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0.4
| 0.133333
| 0.133333
| 0.666667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 4
|
f6337b2484b8660d5938695c2b487b77db4d2994
| 1,932
|
py
|
Python
|
tests/test_metrics/test___init__.py
|
dmayo/brain-score
|
3ab4258152c9e3f8c7d29afb10158b184dbcebbe
|
[
"MIT"
] | 52
|
2019-12-13T06:43:44.000Z
|
2022-02-21T07:47:39.000Z
|
tests/test_metrics/test___init__.py
|
dmayo/brain-score
|
3ab4258152c9e3f8c7d29afb10158b184dbcebbe
|
[
"MIT"
] | 104
|
2019-12-06T18:08:54.000Z
|
2022-03-31T23:57:51.000Z
|
tests/test_metrics/test___init__.py
|
dmayo/brain-score
|
3ab4258152c9e3f8c7d29afb10158b184dbcebbe
|
[
"MIT"
] | 32
|
2019-12-05T14:31:14.000Z
|
2022-03-10T02:04:45.000Z
|
import numpy as np
from brainio.assemblies import DataAssembly
from brainscore.metrics import Score
class TestScoreRaw:
def test_sel(self):
score = Score([1, 2], coords={'a': [1, 2]}, dims=['a'])
score.attrs['raw'] = DataAssembly([0, 2, 1, 3], coords={'a': [1, 1, 2, 2]}, dims=['a'])
sel_score = score.sel(a=1)
np.testing.assert_array_equal(sel_score.raw['a'], [1, 1])
def test_isel(self):
score = Score([1, 2], coords={'a': [1, 2]}, dims=['a'])
score.attrs['raw'] = DataAssembly([0, 2, 1, 3], coords={'a': [1, 1, 2, 2]}, dims=['a'])
sel_score = score.isel(a=0)
np.testing.assert_array_equal(sel_score.raw['a'], [1, 1])
def test_sel_no_apply_raw(self):
score = Score([1, 2], coords={'a': [1, 2]}, dims=['a'])
score.attrs['raw'] = DataAssembly([0, 2, 1, 3], coords={'a': [1, 1, 2, 2]}, dims=['a'])
sel_score = score.sel(a=1, _apply_raw=False)
np.testing.assert_array_equal(sel_score.raw['a'], [1, 1, 2, 2])
def test_squeeze(self):
score = Score([[1, 2]], coords={'s': [0], 'a': [1, 2]}, dims=['s', 'a'])
score.attrs['raw'] = DataAssembly([[0, 2, 1, 3]], coords={'s': [0], 'a': [1, 1, 2, 2]}, dims=['s', 'a'])
sel_score = score.squeeze('s')
np.testing.assert_array_equal(sel_score.raw.dims, ['a'])
def test_mean(self):
score = Score([1, 2], coords={'a': [1, 2]}, dims=['a'])
score.attrs['raw'] = DataAssembly([0, 2, 1, 3], coords={'a': [1, 1, 2, 2]}, dims=['a'])
mean_score = score.mean('a')
np.testing.assert_array_equal(mean_score.raw['a'], [1, 1, 2, 2])
def test_mean_no_apply_raw(self):
score = Score([1, 2], coords={'a': [1, 2]}, dims=['a'])
score.attrs['raw'] = DataAssembly([0, 2, 1, 3], coords={'a': [1, 1, 2, 2]}, dims=['a'])
mean_score = score.mean('a', _apply_raw=True)
assert mean_score.raw == 1.5
| 44.930233
| 112
| 0.54089
| 311
| 1,932
| 3.237942
| 0.125402
| 0.039722
| 0.079444
| 0.031778
| 0.770606
| 0.739821
| 0.709037
| 0.709037
| 0.673287
| 0.644489
| 0
| 0.060106
| 0.216356
| 1,932
| 42
| 113
| 46
| 0.60502
| 0
| 0
| 0.352941
| 0
| 0
| 0.02795
| 0
| 0
| 0
| 0
| 0
| 0.176471
| 1
| 0.176471
| false
| 0
| 0.088235
| 0
| 0.294118
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
f672a6d387ce80b4a9a8b4f81c34ce28cfc98655
| 3,704
|
py
|
Python
|
bcg/_nbdev.py
|
eschmidt42/bcg
|
3b35de4327d0cfdbabbe784dfc693d695fd013b6
|
[
"Apache-2.0"
] | 1
|
2022-01-17T07:03:14.000Z
|
2022-01-17T07:03:14.000Z
|
bcg/_nbdev.py
|
eschmidt42/bcg
|
3b35de4327d0cfdbabbe784dfc693d695fd013b6
|
[
"Apache-2.0"
] | 2
|
2021-09-28T01:41:23.000Z
|
2022-02-26T07:12:27.000Z
|
bcg/_nbdev.py
|
eschmidt42/bcg
|
3b35de4327d0cfdbabbe784dfc693d695fd013b6
|
[
"Apache-2.0"
] | 1
|
2022-01-17T07:04:49.000Z
|
2022-01-17T07:04:49.000Z
|
# AUTOGENERATED BY NBDEV! DO NOT EDIT!
__all__ = ["index", "modules", "custom_doc_links", "git_url"]
index = {"GenVars": "00_basics.ipynb",
"CommonCauses": "00_basics.ipynb",
"Instruments": "00_basics.ipynb",
"EffectModifiers": "00_basics.ipynb",
"Treatments": "00_basics.ipynb",
"initialize": "00_basics.ipynb",
"generate": "00_basics.ipynb",
"get_obs": "00_basics.ipynb",
"CommonCauses.initialize": "00_basics.ipynb",
"CommonCauses.generate": "00_basics.ipynb",
"CommonCauses.get_obs": "00_basics.ipynb",
"Instruments.initialize": "00_basics.ipynb",
"Instruments.get_obs": "00_basics.ipynb",
"Instruments.generate": "00_basics.ipynb",
"EffectModifiers.initialize": "00_basics.ipynb",
"EffectModifiers.get_obs": "00_basics.ipynb",
"EffectModifiers.generate": "00_basics.ipynb",
"stochastically_convert_to_binary": "00_basics.ipynb",
"Treatments.initialize": "00_basics.ipynb",
"Treatments.generate": "00_basics.ipynb",
"Treatments.get_obs": "00_basics.ipynb",
"Outcomes": "00_basics.ipynb",
"plot_target_vs_rest": "00_basics.ipynb",
"plot_var_hists": "00_basics.ipynb",
"show_correlations": "00_basics.ipynb",
"get_Xy": "00_basics.ipynb",
"get_model_feel": "00_basics.ipynb",
"get_feature_importance": "00_basics.ipynb",
"get_partial_dependencies": "00_basics.ipynb",
"plot_partial_dependencies": "00_basics.ipynb",
"GraphGenerator": "00_basics.ipynb",
"get_only_Xi_to_Y": "00_basics.ipynb",
"GraphGenerator.get_only_Xi_to_Y": "00_basics.ipynb",
"get_Xi_to_Y_with_ccs_and_such": "00_basics.ipynb",
"GraphGenerator.get_Xi_to_Y_with_ccs_and_such": "00_basics.ipynb",
"vis_g": "00_basics.ipynb",
"GraphGenerator.vis_g": "00_basics.ipynb",
"get_gml": "00_basics.ipynb",
"GraphGenerator.get_gml": "00_basics.ipynb",
"CausalGraph": "02_causal_model.ipynb",
"show_graph": "02_causal_model.ipynb",
"view_graph": "02_causal_model.ipynb",
"CausalGraph.view_graph": "02_causal_model.ipynb",
"get_ancestors": "02_causal_model.ipynb",
"CausalGraph.get_ancestors": "02_causal_model.ipynb",
"cut_edges": "02_causal_model.ipynb",
"CausalGraph.cut_edges": "02_causal_model.ipynb",
"get_causes": "02_causal_model.ipynb",
"CausalGraph.get_causes": "02_causal_model.ipynb",
"get_instruments": "02_causal_model.ipynb",
"CausalGraph.get_instruments": "02_causal_model.ipynb",
"get_effect_modifiers": "02_causal_model.ipynb",
"CausalGraph.get_effect_modifiers": "02_causal_model.ipynb",
"CausalModel": "02_causal_model.ipynb",
"identify_effect": "02_causal_model.ipynb",
"construct_backdoor": "02_causal_model.ipynb",
"construct_instrumental_variable": "02_causal_model.ipynb",
"CausalModel.construct_backdoor": "02_causal_model.ipynb",
"CausalModel.construct_instrumental_variable": "02_causal_model.ipynb",
"CausalModel.identify_effect": "02_causal_model.ipynb",
"RegressionEstimator": "02_causal_model.ipynb",
"get_Xy_with_products": "02_causal_model.ipynb",
"estimate_effect": "02_causal_model.ipynb",
"CausalModel.estimate_effect": "02_causal_model.ipynb"}
modules = ["basics.py",
"causal_model.py"]
doc_url = "https://fastai.github.io/bcg/"
git_url = "https://github.com/fastai/bcg/tree/master/"
def custom_doc_links(name):
    """Return a custom documentation URL for *name*; this project defines none."""
    return None
| 47.487179
| 80
| 0.657397
| 428
| 3,704
| 5.273364
| 0.21028
| 0.138237
| 0.224634
| 0.19938
| 0.544085
| 0.400975
| 0.136464
| 0.104564
| 0.031015
| 0.031015
| 0
| 0.043199
| 0.200054
| 3,704
| 77
| 81
| 48.103896
| 0.718529
| 0.009719
| 0
| 0
| 1
| 0
| 0.667758
| 0.333061
| 0
| 0
| 0
| 0
| 0
| 1
| 0.014286
| false
| 0
| 0.014286
| 0.014286
| 0.028571
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
f674eacdb8914ef2cd91737543492e75adc3bdd5
| 152
|
py
|
Python
|
aoc20171202a.py
|
BarnabyShearer/aoc
|
4feb66c668b068f0f42ad99b916e80732eba5a2d
|
[
"MIT"
] | null | null | null |
aoc20171202a.py
|
BarnabyShearer/aoc
|
4feb66c668b068f0f42ad99b916e80732eba5a2d
|
[
"MIT"
] | null | null | null |
aoc20171202a.py
|
BarnabyShearer/aoc
|
4feb66c668b068f0f42ad99b916e80732eba5a2d
|
[
"MIT"
] | null | null | null |
def check(line):
    """Return the spread (max - min) of one row of numbers."""
    return max(line) - min(line)


def aoc(data):
    """Sum the max-min spread of every whitespace-separated row in *data*.

    Rows are newline-separated integers. Blank lines (e.g. a trailing
    newline at the end of the puzzle input) are skipped; the previous
    version passed an empty list to ``check`` and crashed on ``max([])``.
    """
    return sum(
        check([int(x) for x in line.split()])
        for line in data.splitlines()
        if line.strip()  # ignore empty/whitespace-only rows
    )
| 21.714286
| 84
| 0.618421
| 27
| 152
| 3.481481
| 0.555556
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.184211
| 152
| 6
| 85
| 25.333333
| 0.758065
| 0
| 0
| 0
| 0
| 0
| 0.013158
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 4
|
9ca592f62fa6011f23e2615ebf9b5413756c6e71
| 49
|
py
|
Python
|
torchbiomed/datasets/__init__.py
|
aicentral/torchbiomed
|
661b3e4411f7e57f4c5cbb56d02998d2d8bddfdb
|
[
"BSD-3-Clause"
] | 106
|
2017-03-24T09:36:18.000Z
|
2021-11-30T11:31:22.000Z
|
torchbiomed/datasets/__init__.py
|
aicentral/torchbiomed
|
661b3e4411f7e57f4c5cbb56d02998d2d8bddfdb
|
[
"BSD-3-Clause"
] | 4
|
2017-05-11T04:06:48.000Z
|
2021-04-16T09:38:18.000Z
|
torchbiomed/datasets/__init__.py
|
aicentral/torchbiomed
|
661b3e4411f7e57f4c5cbb56d02998d2d8bddfdb
|
[
"BSD-3-Clause"
] | 37
|
2017-05-11T07:25:06.000Z
|
2022-01-16T16:06:42.000Z
|
from .luna16 import LUNA16

# NOTE: ``('LUNA16')`` without a trailing comma is just a parenthesised
# string, so ``from torchbiomed.datasets import *`` would iterate its
# characters and fail; a one-element tuple is what was intended.
__all__ = ('LUNA16',)
| 12.25
| 26
| 0.714286
| 6
| 49
| 5.166667
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.146341
| 0.163265
| 49
| 3
| 27
| 16.333333
| 0.609756
| 0
| 0
| 0
| 0
| 0
| 0.122449
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
9cafd721fe6d01b5f1bc056090f0209840dec272
| 153
|
py
|
Python
|
server/learning/login/api_urls.py
|
kantanand/insmartapps
|
4ab54bb41101e43b5edaac9795509584f01c5c92
|
[
"MIT"
] | 3
|
2016-05-01T18:39:08.000Z
|
2019-02-19T11:55:40.000Z
|
server/learning/login/api_urls.py
|
kantanand/insmartapps
|
4ab54bb41101e43b5edaac9795509584f01c5c92
|
[
"MIT"
] | 1
|
2016-04-28T16:41:24.000Z
|
2016-06-11T19:11:14.000Z
|
server/learning/login/api_urls.py
|
kantanand/insmartapps
|
4ab54bb41101e43b5edaac9795509584f01c5c92
|
[
"MIT"
] | null | null | null |
from django.conf.urls import url
from rest_framework_jwt.views import obtain_jwt_token
# Single API route: clients POST credentials to ``get-auth-token/`` and
# receive a JSON Web Token from rest_framework_jwt's stock view.
# NOTE(review): ``django.conf.urls.url`` is deprecated since Django 2.0 and
# removed in 4.0 — confirm the project's Django version before upgrading.
urlpatterns = [
    url(r'^get-auth-token/', obtain_jwt_token),
]
| 25.5
| 53
| 0.777778
| 24
| 153
| 4.708333
| 0.666667
| 0.159292
| 0.247788
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.117647
| 153
| 6
| 54
| 25.5
| 0.837037
| 0
| 0
| 0
| 0
| 0
| 0.103896
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
9ccb1093180f78123e9f1ee8fdfac29431e8638b
| 8,830
|
py
|
Python
|
gen_1.py
|
heyfaraday/CMB_test
|
ff4c63bd5797dec02c23338c67e761ef62c87338
|
[
"MIT"
] | null | null | null |
gen_1.py
|
heyfaraday/CMB_test
|
ff4c63bd5797dec02c23338c67e761ef62c87338
|
[
"MIT"
] | null | null | null |
gen_1.py
|
heyfaraday/CMB_test
|
ff4c63bd5797dec02c23338c67e761ef62c87338
|
[
"MIT"
] | 1
|
2022-02-13T04:24:42.000Z
|
2022-02-13T04:24:42.000Z
|
import numpy as np
from math import sqrt, pi, sin, cos
from lib import minkowski
# NOTE(review): this script is Python 2 (``xrange``, ``print`` statements,
# integer ``N / 2`` used as an array dimension) — it will not run unchanged
# under Python 3.
L_max_field = 7    # highest multipole degree l carried by the synthesised field
L_max_polynom = 7  # highest degree l in the precomputed polynomial tables (must be >= L_max_field)
N = 8              # longitude samples; the latitude grid has N/2 + 1 rings
def coef_1(in_l, in_m):
    """Recurrence coefficient sqrt((l-m)(2l+1) / ((l+m)(2l-1))); 0.0 at l == 0."""
    if in_l == 0:
        return 0.0
    ratio = (in_l - in_m) * (2.0 * in_l + 1.0) / ((in_l + in_m) * (2.0 * in_l - 1.0))
    return sqrt(ratio)
# P_ generation
# Tables are indexed [latitude ring j][order m][degree l].  The sqrt-weighted
# recurrences below presumably build normalised associated Legendre functions
# evaluated at theta = 2*pi*j/N — TODO confirm against the source derivation.
# Ring j = 0 and j = N/2 (the poles) are deliberately skipped.
P_ = np.zeros((N / 2 + 1, L_max_polynom + 1, L_max_polynom + 1))
for j in xrange(1, N / 2):
    theta = 2.0 * pi * j / N
    P_[j][0][0] = 1.0 / sqrt(4.0 * pi)
    # diagonal terms P[m+1][m+1] from P[m][m]
    for m in xrange(0, L_max_polynom):
        P_[j][m + 1][m + 1] = - P_[j][m][m] * sin(theta) * sqrt(2.0 * m + 3.0) / sqrt(2.0 * m + 2.0)
    # first off-diagonal P[m][m+1]
    for m in xrange(0, L_max_polynom):
        P_[j][m][m + 1] = P_[j][m][m] * cos(theta) * sqrt(2.0 * m + 3.0)
    # three-term recurrence in l for the remaining entries
    for m in xrange(0, L_max_polynom - 1):
        for l in xrange(m + 2, L_max_polynom + 1):
            P_[j][m][l] = ((2.0 * l - 1.0) * sqrt((l - m) * (2.0 * l + 1.0)) / sqrt((l + m) * (2.0 * l - 1.0)) *
                P_[j][m][l - 1] * cos(theta) - (l + m - 1.0) * sqrt((l - m) * (l - 1.0 - m) *
                (2.0 * l + 1.0)) / sqrt((l + m) * (l - 1.0 + m) * (2.0 * l - 3.0)) * P_[j][m][l - 2]) / \
                (l - m)
    # extra sqrt(2) weight on all m > 0 entries (real-harmonic convention)
    for m in xrange(1, L_max_polynom + 1):
        for l in xrange(m, L_max_polynom + 1):
            P_[j][m][l] *= sqrt(2.0)
# F_x generation - np.imag + np.real
# Derivative-like tables; only l >= 2 entries are filled.
F_x = np.zeros((N / 2 + 1, L_max_polynom + 1, L_max_polynom + 1))
for j in xrange(1, N / 2):
    theta = 2.0 * pi * j / N
    for l in xrange(2, L_max_polynom + 1):
        for m in xrange(0, l + 1):
            F_x[j][m][l] = m * P_[j][m][l] / sin(theta)
# F_y generation - np.real + np.imag
F_y = np.zeros((N / 2 + 1, L_max_polynom + 1, L_max_polynom + 1))
for j in xrange(1, N / 2):
    theta = 2.0 * pi * j / N
    for l in xrange(2, L_max_polynom + 1):
        for m in xrange(0, l + 1):
            F_y[j][m][l] = l * cos(theta) / sin(theta) * P_[j][m][l] - \
                1.0 / sin(theta) * (l + m) * coef_1(l, m) * P_[j][m][l - 1]
# F_xy generation - np.imag + np.real
F_xy = np.zeros((N / 2 + 1, L_max_polynom + 1, L_max_polynom + 1))
for j in xrange(1, N / 2):
    theta = 2.0 * pi * j / N
    for l in xrange(2, L_max_polynom + 1):
        for m in xrange(0, l + 1):
            F_xy[j][m][l] = m / sin(theta) * ((1.0 / sin(theta)) * (l + m) * P_[j][m][l - 1] * coef_1(l, m) -
                (l - 1.0) * cos(theta) / sin(theta) * P_[j][m][l])
# F_xx_1 generation - np.real + np.real
F_xx_1 = np.zeros((N / 2 + 1, L_max_polynom + 1, L_max_polynom + 1))
for j in xrange(1, N / 2):
    theta = 2.0 * pi * j / N
    for l in xrange(2, L_max_polynom + 1):
        for m in xrange(0, l + 1):
            F_xx_1[j][m][l] = - m * m * P_[j][m][l] / (sin(theta) * sin(theta))
# F_xx_2 generation - np.real + np.real
F_xx_2 = np.zeros((N / 2 + 1, L_max_polynom + 1, L_max_polynom + 1))
for j in xrange(1, N / 2):
    theta = 2.0 * pi * j / N
    for l in xrange(2, L_max_polynom + 1):
        for m in xrange(0, l + 1):
            F_xx_2[j][m][l] = (l * cos(theta) / sin(theta) * P_[j][m][l] - 1.0 / sin(theta) * (l + m) * coef_1(l, m) *
                P_[j][m][l - 1]) * cos(theta) / sin(theta)
# F_yy generation - np.real + np.real
F_yy = np.zeros((N / 2 + 1, L_max_polynom + 1, L_max_polynom + 1))
for j in xrange(1, N / 2):
    theta = 2.0 * pi * j / N
    for l in xrange(2, L_max_polynom + 1):
        for m in xrange(0, l + 1):
            F_yy[j][m][l] = 0.5 / sin(theta) * ((1.0 / sin(theta)) * (l * l * cos(2.0 * theta) -
                (l + 2.0) * l + 2.0 * m * m) * P_[j][m][l] + 2.0 * (l + m) * cos(theta) /
                sin(theta) * coef_1(l, m) * P_[j][m][l - 1])
# Longitude (x) and latitude (y) grids of the (N+1) x (N/2+1) map:
# x spans [-pi, pi], y spans [-pi/2, pi/2].
x = np.zeros((N + 1, N / 2 + 1))
y = np.zeros((N + 1, N / 2 + 1))
for i in xrange(0, N + 1):
    for j in xrange(0, N / 2 + 1):
        x[i][j] = (2.0 * i - N) / N * pi
        y[i][j] = 2.0 * j / N * pi - pi / 2.0
# Fa/Fb hold per-ring partial sums over l for each order m; T is FFT scratch.
Fa = np.zeros((N / 2 + 1, N))
Fb = np.zeros((N / 2 + 1, N))
T = np.zeros(N)
# a_coef = np.random.normal(0.0, 1.0, size=(L_max_polynom + 1, L_max_polynom + 1))
# b_coef = np.random.normal(0.0, 1.0, size=(L_max_polynom + 1, L_max_polynom + 1))
# Deterministic test coefficients: everything zeroed except a single mode
# (a[1][1] = 1, b[0][1] = 1) — the random initialisation above is disabled.
a_coef = np.zeros((L_max_field + 1, L_max_field + 1))
b_coef = np.zeros((L_max_field + 1, L_max_field + 1))
for m in xrange(0, L_max_field + 1):
    for l in xrange(0, m):
        a_coef[m][l] = 0.0
        b_coef[m][l] = 0.0
for l in xrange(0, L_max_field + 1):
    b_coef[0][l] = 0.0
a_coef[0][0] = 0.0
b_coef[0][0] = 0.0
a_coef[0][1] = 0.0
a_coef[1][1] = 1.0
b_coef[0][1] = 1.0
b_coef[1][1] = 0.0
# Angular power spectrum C[l] from the coefficients.
C = np.zeros((L_max_field + 1))
for l in xrange(0, L_max_field + 1):
    C_sum = 0.0
    for m in xrange(0, l + 1):
        C_sum = C_sum + a_coef[m][l] * a_coef[m][l] + b_coef[m][l] * b_coef[m][l]
    C[l] = C_sum / (2.0 * l + 1.0)
# Theoretical moments of the field and its derivatives.
sigma_0 = 0.0
for l in xrange(0, L_max_field + 1):
    sigma_0 += (2.0 * l + 1.0) * C[l]
sigma_0 = sqrt(sigma_0 / 4.0 / pi)
sigma_1 = 0.0
for l in xrange(0, L_max_field + 1):
    sigma_1 += l * (l + 1.0) * (2.0 * l + 1.0) * C[l]
# NOTE(review): ``/ 4.0 * pi`` here multiplies by pi, whereas sigma_0 above
# divides (``/ 4.0 / pi``) — likely a typo, but sigma_1/sigma_2 are unused
# later in this script, so left untouched; verify against the derivation.
sigma_1 = sqrt(sigma_1 / 4.0 * pi)
sigma_2 = 0.0
for l in xrange(0, L_max_field + 1):
    sigma_2 += (l + 2.0) * (l - 1.0) * l * (l + 1.0) * (2.0 * l + 1.0) * C[l]
sigma_2 = sqrt(sigma_2 / 4.0 * pi)
# func1/func2 accumulate the sum over l for one order m, then are reset.
func1 = 0.0
func2 = 0.0
# field generation
# Each block below follows the same pattern: per ring j, sum the (a, b)
# coefficients against a table, then combine the two FFTs into real values
# along the longitude axis and wrap the last column (periodic boundary).
# NOTE(review): ``theta`` is assigned but never used inside these synthesis
# loops — leftover from the table-generation loops above.
field = np.zeros((N + 1, N / 2 + 1))
for j in xrange(1, N / 2):
    theta = 2.0 * pi * j / N
    for m in xrange(0, L_max_field + 1):
        for l in xrange(m, L_max_field + 1):
            func1 += a_coef[m][l] * P_[j][m][l]
            func2 += b_coef[m][l] * P_[j][m][l]
        Fa[j][m] = func1
        Fb[j][m] = func2
        func1 = 0.0
        func2 = 0.0
    T = np.real(np.fft.fft(Fa[j])) + np.imag(np.fft.fft(Fb[j]))
    field[0:N, j] = T[:]
    field[N][j] = field[0][j]
# field_x generation
field_x = np.zeros((N + 1, N / 2 + 1))
for j in xrange(1, N / 2):
    theta = 2.0 * pi * j / N
    for m in xrange(0, L_max_field + 1):
        for l in xrange(m, L_max_field + 1):
            func1 += a_coef[m][l] * F_x[j][m][l]
            func2 += b_coef[m][l] * F_x[j][m][l]
        Fa[j][m] = func1
        Fb[j][m] = func2
        func1 = 0.0
        func2 = 0.0
    # x-derivative uses the swapped/negated real-imag combination
    T = - np.imag(np.fft.fft(Fa[j])) + np.real(np.fft.fft(Fb[j]))
    field_x[0:N, j] = T[:]
    field_x[N][j] = field_x[0][j]
# field_y generation
field_y = np.zeros((N + 1, N / 2 + 1))
for j in xrange(1, N / 2):
    theta = 2.0 * pi * j / N
    for m in xrange(0, L_max_field + 1):
        for l in xrange(m, L_max_field + 1):
            func1 += a_coef[m][l] * F_y[j][m][l]
            func2 += b_coef[m][l] * F_y[j][m][l]
        Fa[j][m] = func1
        Fb[j][m] = func2
        func1 = 0.0
        func2 = 0.0
    T = np.real(np.fft.fft(Fa[j])) + np.imag(np.fft.fft(Fb[j]))
    field_y[0:N, j] = T[:]
    field_y[N][j] = field_y[0][j]
# field_xx generation
field_xx = np.zeros((N + 1, N / 2 + 1))
for j in xrange(1, N / 2):
    theta = 2 * pi * j / N
    for m in xrange(0, L_max_field + 1):
        for l in xrange(m, L_max_field + 1):
            func1 += a_coef[m][l] * (F_xx_1[j][m][l] + F_xx_2[j][m][l])
            func2 += b_coef[m][l] * (F_xx_1[j][m][l] + F_xx_2[j][m][l])
        Fa[j][m] = func1
        Fb[j][m] = func2
        func1 = 0.0
        func2 = 0.0
    T = np.real(np.fft.fft(Fa[j])) + np.imag(np.fft.fft(Fb[j]))
    field_xx[0:N, j] = T[:]
    field_xx[N][j] = field_xx[0][j]
# field_yy generation
field_yy = np.zeros((N + 1, N / 2 + 1))
for j in xrange(1, N / 2):
    theta = 2.0 * pi * j / N
    for m in xrange(0, L_max_field + 1):
        for l in xrange(m, L_max_field + 1):
            func1 += a_coef[m][l] * F_yy[j][m][l]
            func2 += b_coef[m][l] * F_yy[j][m][l]
        Fa[j][m] = func1
        Fb[j][m] = func2
        func1 = 0.0
        func2 = 0.0
    T = np.real(np.fft.fft(Fa[j])) + np.imag(np.fft.fft(Fb[j]))
    field_yy[0:N, j] = T[:]
    field_yy[N][j] = field_yy[0][j]
# field_xy generation
field_xy = np.zeros((N + 1, N / 2 + 1))
for j in xrange(1, N / 2):
    theta = 2 * pi * j / N
    for m in xrange(0, L_max_field + 1):
        for l in xrange(m, L_max_field + 1):
            func1 += a_coef[m][l] * F_xy[j][m][l]
            func2 += b_coef[m][l] * F_xy[j][m][l]
        Fa[j][m] = func1
        Fb[j][m] = func2
        func1 = 0.0
        func2 = 0.0
    T = - np.imag(np.fft.fft(Fa[j])) + np.real(np.fft.fft(Fb[j]))
    field_xy[0:N, j] = T[:]
    field_xy[N][j] = field_xy[0][j]
# Cosine-weighted RMS of the map (cos(y) is the sphere's area element),
# then normalise the field to unit variance.
a = 0.0
na = 0.0
for i in xrange(0, N):
    for j in xrange(1, N / 2):
        a += cos(y[i][j]) * field[i][j] * field[i][j]
        na += cos(y[i][j])
sigma_0_map = sqrt(a / na)
field /= sigma_0_map
# Python 2 print statements: Minkowski functionals of the normalised map.
print minkowski.area(y, field)
print minkowski.length(x, y, field)
print field
| 27.767296
| 118
| 0.478029
| 1,827
| 8,830
| 2.16694
| 0.036125
| 0.031321
| 0.025764
| 0.08487
| 0.8098
| 0.76004
| 0.716848
| 0.659257
| 0.635767
| 0.58247
| 0
| 0.078246
| 0.315402
| 8,830
| 317
| 119
| 27.85489
| 0.576675
| 0.057531
| 0
| 0.442308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.014423
| null | null | 0.014423
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
9cce62689b1c5c32ca70652f2026acc5cece9497
| 879
|
py
|
Python
|
neighapp/migrations/0002_auto_20210725_1822.py
|
mohamedissack/Neighbour-App
|
649ca351bbfeef4ca8f2caa75c2878a178c06cb1
|
[
"MIT"
] | null | null | null |
neighapp/migrations/0002_auto_20210725_1822.py
|
mohamedissack/Neighbour-App
|
649ca351bbfeef4ca8f2caa75c2878a178c06cb1
|
[
"MIT"
] | null | null | null |
neighapp/migrations/0002_auto_20210725_1822.py
|
mohamedissack/Neighbour-App
|
649ca351bbfeef4ca8f2caa75c2878a178c06cb1
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.5 on 2021-07-25 18:22
from django.db import migrations
class Migration(migrations.Migration):
    """Rename every ``hood_*`` field on the neighbourhood model to the
    matching ``neighbourhood_*`` name (description, location, name, photo).
    """

    dependencies = [
        ('neighapp', '0001_initial'),
    ]

    # The four renames are structurally identical, so build them from the
    # shared suffix list; order matches the originally generated migration.
    operations = [
        migrations.RenameField(
            model_name='neighbourhood',
            old_name='hood_' + _suffix,
            new_name='neighbourhood_' + _suffix,
        )
        for _suffix in ('description', 'location', 'name', 'photo')
    ]
| 25.852941
| 49
| 0.579067
| 78
| 879
| 6.25641
| 0.423077
| 0.278689
| 0.213115
| 0.245902
| 0.442623
| 0.442623
| 0.442623
| 0.442623
| 0
| 0
| 0
| 0.031773
| 0.319681
| 879
| 33
| 50
| 26.636364
| 0.784281
| 0.051195
| 0
| 0.444444
| 1
| 0
| 0.245192
| 0.05649
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.037037
| 0
| 0.148148
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
143834f608a51b55557e648c315238846f44aa78
| 74
|
py
|
Python
|
pynmapservice/__init__.py
|
vix597/vrnmap
|
30c9e69c63aa8282f9ed2bccb96afa4226912fc9
|
[
"0BSD"
] | null | null | null |
pynmapservice/__init__.py
|
vix597/vrnmap
|
30c9e69c63aa8282f9ed2bccb96afa4226912fc9
|
[
"0BSD"
] | null | null | null |
pynmapservice/__init__.py
|
vix597/vrnmap
|
30c9e69c63aa8282f9ed2bccb96afa4226912fc9
|
[
"0BSD"
] | null | null | null |
"""PyNmapService - A service to control Nmap with a websocket for VR."""
| 37
| 73
| 0.716216
| 11
| 74
| 4.818182
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.175676
| 74
| 1
| 74
| 74
| 0.868852
| 0.891892
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
148ce824bfce64bbffb187870f97b6123dc6abfb
| 133
|
py
|
Python
|
nose2/tests/functional/support/scenario/module_import_err/pkg/test_import_err.py
|
ltfish/nose2
|
e47363dad10056cf906daf387613c21d74f37e56
|
[
"BSD-2-Clause"
] | null | null | null |
nose2/tests/functional/support/scenario/module_import_err/pkg/test_import_err.py
|
ltfish/nose2
|
e47363dad10056cf906daf387613c21d74f37e56
|
[
"BSD-2-Clause"
] | null | null | null |
nose2/tests/functional/support/scenario/module_import_err/pkg/test_import_err.py
|
ltfish/nose2
|
e47363dad10056cf906daf387613c21d74f37e56
|
[
"BSD-2-Clause"
] | null | null | null |
# Deliberate module-level failure: this fixture (scenario ``module_import_err``)
# exercises the test runner's handling of a module that blows up at import
# time, so nothing below the ``raise`` ever executes or gets collected.
raise ValueError('booms')
import unittest
# Unreachable — present only so the module looks like a normal test module.
def test():
    pass
class Test(unittest.TestCase):
    def test(self):
        pass
| 9.5
| 30
| 0.639098
| 16
| 133
| 5.3125
| 0.6875
| 0.164706
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.255639
| 133
| 13
| 31
| 10.230769
| 0.858586
| 0
| 0
| 0.285714
| 0
| 0
| 0.037594
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| false
| 0.285714
| 0.142857
| 0
| 0.571429
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 4
|
1adfbe5a14442d68c9f4504fad47168384a09dd6
| 961
|
py
|
Python
|
lib/systems/d-aspartic_acid.py
|
pulsar-chem/BPModule
|
f8e64e04fdb01947708f098e833600c459c2ff0e
|
[
"BSD-3-Clause"
] | null | null | null |
lib/systems/d-aspartic_acid.py
|
pulsar-chem/BPModule
|
f8e64e04fdb01947708f098e833600c459c2ff0e
|
[
"BSD-3-Clause"
] | null | null | null |
lib/systems/d-aspartic_acid.py
|
pulsar-chem/BPModule
|
f8e64e04fdb01947708f098e833600c459c2ff0e
|
[
"BSD-3-Clause"
] | null | null | null |
import pulsar as psr
def load_ref_system():
    """Return d-aspartic_acid as found in the IQMol fragment library.

    All credit to https://github.com/nutjunkie/IQmol
    """
    # XYZ-style geometry block: element symbol followed by Cartesian
    # coordinates, passed verbatim to the pulsar system builder.
    geometry = """
        H      1.1749      0.2716     -0.8816
        C      0.2902     -0.3063     -0.5085
        C     -0.6411     -0.5180     -1.7035
        O     -0.8069      0.2223     -2.6543
        O     -1.3677     -1.6581     -1.7034
        H     -1.9153     -1.6984     -2.4819
        C     -0.4682      0.5172      0.5398
        C      0.4146      0.9504      1.6942
        H     -0.9011      1.4221      0.0654
        N      0.8376     -1.5441      0.0868
        H      1.2665     -2.1008     -0.6201
        H      0.1218     -2.0662      0.5461
        O      1.4697      1.7781      1.5170
        O      0.2747      0.6686      2.8640
        H     -1.3307     -0.0569      0.9378
        H      1.5926      1.9741      0.5965
        """
    return psr.make_system(geometry)
| 40.041667
| 71
| 0.419355
| 143
| 961
| 2.79021
| 0.573427
| 0.025063
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.480962
| 0.480749
| 961
| 23
| 72
| 41.782609
| 0.318637
| 0.116545
| 0
| 0
| 0
| 0
| 0.898673
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05
| true
| 0
| 0.05
| 0
| 0.15
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
1ae7b40a189f027b76a7b1c296483fb5c9a8b309
| 217
|
py
|
Python
|
processmanager/__init__.py
|
igormacedo/process-manager
|
c48568d03c83f034d0114228efc919fc38754dc7
|
[
"MIT"
] | 1
|
2017-09-05T01:27:13.000Z
|
2017-09-05T01:27:13.000Z
|
processmanager/__init__.py
|
igormacedo/process-manager
|
c48568d03c83f034d0114228efc919fc38754dc7
|
[
"MIT"
] | null | null | null |
processmanager/__init__.py
|
igormacedo/process-manager
|
c48568d03c83f034d0114228efc919fc38754dc7
|
[
"MIT"
] | null | null | null |
from flask import Flask
from flask_socketio import SocketIO, send
# Flask application and its Socket.IO wrapper, shared by the whole package.
app = Flask(__name__)
# NOTE(review): hard-coded secret key — load from configuration/environment
# before deploying anywhere real.
app.config['SECRET_KEY'] = 'mysecret'
socketio = SocketIO(app)
# Imported last, and for side effects only, so the view/handler modules can
# import ``app``/``socketio`` from this package without a circular import.
import processmanager.views
import processmanager.sockethandler
| 21.7
| 41
| 0.81106
| 27
| 217
| 6.296296
| 0.518519
| 0.105882
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.110599
| 217
| 9
| 42
| 24.111111
| 0.880829
| 0
| 0
| 0
| 0
| 0
| 0.082949
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.571429
| 0
| 0.571429
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
210178e002cb091da1855ecd2446c4e7026eb996
| 31,623
|
py
|
Python
|
tests/test_client.py
|
benjamin-bader/aiohttp
|
ee35cfe4714cd1b13655958b4625e1570719e9d5
|
[
"Apache-2.0"
] | null | null | null |
tests/test_client.py
|
benjamin-bader/aiohttp
|
ee35cfe4714cd1b13655958b4625e1570719e9d5
|
[
"Apache-2.0"
] | null | null | null |
tests/test_client.py
|
benjamin-bader/aiohttp
|
ee35cfe4714cd1b13655958b4625e1570719e9d5
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Tests for aiohttp/client.py"""
import asyncio
import inspect
import io
import unittest
import unittest.mock
import urllib.parse
import aiohttp
from aiohttp.client import ClientRequest, ClientResponse
# chardet is optional: it backs encoding auto-detection, and the tests that
# need it are skipped via ``skipIf(chardet is None, ...)`` below.
try:
    import chardet
except ImportError:  # pragma: no cover
    chardet = None
class ClientResponseTests(unittest.TestCase):
    """Tests for ``aiohttp.client.ClientResponse``.

    Each test drives a ClientResponse against mocked connection/content
    objects on a private event loop.  Many tests share the same shape: a
    local ``side_effect`` factory makes the mocked ``content.read`` return
    one payload future on the first call and raise ``aiohttp.EofStream`` on
    the second, simulating a body followed by end-of-stream.
    """
    def setUp(self):
        # Private loop per test; the global loop is cleared so nothing can
        # accidentally use it.
        self.loop = asyncio.new_event_loop()
        asyncio.set_event_loop(None)
        self.connection = unittest.mock.Mock()
        self.stream = aiohttp.StreamParser(loop=self.loop)
        self.response = ClientResponse('get', 'http://python.org')
    def tearDown(self):
        self.loop.close()
    def test_del(self):
        # Garbage-collecting an un-closed response must warn and close the
        # underlying connection.
        response = ClientResponse('get', 'http://python.org')
        connection = unittest.mock.Mock()
        response._setup_connection(connection)
        with self.assertWarns(ResourceWarning):
            del response
        connection.close.assert_called_with()
    def test_close(self):
        # close() releases the connection once; further calls are no-ops.
        self.response.connection = self.connection
        self.response.close()
        self.assertIsNone(self.response.connection)
        self.assertTrue(self.connection.release.called)
        self.response.close()
        self.response.close()
    def test_wait_for_100(self):
        response = ClientResponse(
            'get', 'http://python.org', continue100=object())
        self.assertTrue(response.waiting_for_continue())
        response = ClientResponse(
            'get', 'http://python.org')
        self.assertFalse(response.waiting_for_continue())
    def test_repr(self):
        self.response.status = 200
        self.response.reason = 'Ok'
        self.assertIn(
            '<ClientResponse(http://python.org) [200 Ok]>',
            repr(self.response))
    def test_read_and_release_connection(self):
        # One-shot read factory: first call yields the payload, the second
        # call raises EofStream (see class docstring).
        def side_effect(*args, **kwargs):
            def second_call(*args, **kwargs):
                raise aiohttp.EofStream
            fut = asyncio.Future(loop=self.loop)
            fut.set_result(b'payload')
            content.read.side_effect = second_call
            return fut
        content = self.response.content = unittest.mock.Mock()
        content.read.side_effect = side_effect
        self.response.close = unittest.mock.Mock()
        res = self.loop.run_until_complete(self.response.read())
        self.assertEqual(res, b'payload')
        self.assertTrue(self.response.close.called)
    def test_read_and_release_connection_with_error(self):
        content = self.response.content = unittest.mock.Mock()
        content.read.return_value = asyncio.Future(loop=self.loop)
        content.read.return_value.set_exception(ValueError)
        self.response.close = unittest.mock.Mock()
        self.assertRaises(
            ValueError,
            self.loop.run_until_complete, self.response.read())
        self.response.close.assert_called_with(True)
    def test_release(self):
        fut = asyncio.Future(loop=self.loop)
        fut.set_result(b'')
        content = self.response.content = unittest.mock.Mock()
        content.readany.return_value = fut
        self.response.close = unittest.mock.Mock()
        self.loop.run_until_complete(self.response.release())
        self.assertTrue(self.response.close.called)
    def test_read_and_close(self):
        # read_and_close() is deprecated and must warn while delegating.
        self.response.read = unittest.mock.Mock()
        self.response.read.return_value = asyncio.Future(loop=self.loop)
        self.response.read.return_value.set_result(b'data')
        with self.assertWarns(DeprecationWarning):
            res = self.loop.run_until_complete(self.response.read_and_close())
        self.assertEqual(res, b'data')
        self.assertTrue(self.response.read.called)
    def test_read_decode_deprecated(self):
        self.response._content = b'data'
        self.response.json = unittest.mock.Mock()
        self.response.json.return_value = asyncio.Future(loop=self.loop)
        self.response.json.return_value.set_result('json')
        with self.assertWarns(DeprecationWarning):
            res = self.loop.run_until_complete(self.response.read(decode=True))
        self.assertEqual(res, 'json')
        self.assertTrue(self.response.json.called)
    def test_text(self):
        # Charset comes from the Content-Type header (cp1251 here); payload
        # is Cyrillic text to exercise real decoding.
        def side_effect(*args, **kwargs):
            def second_call(*args, **kwargs):
                raise aiohttp.EofStream
            fut = asyncio.Future(loop=self.loop)
            fut.set_result('{"тест": "пройден"}'.encode('cp1251'))
            content.read.side_effect = second_call
            return fut
        self.response.headers = {
            'CONTENT-TYPE': 'application/json;charset=cp1251'}
        content = self.response.content = unittest.mock.Mock()
        content.read.side_effect = side_effect
        self.response.close = unittest.mock.Mock()
        res = self.loop.run_until_complete(self.response.text())
        self.assertEqual(res, '{"тест": "пройден"}')
        self.assertTrue(self.response.close.called)
    def test_text_custom_encoding(self):
        # No charset in the header; caller supplies the encoding explicitly.
        def side_effect(*args, **kwargs):
            def second_call(*args, **kwargs):
                raise aiohttp.EofStream
            fut = asyncio.Future(loop=self.loop)
            fut.set_result('{"тест": "пройден"}'.encode('cp1251'))
            content.read.side_effect = second_call
            return fut
        self.response.headers = {
            'CONTENT-TYPE': 'application/json'}
        content = self.response.content = unittest.mock.Mock()
        content.read.side_effect = side_effect
        self.response.close = unittest.mock.Mock()
        res = self.loop.run_until_complete(
            self.response.text(encoding='cp1251'))
        self.assertEqual(res, '{"тест": "пройден"}')
        self.assertTrue(self.response.close.called)
    @unittest.skipIf(chardet is None, "no chardet")
    def test_text_detect_encoding(self):
        # No charset and no explicit encoding: chardet must detect cp1251.
        def side_effect(*args, **kwargs):
            def second_call(*args, **kwargs):
                raise aiohttp.EofStream
            fut = asyncio.Future(loop=self.loop)
            fut.set_result('{"тест": "пройден"}'.encode('cp1251'))
            content.read.side_effect = second_call
            return fut
        self.response.headers = {'CONTENT-TYPE': 'application/json'}
        content = self.response.content = unittest.mock.Mock()
        content.read.side_effect = side_effect
        self.response.close = unittest.mock.Mock()
        res = self.loop.run_until_complete(self.response.text())
        self.assertEqual(res, '{"тест": "пройден"}')
        self.assertTrue(self.response.close.called)
    def test_text_detect_encoding_without_chardet(self):
        # With chardet patched away, decoding must fail loudly.
        def side_effect(*args, **kwargs):
            def second_call(*args, **kwargs):
                raise aiohttp.EofStream
            fut = asyncio.Future(loop=self.loop)
            fut.set_result('{"тест": "пройден"}'.encode('cp1251'))
            content.read.side_effect = second_call
            return fut
        self.response.headers = {'CONTENT-TYPE': 'application/json'}
        content = self.response.content = unittest.mock.Mock()
        content.read.side_effect = side_effect
        self.response.close = unittest.mock.Mock()
        with unittest.mock.patch('aiohttp.client.chardet', None):
            self.assertRaises(UnicodeDecodeError,
                              self.loop.run_until_complete,
                              self.response.text())
    def test_json(self):
        def side_effect(*args, **kwargs):
            def second_call(*args, **kwargs):
                raise aiohttp.EofStream
            fut = asyncio.Future(loop=self.loop)
            fut.set_result('{"тест": "пройден"}'.encode('cp1251'))
            content.read.side_effect = second_call
            return fut
        self.response.headers = {
            'CONTENT-TYPE': 'application/json;charset=cp1251'}
        content = self.response.content = unittest.mock.Mock()
        content.read.side_effect = side_effect
        self.response.close = unittest.mock.Mock()
        res = self.loop.run_until_complete(self.response.json())
        self.assertEqual(res, {'тест': 'пройден'})
        self.assertTrue(self.response.close.called)
    def test_json_custom_loader(self):
        self.response.headers = {
            'CONTENT-TYPE': 'application/json;charset=cp1251'}
        self.response._content = b'data'
        def custom(content):
            return content + '-custom'
        res = self.loop.run_until_complete(self.response.json(loads=custom))
        self.assertEqual(res, 'data-custom')
    @unittest.mock.patch('aiohttp.client.client_logger')
    def test_json_no_content(self, m_log):
        # Non-JSON mimetype with an empty body: returns None and logs a
        # warning instead of raising.
        self.response.headers = {
            'CONTENT-TYPE': 'data/octet-stream'}
        self.response._content = b''
        self.response.close = unittest.mock.Mock()
        res = self.loop.run_until_complete(self.response.json())
        self.assertIsNone(res)
        m_log.warning.assert_called_with(
            'Attempt to decode JSON with unexpected mimetype: %s',
            'data/octet-stream')
    def test_json_override_encoding(self):
        # Header claims utf8 but caller's explicit cp1251 must win.
        def side_effect(*args, **kwargs):
            def second_call(*args, **kwargs):
                raise aiohttp.EofStream
            fut = asyncio.Future(loop=self.loop)
            fut.set_result('{"тест": "пройден"}'.encode('cp1251'))
            content.read.side_effect = second_call
            return fut
        self.response.headers = {
            'CONTENT-TYPE': 'application/json;charset=utf8'}
        content = self.response.content = unittest.mock.Mock()
        content.read.side_effect = side_effect
        self.response.close = unittest.mock.Mock()
        res = self.loop.run_until_complete(
            self.response.json(encoding='cp1251'))
        self.assertEqual(res, {'тест': 'пройден'})
        self.assertTrue(self.response.close.called)
    @unittest.skipIf(chardet is None, "no chardet")
    def test_json_detect_encoding(self):
        def side_effect(*args, **kwargs):
            def second_call(*args, **kwargs):
                raise aiohttp.EofStream
            fut = asyncio.Future(loop=self.loop)
            fut.set_result('{"тест": "пройден"}'.encode('cp1251'))
            content.read.side_effect = second_call
            return fut
        self.response.headers = {'CONTENT-TYPE': 'application/json'}
        content = self.response.content = unittest.mock.Mock()
        content.read.side_effect = side_effect
        self.response.close = unittest.mock.Mock()
        res = self.loop.run_until_complete(self.response.json())
        self.assertEqual(res, {'тест': 'пройден'})
        self.assertTrue(self.response.close.called)
    def test_json_detect_encoding_without_chardet(self):
        def side_effect(*args, **kwargs):
            def second_call(*args, **kwargs):
                raise aiohttp.EofStream
            fut = asyncio.Future(loop=self.loop)
            fut.set_result('{"тест": "пройден"}'.encode('cp1251'))
            content.read.side_effect = second_call
            return fut
        self.response.headers = {'CONTENT-TYPE': 'application/json'}
        content = self.response.content = unittest.mock.Mock()
        content.read.side_effect = side_effect
        self.response.close = unittest.mock.Mock()
        with unittest.mock.patch('aiohttp.client.chardet', None):
            self.assertRaises(UnicodeDecodeError,
                              self.loop.run_until_complete,
                              self.response.json())
    def test_override_flow_control(self):
        # A subclass may swap the flow-control class used for the body.
        class MyResponse(ClientResponse):
            flow_control_class = aiohttp.FlowControlDataQueue
        response = MyResponse('get', 'http://python.org')
        response._setup_connection(self.connection)
        self.assertIsInstance(response.content, aiohttp.FlowControlDataQueue)
        with self.assertWarns(ResourceWarning):
            del response
class ClientRequestTests(unittest.TestCase):
def setUp(self):
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(None)
self.transport = unittest.mock.Mock()
self.connection = unittest.mock.Mock()
self.protocol = unittest.mock.Mock()
self.protocol.writer.drain.return_value = ()
self.stream = aiohttp.StreamParser(loop=self.loop)
def tearDown(self):
self.loop.close()
def test_method(self):
req = ClientRequest('get', 'http://python.org/')
self.assertEqual(req.method, 'GET')
req = ClientRequest('head', 'http://python.org/')
self.assertEqual(req.method, 'HEAD')
req = ClientRequest('HEAD', 'http://python.org/')
self.assertEqual(req.method, 'HEAD')
def test_version(self):
req = ClientRequest('get', 'http://python.org/', version='1.0')
self.assertEqual(req.version, (1, 0))
def test_version_err(self):
self.assertRaises(
ValueError,
ClientRequest, 'get', 'http://python.org/', version='1.c')
def test_host_port(self):
req = ClientRequest('get', 'http://python.org/')
self.assertEqual(req.host, 'python.org')
self.assertEqual(req.port, 80)
self.assertFalse(req.ssl)
req = ClientRequest('get', 'https://python.org/')
self.assertEqual(req.host, 'python.org')
self.assertEqual(req.port, 443)
self.assertTrue(req.ssl)
req = ClientRequest('get', 'https://python.org:960/')
self.assertEqual(req.host, 'python.org')
self.assertEqual(req.port, 960)
self.assertTrue(req.ssl)
def test_host_port_err(self):
self.assertRaises(
ValueError, ClientRequest, 'get', 'http://python.org:123e/')
def test_host_header(self):
req = ClientRequest('get', 'http://python.org/')
self.assertEqual(req.headers['HOST'], 'python.org')
req = ClientRequest('get', 'http://python.org:80/')
self.assertEqual(req.headers['HOST'], 'python.org:80')
req = ClientRequest('get', 'http://python.org:99/')
self.assertEqual(req.headers['HOST'], 'python.org:99')
req = ClientRequest('get', 'http://python.org/',
headers={'host': 'example.com'})
self.assertEqual(req.headers['HOST'], 'example.com')
req = ClientRequest('get', 'http://python.org/',
headers={'host': 'example.com:99'})
self.assertEqual(req.headers['HOST'], 'example.com:99')
def test_headers(self):
req = ClientRequest('get', 'http://python.org/',
headers={'Content-Type': 'text/plain'})
self.assertIn('CONTENT-TYPE', req.headers)
self.assertEqual(req.headers['CONTENT-TYPE'], 'text/plain')
self.assertEqual(req.headers['ACCEPT-ENCODING'], 'gzip, deflate')
def test_headers_list(self):
req = ClientRequest('get', 'http://python.org/',
headers=[('Content-Type', 'text/plain')])
self.assertIn('CONTENT-TYPE', req.headers)
self.assertEqual(req.headers['CONTENT-TYPE'], 'text/plain')
def test_headers_default(self):
req = ClientRequest('get', 'http://python.org/',
headers={'ACCEPT-ENCODING': 'deflate'})
self.assertEqual(req.headers['ACCEPT-ENCODING'], 'deflate')
def test_invalid_url(self):
self.assertRaises(
ValueError, ClientRequest, 'get', 'hiwpefhipowhefopw')
def test_invalid_idna(self):
self.assertRaises(
ValueError, ClientRequest, 'get', 'http://\u2061owhefopw.com')
def test_no_path(self):
req = ClientRequest('get', 'http://python.org')
self.assertEqual('/', req.path)
def test_basic_auth(self):
    # BasicAuth helper produces a base64-encoded Authorization header.
    req = ClientRequest('get', 'http://python.org',
                        auth=aiohttp.helpers.BasicAuth('nkim', '1234'))
    self.assertIn('AUTHORIZATION', req.headers)
    self.assertEqual('Basic bmtpbToxMjM0', req.headers['AUTHORIZATION'])

def test_basic_auth_utf8(self):
    # Non-ASCII credentials are encoded with the given charset before
    # base64 encoding.
    req = ClientRequest('get', 'http://python.org',
                        auth=aiohttp.helpers.BasicAuth('nkim', 'секрет',
                                                       'utf-8'))
    self.assertIn('AUTHORIZATION', req.headers)
    self.assertEqual('Basic bmtpbTrRgdC10LrRgNC10YI=',
                     req.headers['AUTHORIZATION'])

def test_basic_auth_tuple_deprecated(self):
    # Passing a bare (login, password) tuple still works but warns.
    with self.assertWarns(DeprecationWarning):
        req = ClientRequest('get', 'http://python.org',
                            auth=('nkim', '1234'))
    self.assertIn('AUTHORIZATION', req.headers)
    self.assertEqual('Basic bmtpbToxMjM0', req.headers['AUTHORIZATION'])

def test_basic_auth_from_url(self):
    # Credentials embedded in the URL are picked up; an explicit auth=
    # argument takes precedence over URL userinfo.
    req = ClientRequest('get', 'http://nkim:1234@python.org')
    self.assertIn('AUTHORIZATION', req.headers)
    self.assertEqual('Basic bmtpbToxMjM0', req.headers['AUTHORIZATION'])
    req = ClientRequest(
        'get', 'http://nkim@python.org',
        auth=aiohttp.helpers.BasicAuth('nkim', '1234'))
    self.assertIn('AUTHORIZATION', req.headers)
    self.assertEqual('Basic bmtpbToxMjM0', req.headers['AUTHORIZATION'])
def test_no_content_length(self):
    # Body-less GET/HEAD requests still carry an explicit
    # Content-Length: 0 after send().
    req = ClientRequest('get', 'http://python.org', loop=self.loop)
    req.send(self.transport, self.protocol)
    self.assertEqual('0', req.headers.get('CONTENT-LENGTH'))
    req = ClientRequest('head', 'http://python.org', loop=self.loop)
    req.send(self.transport, self.protocol)
    self.assertEqual('0', req.headers.get('CONTENT-LENGTH'))

def test_path_is_not_double_encoded(self):
    # Raw characters are percent-encoded once; already-encoded sequences
    # (%2f, %20) are left untouched.
    req = ClientRequest('get', "http://0.0.0.0/get/test case")
    self.assertEqual(req.path, "/get/test%20case")
    req = ClientRequest('get', "http://0.0.0.0/get/test%2fcase")
    self.assertEqual(req.path, "/get/test%2fcase")
    req = ClientRequest('get', "http://0.0.0.0/get/test%20case")
    self.assertEqual(req.path, "/get/test%20case")

def test_params_are_added_before_fragment(self):
    # Query params are appended to the query string, never after the
    # URL fragment.
    req = ClientRequest(
        'GET', "http://example.com/path#fragment", params={"a": "b"})
    self.assertEqual(
        req.path, "/path?a=b#fragment")
    req = ClientRequest(
        'GET',
        "http://example.com/path?key=value#fragment", params={"a": "b"})
    self.assertEqual(
        req.path, "/path?key=value&a=b#fragment")
def test_cookies(self):
    # cookies= builds a Cookie header; when the caller also passes a
    # 'cookie' header, both sets are merged.
    req = ClientRequest(
        'get', 'http://test.com/path', cookies={'cookie1': 'val1'})
    self.assertIn('COOKIE', req.headers)
    self.assertEqual('cookie1=val1', req.headers['COOKIE'])
    req = ClientRequest(
        'get', 'http://test.com/path',
        headers={'cookie': 'cookie1=val1'},
        cookies={'cookie2': 'val2'})
    self.assertEqual('cookie1=val1; cookie2=val2', req.headers['COOKIE'])

def test_unicode_get(self):
    # Non-ASCII query keys/values and path segments are UTF-8
    # percent-encoded.
    def join(*suffix):
        return urllib.parse.urljoin('http://python.org/', '/'.join(suffix))
    url = 'http://python.org'
    req = ClientRequest('get', url, params={'foo': 'f\xf8\xf8'})
    self.assertEqual('/?foo=f%C3%B8%C3%B8', req.path)
    req = ClientRequest('', url, params={'f\xf8\xf8': 'f\xf8\xf8'})
    self.assertEqual('/?f%C3%B8%C3%B8=f%C3%B8%C3%B8', req.path)
    req = ClientRequest('', url, params={'foo': 'foo'})
    self.assertEqual('/?foo=foo', req.path)
    req = ClientRequest('', join('\xf8'), params={'foo': 'foo'})
    self.assertEqual('/%C3%B8?foo=foo', req.path)

def test_query_multivalued_param(self):
    # Repeated keys given as a tuple sequence are preserved in order
    # for every HTTP method.
    for meth in ClientRequest.ALL_METHODS:
        req = ClientRequest(
            meth, 'http://python.org',
            params=(('test', 'foo'), ('test', 'baz')))
        self.assertEqual(req.path, '/?test=foo&test=baz')
def test_post_data(self):
    # A dict body is form-encoded with the matching content type for
    # every POST-like method.
    for meth in ClientRequest.POST_METHODS:
        req = ClientRequest(
            meth, 'http://python.org/',
            data={'life': '42'}, loop=self.loop)
        req.send(self.transport, self.protocol)
        self.assertEqual('/', req.path)
        self.assertEqual(b'life=42', req.body)
        self.assertEqual('application/x-www-form-urlencoded',
                         req.headers['CONTENT-TYPE'])

@unittest.mock.patch('aiohttp.client.ClientRequest.update_body_from_data')
def test_pass_falsy_data(self, _):
    # Even a falsy body ({}) must be forwarded to
    # update_body_from_data, not silently dropped.
    req = ClientRequest(
        'post', 'http://python.org/',
        data={}, loop=self.loop)
    req.update_body_from_data.assert_called_once_with({})

def test_get_with_data(self):
    # GET-like methods still accept a form body.
    for meth in ClientRequest.GET_METHODS:
        req = ClientRequest(
            meth, 'http://python.org/', data={'life': '42'})
        self.assertEqual('/', req.path)
        self.assertEqual(b'life=42', req.body)

def test_bytes_data(self):
    # A raw bytes body is sent verbatim as application/octet-stream.
    for meth in ClientRequest.POST_METHODS:
        req = ClientRequest(
            meth, 'http://python.org/',
            data=b'binary data', loop=self.loop)
        req.send(self.transport, self.protocol)
        self.assertEqual('/', req.path)
        self.assertEqual(b'binary data', req.body)
        self.assertEqual('application/octet-stream',
                         req.headers['CONTENT-TYPE'])

def test_files_and_bytes_data(self):
    # Supplying both data= and the deprecated files= is an error.
    with self.assertRaises(ValueError):
        with self.assertWarns(DeprecationWarning):
            ClientRequest(
                'POST', 'http://python.org/',
                data=b'binary data', files={'file': b'file data'})
@unittest.mock.patch('aiohttp.client.aiohttp')
def test_content_encoding(self, m_http):
    # compress='deflate' implies chunked transfer and installs a
    # compression filter on the outgoing message.
    req = ClientRequest('get', 'http://python.org/',
                        compress='deflate', loop=self.loop)
    req.send(self.transport, self.protocol)
    self.assertEqual(req.headers['TRANSFER-ENCODING'], 'chunked')
    self.assertEqual(req.headers['CONTENT-ENCODING'], 'deflate')
    m_http.Request.return_value\
        .add_compression_filter.assert_called_with('deflate')

@unittest.mock.patch('aiohttp.client.aiohttp')
def test_content_encoding_header(self, m_http):
    # An explicit Content-Encoding header triggers the same behaviour,
    # including the default 8192-byte chunking filter.
    req = ClientRequest(
        'get', 'http://python.org/',
        headers={'Content-Encoding': 'deflate'}, loop=self.loop)
    req.send(self.transport, self.protocol)
    self.assertEqual(req.headers['TRANSFER-ENCODING'], 'chunked')
    self.assertEqual(req.headers['CONTENT-ENCODING'], 'deflate')
    m_http.Request.return_value\
        .add_compression_filter.assert_called_with('deflate')
    m_http.Request.return_value\
        .add_chunking_filter.assert_called_with(8192)

def test_chunked(self):
    # Caller-supplied Transfer-Encoding values are preserved as-is,
    # regardless of header-name casing.
    req = ClientRequest(
        'get', 'http://python.org/',
        headers={'TRANSFER-ENCODING': 'gzip'}, loop=self.loop)
    req.send(self.transport, self.protocol)
    self.assertEqual('gzip', req.headers['TRANSFER-ENCODING'])
    req = ClientRequest(
        'get', 'http://python.org/',
        headers={'Transfer-encoding': 'chunked'}, loop=self.loop)
    req.send(self.transport, self.protocol)
    self.assertEqual('chunked', req.headers['TRANSFER-ENCODING'])

@unittest.mock.patch('aiohttp.client.aiohttp')
def test_chunked_explicit(self, m_http):
    # chunked=True enables chunked transfer with the default chunk size.
    req = ClientRequest(
        'get', 'http://python.org/', chunked=True, loop=self.loop)
    req.send(self.transport, self.protocol)
    self.assertEqual('chunked', req.headers['TRANSFER-ENCODING'])
    m_http.Request.return_value\
        .add_chunking_filter.assert_called_with(8192)

@unittest.mock.patch('aiohttp.client.aiohttp')
def test_chunked_explicit_size(self, m_http):
    # An integer chunked= value selects a custom chunk size.
    req = ClientRequest(
        'get', 'http://python.org/', chunked=1024, loop=self.loop)
    req.send(self.transport, self.protocol)
    self.assertEqual('chunked', req.headers['TRANSFER-ENCODING'])
    m_http.Request.return_value\
        .add_chunking_filter.assert_called_with(1024)

def test_chunked_length(self):
    # Chunked transfer wins over a caller-supplied Content-Length,
    # which must be dropped (the two are mutually exclusive).
    req = ClientRequest(
        'get', 'http://python.org/',
        headers={'CONTENT-LENGTH': '1000'}, chunked=1024, loop=self.loop)
    req.send(self.transport, self.protocol)
    self.assertEqual(req.headers['TRANSFER-ENCODING'], 'chunked')
    self.assertNotIn('CONTENT-LENGTH', req.headers)
def test_expect100(self):
    # expect100=True (or an explicit Expect header) sets the header and
    # creates the internal future used to wait for the 100 response.
    req = ClientRequest('get', 'http://python.org/',
                        expect100=True, loop=self.loop)
    req.send(self.transport, self.protocol)
    self.assertEqual('100-continue', req.headers['EXPECT'])
    self.assertIsNotNone(req._continue)
    req = ClientRequest('get', 'http://python.org/',
                        headers={'expect': '100-continue'}, loop=self.loop)
    req.send(self.transport, self.protocol)
    self.assertEqual('100-continue', req.headers['EXPECT'])
    self.assertIsNotNone(req._continue)

def test_data_stream(self):
    # A generator body forces chunked transfer; yielded data plus the
    # generator's return value are written, then the chunked EOF marker.
    def gen():
        yield b'binary data'
        return b' result'
    req = ClientRequest(
        'POST', 'http://python.org/', data=gen(), loop=self.loop)
    self.assertTrue(req.chunked)
    self.assertTrue(inspect.isgenerator(req.body))
    self.assertEqual(req.headers['TRANSFER-ENCODING'], 'chunked')
    resp = req.send(self.transport, self.protocol)
    self.assertIsInstance(req._writer, asyncio.Future)
    self.loop.run_until_complete(resp.wait_for_close())
    self.assertIsNone(req._writer)
    self.assertEqual(
        self.transport.write.mock_calls[-3:],
        [unittest.mock.call(b'binary data result'),
         unittest.mock.call(b'\r\n'),
         unittest.mock.call(b'0\r\n\r\n')])

def test_data_file(self):
    # A file-like body behaves like a stream: chunked transfer, written
    # asynchronously, writer future cleared once the response closes.
    req = ClientRequest(
        'POST', 'http://python.org/', data=io.BytesIO(b'*' * 2),
        loop=self.loop)
    self.assertTrue(req.chunked)
    self.assertTrue(isinstance(req.body, io.IOBase))
    self.assertEqual(req.headers['TRANSFER-ENCODING'], 'chunked')
    resp = req.send(self.transport, self.protocol)
    self.assertIsInstance(req._writer, asyncio.Future)
    self.loop.run_until_complete(resp.wait_for_close())
    self.assertIsNone(req._writer)
    self.assertEqual(
        self.transport.write.mock_calls[-3:],
        [unittest.mock.call(b'*' * 2),
         unittest.mock.call(b'\r\n'),
         unittest.mock.call(b'0\r\n\r\n')])

def test_data_stream_exc(self):
    # An exception raised inside the body generator must close the
    # connection and be propagated to the protocol.
    # NOTE(review): asyncio.async() was removed in Python 3.10 — this
    # code targets a legacy asyncio API.
    fut = asyncio.Future(loop=self.loop)
    def gen():
        yield b'binary data'
        yield from fut
        return b' result'
    req = ClientRequest(
        'POST', 'http://python.org/', data=gen(), loop=self.loop)
    self.assertTrue(req.chunked)
    self.assertTrue(inspect.isgenerator(req.body))
    self.assertEqual(req.headers['TRANSFER-ENCODING'], 'chunked')
    @asyncio.coroutine
    def exc():
        yield from asyncio.sleep(0.01, loop=self.loop)
        fut.set_exception(ValueError)
    asyncio.async(exc(), loop=self.loop)
    resp = req.send(self.transport, self.protocol)
    resp.connection = self.connection
    self.loop.run_until_complete(req._writer)
    self.assertTrue(self.connection.close.called)
    self.assertTrue(self.protocol.set_exception.called)
def test_data_stream_not_bytes(self):
    # Yielding a non-bytes object from the body stream is reported to
    # the protocol as an exception.
    @asyncio.coroutine
    def gen():
        yield object()
        return b' result'
    req = ClientRequest(
        'POST', 'http://python.org/', data=gen(), loop=self.loop)
    req.send(self.transport, self.protocol)
    self.loop.run_until_complete(req._writer)
    self.assertTrue(self.protocol.set_exception.called)

def test_data_stream_exc_chain(self):
    # The original stream error must be chained (__cause__/__context__)
    # into the ClientRequestError handed to the protocol.
    fut = asyncio.Future(loop=self.loop)
    def gen():
        yield from fut
        return b' result'
    req = ClientRequest(
        'POST', 'http://python.org/', data=gen(), loop=self.loop)
    inner_exc = ValueError()
    @asyncio.coroutine
    def exc():
        yield from asyncio.sleep(0.01, loop=self.loop)
        fut.set_exception(inner_exc)
    asyncio.async(exc(), loop=self.loop)
    resp = req.send(self.transport, self.protocol)
    resp.connection = self.connection
    self.loop.run_until_complete(req._writer)
    self.assertTrue(self.connection.close.called)
    self.assertTrue(self.protocol.set_exception.called)
    outer_exc = self.protocol.set_exception.call_args[0][0]
    self.assertIsInstance(outer_exc, aiohttp.ClientRequestError)
    self.assertIs(inner_exc, outer_exc.__context__)
    self.assertIs(inner_exc, outer_exc.__cause__)

def test_data_stream_continue(self):
    # With expect100, the streamed body is only written after the
    # 100-continue future resolves.
    def gen():
        yield b'binary data'
        return b' result'
    req = ClientRequest(
        'POST', 'http://python.org/', data=gen(),
        expect100=True, loop=self.loop)
    self.assertTrue(req.chunked)
    self.assertTrue(inspect.isgenerator(req.body))
    def coro():
        yield from asyncio.sleep(0.0001, loop=self.loop)
        req._continue.set_result(1)
    asyncio.async(coro(), loop=self.loop)
    req.send(self.transport, self.protocol)
    self.loop.run_until_complete(req._writer)
    self.assertEqual(
        self.transport.write.mock_calls[-3:],
        [unittest.mock.call(b'binary data result'),
         unittest.mock.call(b'\r\n'),
         unittest.mock.call(b'0\r\n\r\n')])
def test_data_continue(self):
    # With expect100 and a bytes body, only the headers are written
    # (one transport write) until the continue future resolves.
    req = ClientRequest(
        'POST', 'http://python.org/', data=b'data',
        expect100=True, loop=self.loop)
    def coro():
        yield from asyncio.sleep(0.0001, loop=self.loop)
        req._continue.set_result(1)
    asyncio.async(coro(), loop=self.loop)
    req.send(self.transport, self.protocol)
    self.assertEqual(1, len(self.transport.write.mock_calls))
    self.loop.run_until_complete(req._writer)
    self.assertEqual(
        self.transport.write.mock_calls[-1],
        unittest.mock.call(b'data'))

def test_close(self):
    # close() drains the pending coroutine body and finishes the
    # chunked message before completing.
    @asyncio.coroutine
    def gen():
        yield from asyncio.sleep(0.00001, loop=self.loop)
        return b'result'
    req = ClientRequest(
        'POST', 'http://python.org/', data=gen(), loop=self.loop)
    req.send(self.transport, self.protocol)
    self.loop.run_until_complete(req.close())
    self.assertEqual(
        self.transport.write.mock_calls[-3:],
        [unittest.mock.call(b'result'),
         unittest.mock.call(b'\r\n'),
         unittest.mock.call(b'0\r\n\r\n')])

def test_custom_response_class(self):
    # response_class= substitutes the object returned by send().
    class CustomResponse(ClientResponse):
        def read(self, decode=False):
            return 'customized!'
    req = ClientRequest(
        'GET', 'http://python.org/', response_class=CustomResponse,
        loop=self.loop)
    resp = req.send(self.transport, self.protocol)
    self.assertEqual('customized!', resp.read())
| 39.578223
| 79
| 0.611074
| 3,577
| 31,623
| 5.287112
| 0.081353
| 0.050127
| 0.036432
| 0.042566
| 0.799651
| 0.762003
| 0.726681
| 0.670685
| 0.64081
| 0.60496
| 0
| 0.011502
| 0.24947
| 31,623
| 798
| 80
| 39.62782
| 0.785329
| 0.001202
| 0
| 0.606383
| 0
| 0
| 0.136613
| 0.014897
| 0
| 0
| 0
| 0
| 0.231003
| 0
| null | null | 0.00152
| 0.015198
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
210eace1f941559dd477cf20d2205ab83dd98fe2
| 8,261
|
py
|
Python
|
pymps/lanczos.py
|
jacobmanalo/dmrg_tool
|
8a14b7f33b77f53df4356f090bdcd1a82b12ff20
|
[
"Apache-2.0"
] | null | null | null |
pymps/lanczos.py
|
jacobmanalo/dmrg_tool
|
8a14b7f33b77f53df4356f090bdcd1a82b12ff20
|
[
"Apache-2.0"
] | null | null | null |
pymps/lanczos.py
|
jacobmanalo/dmrg_tool
|
8a14b7f33b77f53df4356f090bdcd1a82b12ff20
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 11 11:28:56 2022
Original Code from Dr. Salim Belhaiza
https://www.youtube.com/watch?v=S416IbCFeEA&t=185s
"""
import numpy as np
from scipy.sparse import linalg as la
import scipy as SP
import sys
def random_hermitian(n, density=0.01):
    """Return a dense random n x n Hermitian matrix.

    A sparse random matrix ``A`` is drawn and symmetrized as
    ``H = (A + A^dagger) / 2``.  Since ``scipy.sparse.rand`` produces
    real entries, the result is in fact real symmetric (a special case
    of Hermitian).

    Parameters
    ----------
    n : int
        Matrix dimension.
    density : float, optional
        Density of the underlying sparse random matrix in [0, 1]
        (default 0.01, previously hard-coded).

    Returns
    -------
    numpy.ndarray
        Dense (n, n) symmetric matrix.
    """
    A = SP.sparse.rand(n, n, density=density)
    Adag = A.conjugate().transpose()
    # Symmetrize: exact symmetry holds in floating point because
    # (A + A^T)[i, j] and [j, i] are the same sum.
    H = 0.5 * (A + Adag)
    return H.toarray()
def eigensolver(d, e, num_evals):
    """Lowest eigenpairs of a symmetric tridiagonal matrix.

    Parameters
    ----------
    d, e : array_like
        Diagonal (length m) and off-diagonal (length m-1) entries.
    num_evals : int
        How many of the smallest eigenvalues/eigenvectors to return.

    Returns
    -------
    (numpy.ndarray, numpy.ndarray)
        Eigenvalues in ascending order and the matching eigenvectors
        as columns.
    """
    index_window = [0, num_evals - 1]
    return SP.linalg.eigh_tridiagonal(
        d, e, select='i', select_range=index_window)
def lanczos_jake(A,num_iter,num_evals):
    """
    Tridiagonalization of matrix A using Gram-Schmidt Orthogonalization i.e.
    the Lanczos algorithm. To avoid loss of orthogonality due to numerical
    error, I included a QR decomposition of the Krylov vector matrix Q.

    Parameters
    ----------
    A : numpy array
        Matrix
    num_iter : int
        Number of Krylov vectors to be generated
    num_evals : numpy array
        Number of eigenvalues to be generated.

    Returns
    -------
    evals : numpy array
        Array of eigenvalues
    evecs_transformed : numpy array
        Array of eigenvectors in the original basis of A
    """
    m = A.shape[0]
    # NOTE(review): ``b`` plays two roles — it starts as the random
    # initial vector and its entries are then overwritten with the
    # off-diagonal (beta) coefficients via ``b[k] = normv`` below.  At
    # k == 0 the recurrence reads ``b[-1]`` (a leftover random entry);
    # confirm this is intentional.
    b = np.random.rand(m)
    Q = np.zeros((m, num_iter))
    q = b / np.linalg.norm(b)
    Q[:,0] = q
    for k in range(num_iter):
        v = np.dot(A, Q[:,k])
        #alpha[k] = np.dot(Q[:,k],v)
        # Three-term recurrence: remove the components along the two
        # previous Krylov vectors.
        v = v - b[k-1]*Q[:,k-1] - np.dot(Q[:,k],v)*Q[:,k]
        normv = np.linalg.norm(v)
        b[k]=normv
        eps = 1e-12
        if normv > eps:
            q = v/normv
            if k < num_iter - 1:
                Q[:,k+1]=q
        else:
            # Breakdown: the Krylov space is exhausted (invariant
            # subspace found).
            print("norm is zero!",k,normv)
            break
    # Re-orthogonalize the whole Krylov basis to fight the loss of
    # orthogonality accumulated above.
    Q,_ = np.linalg.qr(Q)
    # Project A onto the (re-orthogonalized) Krylov basis and solve the
    # small eigenproblem; 'SR' selects the smallest-real eigenvalues.
    Aprime = np.dot(np.transpose(Q),np.dot(A,Q))
    evals, evecs = la.eigs(Aprime,k=num_evals,which='SR')
    # Map eigenvectors back to the original basis.
    evecs_transformed = np.dot(Q,evecs)
    return evals.real, evecs_transformed
def lanczos(A, r0, num_iter, num_evals):
    """
    Tridiagonalization of matrix A using Gram-Schmidt Orthogonalization i.e.
    the Lanczos algorithm. To avoid loss of orthogonality due to numerical
    error, I included a QR decomposition of the Krylov vector matrix Q.

    Parameters
    ----------
    A : numpy array or sparse matrix
        Matrix (anything providing ``.dot`` and ``.shape``)
    r0 : numpy array
        Initial vector; a random vector is substituted when its norm
        is (numerically) zero.
    num_iter : int
        Number of Krylov vectors to be generated
    num_evals : numpy array
        Number of eigenvalues to be generated.

    Returns
    -------
    evals : numpy array
        Array of eigenvalues
    evecs_transformed : numpy array
        Array of eigenvectors in the original basis of A
    """
    m = A.shape[0]
    eps = 1e-20
    b = r0#np.random.rand(m)
    norm1 = np.linalg.norm(b)
    if norm1 < eps:
        # Degenerate start vector: fall back to a random one.
        b = np.random.rand(m)
    Q = np.zeros((m, num_iter))
    q = b / np.linalg.norm(b)
    Q[:,0] = q
    # NOTE(review): after this assignment ``b`` aliases ``q``, so the
    # ``b[k] = normv`` writes below mutate the normalized start vector
    # in place (and, when norm1 >= eps, b was r0/..., so the caller's
    # data may be indirectly involved).  Verify this dual use of ``b``
    # as beta-coefficient storage is intentional.
    b = q
    for k in range(num_iter):
        #v = np.matmul(A, Q[:,k])
        v = A.dot(Q[:,k])
        #alpha[k] = np.dot(Q[:,k],v)
        # Three-term recurrence; at k == 0 this reads b[-1], the last
        # entry of the start vector.
        v = v - b[k-1]*Q[:,k-1] - np.dot(Q[:,k],v)*Q[:,k]
        normv = np.linalg.norm(v)
        b[k]=normv
        if normv > eps:
            q = v/normv
            if k < num_iter - 1:
                Q[:,k+1]=q
        else:
            # Breakdown: invariant subspace reached.
            print("norm is zero!",k,normv)
            break
    # Re-orthogonalize the Krylov basis, then solve the projected
    # eigenproblem ('SR' = smallest real part).
    Q,_ = np.linalg.qr(Q)
    Aprime = np.dot(np.transpose(Q),A.dot(Q))
    evals, evecs = la.eigs(Aprime,k=num_evals,which='SR')
    evecs_transformed = np.dot(Q,evecs)
    return evals.real, evecs_transformed.real
def lanczos2(A, r0, num_iter):
    """
    Obtain the lowest eigevalue and eigenvector of a Linear Operator A

    Implementation of Lanczos algorithm.
    Following section 4.4 of the book " Templates for the solution of algebraic
    eigenvalue problems: a practical guide"

    Parameters
    ----------
    A : Linear Operator
        A linaer operador with the matrix a matrix-vector multiplication (matvec) as
        a member function.
    r0: ket state compatible with the linear operator
        Initial guest. Compatible mean that the operation A.matvec(r0) is defined
    num_iter : int
        Number max of iteration

    Returns
    -------
    evals : lowest eigenvalue- float 64
    evecs_transformed : ket corresponding to the lowest eigenvalue
    """
    # a: diagonal (alpha) coefficients; b: off-diagonal (beta)
    # coefficients, with b[0] holding the initial residual norm.
    a = []
    b = []
    shape = r0.shape
    # for i in range(len(r0.shape)):
    #     shape.append(r0.shape[i])
    # #shape = r0.shape
    v = []
    eps = sys.float_info.min
    tol = 1e-10
    eval_ref = 1
    # Convergence is tested every ``ntest`` iterations, so round the
    # iteration budget up to a multiple of ntest.
    ntest = 5
    num_iter_max = num_iter + ntest - num_iter%ntest
    # NOTE(review): ``r = r0`` aliases the caller's vector and the
    # in-place ``r -= ...`` updates below mutate it; confirm callers
    # don't rely on r0 being preserved.
    r = r0
    norm = np.linalg.norm(r0)
    b.append(norm)
    # ``id`` shadows the builtin; it records the last iteration index.
    id = 0
    for i in range(num_iter_max):
        id = i
        if b[i] < abs(eps*eval_ref):
            # Residual vanished: restart with a random vector kept
            # orthogonal to the basis built so far.
            r = np.random.rand(*shape)
            r *= 1./np.linalg.norm(r)
            Orthogonalize(r,v)
            b[i] = np.linalg.norm(r)
        v.append(r/b[i])
        r = A.matvec(v[i])
        if i > 0:
            r -= b[i]*v[i-1]
        a.append(np.sum(v[i]*r))
        r -= a[i]*v[i]
        # Full re-orthogonalization against all previous Lanczos vectors.
        Orthogonalize(r,v)
        b.append(np.linalg.norm(r))
        if (i+1)%ntest == 0:
            # Lowest Ritz pair of the current tridiagonal matrix; the
            # residual estimate is |beta_{i+1} * (last evec component)|.
            evals, evecs = SP.linalg.eigh_tridiagonal(a,b[1:id+1],select='i', select_range=[0,0])
            error = abs(b[i+1]*evecs[i])
            eval_ref = evals
            if error < tol:
                break
    if error > tol:
        print("Lanczos failed, residual norm = {}".format(error))
    #res = np.transpose(v).dot(evecs)
    # Assemble the Ritz vector in the original basis.
    # NOTE(review): the loop runs over range(1, id) and thus never adds
    # v[id]*evecs[id] — possibly an off-by-one (range(1, id+1)?); verify.
    evecs_transformed = v[0]*evecs[0]
    for i in range(1,id):
        evecs_transformed += v[i]*evecs[i]
    return evals, evecs_transformed/np.linalg.norm(evecs_transformed)
def Orthogonalize(r, v):
    """Project out of ``r`` (in place) its components along the vectors in ``v``.

    One Gram-Schmidt sweep is always performed.  If the sweep leaves the
    norm of ``r`` at or above ``fac`` times its initial value, a second
    sweep runs to recover orthogonality lost to rounding; otherwise the
    function returns early.
    """
    fac = 0.7
    tol = 1e-14
    initial_norm = np.linalg.norm(r)
    for _sweep in range(2):
        for basis_vec in v:
            overlap = np.sum(r * basis_vec)
            # Skip negligible projections to avoid useless updates.
            if abs(overlap) > tol:
                r -= overlap * basis_vec
        if np.linalg.norm(r) / initial_norm < fac:
            return
#def lanczos2t(A, r0, num_iter):
# """
# Obtain the lowest eigevalue and eigenvector of a Linear Operator A
# Implementation of Lanczos algorithm.
# Following section 4.4 of the book " Templates for the solution of algebraic
# eigenvalue problems: a practical guide"
# Parameters
# ----------
# A : Linear Operator
#
# r0: vector
# Initial guest
# num_iter : int
# Number max of iteration
# Returns
# -------
# evals : numpy array
# Array of eigenvalues
# evecs_transformed : numpy array
# Array of eigenvectors in the original basis of A
# """
# a = []
# b = []
# m = r0.shape[0]
# v = []
# eps = sys.float_info.min
# tol = 1e-13
# eval_ref = 1
# num_iter_max = num_iter + 5 - num_iter%5
# r = r0
# norm = np.linalg.norm(r0)
# b.append(norm)
# id = 0
# for i in range(num_iter_max):
# id = i
#
# if b[i] < abs(eps*eval_ref):
# r = np.random.rand(m)
# r *= 1./np.linalg.norm(r)
# Orthogonalize(r,v)
# b[i] = np.linalg.norm(r)
#
# v.append(r/b[i])
# r = A.dot(v[i])
# if i > 0:
# r -= b[i]*v[i-1]
# a.append(np.dot(v[i],r))
# r -= a[i]*v[i]
# Orthogonalize(r,v)
# b.append(np.linalg.norm(r))
# if (i+1)%5 == 0:
# evals, evecs = SP.linalg.eigh_tridiagonal(a,b[1:id+1],select='i', select_range=[0,0])
# error = abs(b[i+1]*evecs[i])
# eval_ref = evals
# if error < tol:
# break
# if error > tol:
# print("Lanczos failed, residual norm = {}".format(error))
#
# res = np.transpose(v).dot(evecs)
# return evals, res/np.linalg.norm(res)
| 27.445183
| 97
| 0.526692
| 1,180
| 8,261
| 3.633051
| 0.167797
| 0.035923
| 0.047586
| 0.024259
| 0.733147
| 0.714719
| 0.707021
| 0.707021
| 0.695825
| 0.668999
| 0
| 0.021144
| 0.341605
| 8,261
| 301
| 98
| 27.445183
| 0.767053
| 0.461809
| 0
| 0.408333
| 0
| 0
| 0.01735
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05
| false
| 0
| 0.033333
| 0
| 0.133333
| 0.025
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
2114d6302acbdc76e559f24b678d898dd52b2591
| 546
|
py
|
Python
|
todo/validators.py
|
abhijit-mitra/fractal_service
|
234a8ae954eb856ca31e72b003117d8b97ce4171
|
[
"MIT"
] | 1
|
2020-02-10T17:49:35.000Z
|
2020-02-10T17:49:35.000Z
|
todo/validators.py
|
abhijit-mitra/fractal_service
|
234a8ae954eb856ca31e72b003117d8b97ce4171
|
[
"MIT"
] | 5
|
2020-06-06T00:36:03.000Z
|
2022-02-10T14:12:37.000Z
|
todo/validators.py
|
abhijit-mitra/fractal_service
|
234a8ae954eb856ca31e72b003117d8b97ce4171
|
[
"MIT"
] | null | null | null |
# Validation schema for creating a todo item.  Each entry describes one
# accepted request field: its name and expected Python type, plus
# optional flags ('required': False marks an optional field; 'blank'
# appears alongside optional numeric fields — presumably allowing an
# empty value; confirm against the validator that consumes these).
CREATE_TODO = [{
    'field_name': 'bucketName',
    'type': str,
}, {
    'field_name': 'name',
    'type': str,
}, {
    'field_name': 'done',
    'type': bool,
}, {
    'field_name': 'bucketId',
    'type': int,
    'required': False,
    'blank': (True,)
}]

# Validation schema for updating a todo item.
# NOTE(review): unlike CREATE_TODO, here 'done' is optional while
# 'bucketName' carries no 'required' flag — verify this asymmetry is
# intentional.
UPDATE_TODO = [{
    'field_name': 'name',
    'type': str,
}, {
    'field_name': 'done',
    'type': bool,
    'required': False
}, {
    'field_name': 'bucketId',
    'type': int,
    'required': False,
    'blank': (True,)
}, {
    'field_name': 'bucketName',
    'type': str
}]
| 16.545455
| 31
| 0.492674
| 54
| 546
| 4.796296
| 0.296296
| 0.277992
| 0.138996
| 0.185328
| 0.872587
| 0.671815
| 0.671815
| 0.671815
| 0.671815
| 0.316602
| 0
| 0
| 0.269231
| 546
| 32
| 32
| 17.0625
| 0.649123
| 0
| 0
| 0.870968
| 0
| 0
| 0.362637
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
2123b0afaa5ed96d9bc928d0c370baaf17f037f4
| 91
|
py
|
Python
|
SalesApp/apps.py
|
Kayarn-Mechatronics/Octello
|
45f4f73c764ca816918c31ef3ae4889740a68802
|
[
"Apache-2.0"
] | null | null | null |
SalesApp/apps.py
|
Kayarn-Mechatronics/Octello
|
45f4f73c764ca816918c31ef3ae4889740a68802
|
[
"Apache-2.0"
] | null | null | null |
SalesApp/apps.py
|
Kayarn-Mechatronics/Octello
|
45f4f73c764ca816918c31ef3ae4889740a68802
|
[
"Apache-2.0"
] | null | null | null |
from django.apps import AppConfig
class SalesappConfig(AppConfig):
    """Django application configuration for the SalesApp app."""
    # Dotted app label used by Django's app registry.
    name = 'SalesApp'
| 15.166667
| 33
| 0.758242
| 10
| 91
| 6.9
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.164835
| 91
| 5
| 34
| 18.2
| 0.907895
| 0
| 0
| 0
| 0
| 0
| 0.087912
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
2136715c0e34258bc108b432c3821b98c34b07cd
| 14,491
|
py
|
Python
|
src/decoders/arcfactored.py
|
norikinishida/discourse-parsing
|
7377a78cc32ad6430d256694e31ed9426e7c6340
|
[
"Apache-2.0"
] | 2
|
2022-02-16T20:41:22.000Z
|
2022-03-11T18:28:24.000Z
|
src/decoders/arcfactored.py
|
norikinishida/discourse-parsing
|
7377a78cc32ad6430d256694e31ed9426e7c6340
|
[
"Apache-2.0"
] | null | null | null |
src/decoders/arcfactored.py
|
norikinishida/discourse-parsing
|
7377a78cc32ad6430d256694e31ed9426e7c6340
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
# Arc-direction labels used as chart indices (head left of dependent =
# RIGHT arc, head right of dependent = LEFT arc).
RIGHT = 0
LEFT = 1
# Span states for the Eisner chart: COMPLETE spans have received all
# their dependents; INCOMPLETE spans still carry a pending arc.
COMPLETE = 0
INCOMPLETE = 1
class IncrementalEisnerDecoder(object):
    """Eisner decoding applied hierarchically: first inside sentences,
    then inside paragraphs, finally across the whole document, so that
    the resulting dependency tree respects the given boundaries."""

    def __init__(self):
        self.decoder = EisnerDecoder()

    def decode(self,
               arc_scores,
               edu_ids,
               #
               sentence_boundaries,
               paragraph_boundaries,
               use_sentence_boundaries,
               use_paragraph_boundaries,
               #
               gold_heads=None):
        """
        Parameters
        ----------
        arc_scores: numpy.ndarray(shape=(n_edus, n_edus), dtype="float")
        edu_ids: list[int]
        sentence_boundaries: list[(int, int)]
        paragraph_boundaries: list[(int, int)]
        use_sentence_boundaries: bool
        use_paragraph_boundaries: bool
        gold_heads: numpy.ndarray(shape=(n_edus, n_edus), dtype=np.int32) or None

        Returns
        -------
        list[(int, int)]
        """
        assert edu_ids[0] == 0 # ROOT
        arcs = []

        # Exclude ROOT
        new_edu_ids = edu_ids[1:]

        # Sentence-level parsing: parse each sentence span; the heads
        # found become the units for the next level.
        if use_sentence_boundaries:
            target_bnds = sentence_boundaries
            sub_arcs, new_edu_ids = self.apply_decoder(
                arc_scores=arc_scores,
                edu_ids=new_edu_ids,
                target_bnds=target_bnds,
                gold_heads=gold_heads)
            arcs.extend(sub_arcs)

        # Paragraph-level parsing
        if use_paragraph_boundaries:
            if use_sentence_boundaries:
                target_bnds = paragraph_boundaries
            else:
                # Paragraph boundaries are given in sentence indices;
                # translate them to EDU indices.
                target_bnds = [(sentence_boundaries[b][0],sentence_boundaries[e][1]) for b,e in paragraph_boundaries]
            sub_arcs, new_edu_ids = self.apply_decoder(
                arc_scores=arc_scores,
                edu_ids=new_edu_ids,
                target_bnds=target_bnds,
                gold_heads=gold_heads)
            arcs.extend(sub_arcs)

        # Document-level parsing over the surviving heads.
        sub_arcs, head = self.decoder.decode_without_root(
            arc_scores=arc_scores,
            edu_ids=new_edu_ids,
            gold_heads=gold_heads)
        arcs.extend(sub_arcs)

        # Root attachment
        arcs.append((0, head))
        return arcs

    def apply_decoder(self,
                      arc_scores,
                      edu_ids,
                      target_bnds,
                      gold_heads):
        """Parse each (begin, end) span independently.

        Parameters
        ----------
        arc_scores: numpy.ndarray(shape=(n_edus, n_edus), dtype="float")
        edu_ids: list[int]
        target_bnds: list[(int, int)]
        gold_heads: numpy.ndarray(shape=(n_edus, n_edus), dtype=np.int32)

        Returns
        -------
        list[(int, int)]
            Arcs found inside the spans.
        list[int]
            The head EDU of each span, in order.
        """
        arcs = [] # list of (int, int)
        new_edu_ids = [] # list of int
        for begin_i, end_i in target_bnds:
            if begin_i == end_i:
                # Single-EDU span: it is its own head, no internal arcs.
                sub_arcs = []
                head = edu_ids[begin_i]
            else:
                sub_arcs, head = self.decoder.decode_without_root(
                    arc_scores=arc_scores,
                    edu_ids=edu_ids[begin_i:end_i+1],
                    gold_heads=gold_heads)
            arcs.extend(sub_arcs)
            new_edu_ids.append(head)
        return arcs, new_edu_ids
class EisnerDecoder(object):
def __init__(self):
pass
def decode(self,
arc_scores,
edu_ids,
gold_heads=None):
"""
Parameters
----------
arc_scores: numpy.ndarray(shape=(n_edus, n_edus), dtype="float")
edu_ids: list[int]
gold_heads: numpy.ndarray(shape=(n_edus, n_edus), dtype=np.int32) or None
Returns
-------
list[(int, int)]
"""
assert edu_ids[0] == 0 # ROOT
# Initialize charts
chart = {} # {(int, int, int, int): float}
back_ptr = {} # {(int, int, int, int): float}
length = len(edu_ids)
# Base case
for i in range(length):
chart[i, i, LEFT, COMPLETE] = 0.0
chart[i, i, RIGHT, COMPLETE] = 0.0
chart[i, i, LEFT, INCOMPLETE] = 0.0
chart[i, i, RIGHT, INCOMPLETE] = 0.0
for i in range(length):
chart[0, i, LEFT, INCOMPLETE] = -np.inf
# General case (without ROOT)
for d in range(1, length):
for i1 in range(1, length - d): # NOTE
i3 = i1 + d
# Incomplete span
# Left tree
max_score = -np.inf
memo = None
arc_score = arc_scores[edu_ids[i3], edu_ids[i1]]
if gold_heads is not None:
if gold_heads[edu_ids[i1]] != edu_ids[i3]:
arc_score += 1.0
for i2 in range(i1, i3):
score = arc_score \
+ chart[i1, i2, RIGHT, COMPLETE] \
+ chart[i2+1, i3, LEFT, COMPLETE]
if max_score < score:
max_score = score
memo = i2
chart[i1, i3, LEFT, INCOMPLETE] = max_score
back_ptr[i1, i3, LEFT, INCOMPLETE] = memo
# Right tree
max_score = -np.inf
memo = None
arc_score = arc_scores[edu_ids[i1], edu_ids[i3]]
if gold_heads is not None:
if gold_heads[edu_ids[i3]] != edu_ids[i1]:
arc_score += 1.0
for i2 in range(i1, i3):
score = arc_score \
+ chart[i1, i2, RIGHT, COMPLETE] \
+ chart[i2+1, i3, LEFT, COMPLETE]
if max_score < score:
max_score = score
memo = i2
chart[i1, i3, RIGHT, INCOMPLETE] = max_score
back_ptr[i1, i3, RIGHT, INCOMPLETE] = memo
# Complete span
# Left tree
max_score = -np.inf
memo = None
for i2 in range(i1, i3):
score = chart[i1, i2, LEFT, COMPLETE] \
+ chart[i2, i3, LEFT, INCOMPLETE]
if max_score < score:
max_score = score
memo = i2
chart[i1, i3, LEFT, COMPLETE] = max_score
back_ptr[i1, i3, LEFT, COMPLETE] = memo
# Right tree
max_score = -np.inf
memo = None
for i2 in range(i1, i3):
score = chart[i1, i2+1, RIGHT, INCOMPLETE] \
+ chart[i2+1, i3, RIGHT, COMPLETE]
if max_score < score:
max_score = score
memo = i2 + 1
chart[i1, i3, RIGHT, COMPLETE] = max_score
back_ptr[i1, i3, RIGHT, COMPLETE] = memo
# ROOT attachment
# arcs = self.recover_tree(back_ptr, 0, length-1, RIGHT, COMPLETE, arcs=None) # NOTE
max_score = -np.inf
memo = None
for i2 in range(1, length):
arc_score = arc_scores[edu_ids[0], edu_ids[i2]]
score = arc_score \
+ chart[0, 0, RIGHT, COMPLETE] \
+ chart[1, i2, LEFT, COMPLETE] \
+ chart[i2, length-1, RIGHT, COMPLETE]
if max_score < score:
max_score = score
memo = i2
chart[0, length-1, RIGHT, COMPLETE] = max_score
back_ptr[0, length-1, RIGHT, COMPLETE] = memo
head = memo
# Recovering dependency arcs
arcs = [(0, head)]
arcs = self.recover_tree(back_ptr, 1, head, LEFT, COMPLETE, arcs=arcs)
arcs = self.recover_tree(back_ptr, head, length-1, RIGHT, COMPLETE, arcs=arcs)
# Shifting: local position -> global position
arcs = [(edu_ids[h], edu_ids[d]) for h,d in arcs]
return arcs
def decode_without_root(self,
arc_scores,
edu_ids,
gold_heads=None):
"""
Parameters
----------
arc_scores: numpy.ndarray(shape=(n_edus, n_edus), dtype="float")
edu_ids: list[int]
gold_heads: numpy.ndarray(shape=(n_edus, n_edus), dtype=np.int32) or None
Returns
list[(int, int)]
int
"""
assert edu_ids[0] != 0 # No ROOT
if len(edu_ids) == 1:
return [], edu_ids[0]
# Initialize charts
chart = {} # {(int, int, int, int): float}
back_ptr = {} # {(int, int, int, int): float}
length = len(edu_ids)
# Base case
for i in range(length):
chart[i, i, LEFT, COMPLETE] = 0.0
chart[i, i, RIGHT, COMPLETE] = 0.0
chart[i, i, LEFT, INCOMPLETE] = 0.0
chart[i, i, RIGHT, INCOMPLETE] = 0.0
# General case
for d in range(1, length):
for i1 in range(0, length - d): # NOTE: index "0" does NOT represent ROOT
i3 = i1 + d
# Incomplete span
# Left tree
max_score = -np.inf
memo = None
arc_score = arc_scores[edu_ids[i3], edu_ids[i1]]
if gold_heads is not None:
if gold_heads[edu_ids[i1]] != edu_ids[i3]:
arc_score += 1.0
for i2 in range(i1, i3):
score = arc_score \
+ chart[i1, i2, RIGHT, COMPLETE] \
+ chart[i2+1, i3, LEFT, COMPLETE]
if max_score < score:
max_score = score
memo = i2
chart[i1, i3, LEFT, INCOMPLETE] = max_score
back_ptr[i1, i3, LEFT, INCOMPLETE] = memo
# Right tree
max_score = -np.inf
memo = None
arc_score = arc_scores[edu_ids[i1], edu_ids[i3]]
if gold_heads is not None:
if gold_heads[edu_ids[i3]] != edu_ids[i1]:
arc_score += 1.0
for i2 in range(i1, i3):
score = arc_score \
+ chart[i1, i2, RIGHT, COMPLETE] \
+ chart[i2+1, i3, LEFT, COMPLETE]
if max_score < score:
max_score = score
memo = i2
chart[i1, i3, RIGHT, INCOMPLETE] = max_score
back_ptr[i1, i3, RIGHT, INCOMPLETE] = memo
# Complete span
# Left tree
max_score = -np.inf
memo = None
for i2 in range(i1, i3):
score = chart[i1, i2, LEFT, COMPLETE] \
+ chart[i2, i3, LEFT, INCOMPLETE]
if max_score < score:
max_score = score
memo = i2
chart[i1, i3, LEFT, COMPLETE] = max_score
back_ptr[i1, i3, LEFT, COMPLETE] = memo
# Right tree
max_score = -np.inf
memo = None
for i2 in range(i1, i3):
score = chart[i1, i2+1, RIGHT, INCOMPLETE] \
+ chart[i2+1, i3, RIGHT, COMPLETE]
if max_score < score:
max_score = score
memo = i2 + 1
chart[i1, i3, RIGHT, COMPLETE] = max_score
back_ptr[i1, i3, RIGHT, COMPLETE] = memo
# ROOT identification
max_score = -np.inf
memo = None
for i2 in range(0, length):
score = chart[0, i2, LEFT, COMPLETE] \
+ chart[i2, length-1, RIGHT, COMPLETE]
if max_score < score:
max_score = score
memo = i2
head = memo
# Recovering dependency arcs
arcs = self.recover_tree(back_ptr, 0, head, LEFT, COMPLETE, arcs=None)
arcs = self.recover_tree(back_ptr, head, length-1, RIGHT, COMPLETE, arcs=arcs)
# Shifting: local position -> global position
arcs = [(edu_ids[h], edu_ids[d]) for h,d in arcs]
head = edu_ids[head]
return arcs, head
def recover_tree(self, back_ptr, i1, i3, direction, complete, arcs=None):
    """Recursively follow the back-pointer table and collect dependency arcs.

    Parameters
    ----------
    back_ptr: dict[(int, int, int, int), int]
        Split points recorded by the Eisner chart parser.
    i1: int
        Left end of the span (local EDU position).
    i3: int
        Right end of the span (local EDU position).
    direction: int
        LEFT or RIGHT.
    complete: int
        COMPLETE or INCOMPLETE.
    arcs: list[(int, int)] or None
        Accumulator; a fresh list is created when None.

    Returns
    -------
    list[(int, int)]
        (head, dependent) pairs in local positions.
    """
    if arcs is None:
        arcs = []
    # A single-position span yields no arcs.
    if i1 == i3:
        return arcs
    split = back_ptr[i1, i3, direction, complete]
    if complete == COMPLETE:
        # A complete span splits into a complete part and an incomplete part
        # on the side of the head.
        if direction == LEFT:
            sub_spans = ((i1, split, LEFT, COMPLETE),
                         (split, i3, LEFT, INCOMPLETE))
        else:
            sub_spans = ((i1, split, RIGHT, INCOMPLETE),
                         (split, i3, RIGHT, COMPLETE))
    else:
        # An incomplete span contributes the arc between its endpoints;
        # its two halves are always (right-complete, left-complete).
        arcs.append((i3, i1) if direction == LEFT else (i1, i3))
        sub_spans = ((i1, split, RIGHT, COMPLETE),
                     (split + 1, i3, LEFT, COMPLETE))
    for left, right, d, c in sub_spans:
        arcs = self.recover_tree(back_ptr, left, right, d, c, arcs=arcs)
    return arcs
| 36.047264
| 117
| 0.459182
| 1,589
| 14,491
| 4.006923
| 0.069855
| 0.050887
| 0.040836
| 0.030627
| 0.773206
| 0.766923
| 0.719648
| 0.694519
| 0.678813
| 0.671902
| 0
| 0.030228
| 0.445242
| 14,491
| 401
| 118
| 36.137157
| 0.761786
| 0.143192
| 0
| 0.764479
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011583
| 1
| 0.027027
| false
| 0.003861
| 0.003861
| 0
| 0.065637
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
2142da03589a5f59758f0a45ca10464c6cbed83b
| 55
|
py
|
Python
|
nnuncert/models/pbp/__init__.py
|
pjoachims/nnuncert
|
45dede54fdb714926926d719be2c9b9b542b2601
|
[
"MIT"
] | 2
|
2021-12-30T06:25:43.000Z
|
2022-01-25T00:41:22.000Z
|
nnuncert/models/pbp/__init__.py
|
pjoachims/nnuncert
|
45dede54fdb714926926d719be2c9b9b542b2601
|
[
"MIT"
] | 1
|
2022-01-25T00:35:28.000Z
|
2022-03-28T15:23:16.000Z
|
nnuncert/models/pbp/__init__.py
|
pjoachims/nnuncert
|
45dede54fdb714926926d719be2c9b9b542b2601
|
[
"MIT"
] | null | null | null |
from nnuncert.models.pbp.model import PBPModel, PBPPred
| 55
| 55
| 0.854545
| 8
| 55
| 5.875
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.072727
| 55
| 1
| 55
| 55
| 0.921569
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
dcdc351e3f1ef4a2167b8f72258374c86a28517f
| 308
|
py
|
Python
|
opencv_learn/charpter15/test.py
|
zhangxinzhou/play_game
|
854448f8416b2d3f98bb2c3ed0f7d834a61593de
|
[
"Apache-2.0"
] | null | null | null |
opencv_learn/charpter15/test.py
|
zhangxinzhou/play_game
|
854448f8416b2d3f98bb2c3ed0f7d834a61593de
|
[
"Apache-2.0"
] | null | null | null |
opencv_learn/charpter15/test.py
|
zhangxinzhou/play_game
|
854448f8416b2d3f98bb2c3ed0f7d834a61593de
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np

# 3x5 integer matrix used to demonstrate np.where and tuple unpacking.
am = np.array([[3, 6, 8, 77, 66],
               [1, 2, 88, 3, 98],
               [11, 2, 67, 5, 2]])

# With a single condition, np.where returns a tuple of index arrays,
# one per axis: (row indices, column indices).
b = np.where(am > 5)
print(type(b))
print(b)
print("==========")
# zip(b) wraps each index array in a 1-tuple -- it does NOT pair them.
for item in zip(b):
    print(item)
print("==========")
# Unpacking the tuple prints the two index arrays side by side.
print(*b)
print("==========")
# zip(*b) pairs row and column indices into (row, col) coordinates.
for coord in zip(*b):
    print(coord)
| 12.833333
| 26
| 0.405844
| 51
| 308
| 2.45098
| 0.509804
| 0.24
| 0.176
| 0.224
| 0.432
| 0.432
| 0.432
| 0.432
| 0.432
| 0.432
| 0
| 0.100917
| 0.292208
| 308
| 23
| 27
| 13.391304
| 0.472477
| 0
| 0
| 0.263158
| 0
| 0
| 0.097403
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.052632
| 0
| 0.052632
| 0.421053
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 4
|
0d3db71ae427d65d2e4455da73eb382d03b40227
| 609
|
py
|
Python
|
climetlab/core/docstring.py
|
sylvielamythepaut/climetlab
|
59516b8a510ad506a12ad32bea9e8b98bdb9abf3
|
[
"Apache-2.0"
] | 1
|
2021-10-02T12:30:12.000Z
|
2021-10-02T12:30:12.000Z
|
climetlab/core/docstring.py
|
sylvielamythepaut/climetlab
|
59516b8a510ad506a12ad32bea9e8b98bdb9abf3
|
[
"Apache-2.0"
] | null | null | null |
climetlab/core/docstring.py
|
sylvielamythepaut/climetlab
|
59516b8a510ad506a12ad32bea9e8b98bdb9abf3
|
[
"Apache-2.0"
] | null | null | null |
# (C) Copyright 2020 ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation
# nor does it submit to any jurisdiction.
#
# Keep linters happy
# N801 = classes should start with uppercase
class docstring:  # noqa: N801
    """Decorator that currently returns the target callable unchanged.

    Placeholder for future docstring post-processing (e.g. appending
    generated documentation to ``func.__doc__``).
    """

    def __init__(self):
        # No configuration is needed yet.
        pass

    def __call__(self, func):
        # Intentionally a pass-through for now.
        return func
| 29
| 78
| 0.712644
| 88
| 609
| 4.795455
| 0.806818
| 0.009479
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.029106
| 0.210181
| 609
| 20
| 79
| 30.45
| 0.848233
| 0.766831
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0.2
| 0
| 0.2
| 0.8
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 4
|
b4c056643da6f1fbb3dd080732017533a6843b7f
| 375
|
py
|
Python
|
trec2015/cuttsum/pipeline/__init__.py
|
kedz/cuttsum
|
992c21192af03fd2ef863f5ab7d10752f75580fa
|
[
"Apache-2.0"
] | 6
|
2015-09-10T02:22:21.000Z
|
2021-10-01T16:36:46.000Z
|
trec2015/cuttsum/pipeline/__init__.py
|
kedz/cuttsum
|
992c21192af03fd2ef863f5ab7d10752f75580fa
|
[
"Apache-2.0"
] | null | null | null |
trec2015/cuttsum/pipeline/__init__.py
|
kedz/cuttsum
|
992c21192af03fd2ef863f5ab7d10752f75580fa
|
[
"Apache-2.0"
] | 2
|
2018-04-04T10:44:32.000Z
|
2021-10-01T16:37:26.000Z
|
from cuttsum.pipeline._article_annotator import ArticlesResource
from cuttsum.pipeline._dedupe import DedupedArticlesResource
from cuttsum.pipeline._features import SentenceFeaturesResource
from cuttsum.pipeline._input_stream import InputStreamResource
__all__ = ["ArticlesResource", "DedupedArticlesResource",
"SentenceFeaturesResource", "InputStreamResource"]
| 46.875
| 64
| 0.848
| 31
| 375
| 9.935484
| 0.483871
| 0.142857
| 0.246753
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.093333
| 375
| 7
| 65
| 53.571429
| 0.905882
| 0
| 0
| 0
| 0
| 0
| 0.218667
| 0.125333
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
b4fcfcdcd2f4bfb8a31e1b613345fad0c5f69146
| 181
|
py
|
Python
|
pm4pyws/handlers/xes/filtering/versions/__init__.py
|
ehbasouri/pm4py-ws
|
9bf5f88848a4aa2873bae86af95d37f64ae1dde1
|
[
"Apache-2.0"
] | null | null | null |
pm4pyws/handlers/xes/filtering/versions/__init__.py
|
ehbasouri/pm4py-ws
|
9bf5f88848a4aa2873bae86af95d37f64ae1dde1
|
[
"Apache-2.0"
] | null | null | null |
pm4pyws/handlers/xes/filtering/versions/__init__.py
|
ehbasouri/pm4py-ws
|
9bf5f88848a4aa2873bae86af95d37f64ae1dde1
|
[
"Apache-2.0"
] | null | null | null |
from pm4pyws.handlers.xes.filtering.versions import start_activities, end_activities, attributes_pos_trace, \
attributes_neg_trace, attributes_pos_events, attributes_neg_events
| 60.333333
| 109
| 0.867403
| 23
| 181
| 6.391304
| 0.652174
| 0.176871
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005988
| 0.077348
| 181
| 2
| 110
| 90.5
| 0.874252
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
3701addf0f1b3f3999af1c9ffe555fda45bfbb45
| 5,022
|
py
|
Python
|
TokenizeTweet.py
|
jtpastro/twitter_traffic_incident_sensor
|
d9191e4db4dfe48e689817511e3b97b5901893ac
|
[
"MIT"
] | null | null | null |
TokenizeTweet.py
|
jtpastro/twitter_traffic_incident_sensor
|
d9191e4db4dfe48e689817511e3b97b5901893ac
|
[
"MIT"
] | null | null | null |
TokenizeTweet.py
|
jtpastro/twitter_traffic_incident_sensor
|
d9191e4db4dfe48e689817511e3b97b5901893ac
|
[
"MIT"
] | null | null | null |
from nltk.tokenize import TweetTokenizer
from nltk.stem import RSLPStemmer
from unidecode import unidecode as unicodeToAscii
from nltk.corpus import stopwords
import re
class Token(str):
    """String subclass that normalises a raw token into a vocabulary class.

    Lower-case words (and #hashtags / @mentions) are kept verbatim; times,
    dates and numbers collapse onto placeholder tokens; anything else becomes
    the empty string.  The raw text is preserved on the ``value`` attribute.
    """

    timeRegex = re.compile(r'^\d{1,2}((h(\d{2})?)|:\d{2})$')
    dateRegex = re.compile(r'^\d{1,2}/\d{1,2}(/\d{2,4})?$')
    wordRegex = re.compile(r'^([a-z-]+|[#@]\w+)$')
    numberRegex = re.compile(r'^-?\d+([,\.]?\d)*$')

    def __new__(cls, value):
        # Order matters: word-like tokens are kept as-is before any
        # placeholder substitution is attempted.
        if Token.wordRegex.match(value):
            normalised = value
        elif Token.timeRegex.match(value):
            normalised = '__TTKN__'
        elif Token.dateRegex.match(value):
            normalised = '__DTKN__'
        elif Token.numberRegex.match(value):
            normalised = '__NTKN__'
        else:
            normalised = ''
        obj = str.__new__(cls, normalised)
        obj.value = value
        return obj
class FilterTokenizer(TweetTokenizer):
    """Tweet tokenizer that lowercases, ASCII-folds, and filters tokens.

    Wraps NLTK's TweetTokenizer; optionally removes Portuguese stopwords,
    applies RSLP stemming, and (via ``Token``) groups dates/times/numbers
    into placeholder classes.
    """

    def __init__(self, filterStopwords=True, stemming=False, groupClasses=True):
        self.groupClasses = groupClasses
        if stemming:
            self.stemmer = RSLPStemmer()
        else:
            # No-op stand-in exposing the same .stem() interface as RSLPStemmer.
            self.stemmer = lambda: None
            self.stemmer.stem = lambda x: x
        # Stopwords are ASCII-folded so they match the ASCII-folded tweet text.
        self.stopwords = [unicodeToAscii(sw) for sw in stopwords.words('portuguese')] if filterStopwords else []
        super().__init__(preserve_case=False,reduce_len=True)

    def tokenize(self, tweet):
        # Generator: yields filtered (and possibly stemmed) tokens one by one.
        for tkn in super().tokenize(unicodeToAscii(tweet)):
            if self.groupClasses:
                # Token() maps unrecognised tokens to '', dropped by the
                # length check below.
                tkn = Token(tkn)
            if len(tkn) > 1 and not tkn in self.stopwords:
                yield self.stemmer.stem(tkn)
if __name__ == '__main__':
tt = FilterTokenizer()
tweets = ["Problema mesmo \u00e9 na BR386: diria que h\u00e1 uns 10 km de congestionamento em cada sentido. \nNo C/I, tranca da BR448 at\u00e9 a Ponte do Ca\u00ed.\nNo I/C, tranca antes do acesso ao polo petroqu\u00edmico at\u00e9 a Ponte do Ca\u00ed.\nVai demorar algumas horas pra normalizar ap\u00f3s acidente @GauchaZH", "16h43 - Aproveite o final de semana com consci\u00eancia! \u00c1lcool e dire\u00e7\u00e3o n\u00e3o combinam. #Educa\u00e7\u00e3oEPTCpic.twitter.com/FAHeKL4UKF", "Fim das obras na Av. Crist\u00f3v\u00e3o Colombo com Ramiro Barcelos. Tr\u00e2nsito volta a fluir melhor na regi\u00e3o. Mas Ramiro segue movimentada na descida, rumo \u00e0 Legalidade @GauchaZH", "16h37 - Tr\u00e2nsito totalmente liberado na R. Ramiro Barcelos esq. com a Av. Crist\u00f3v\u00e3o Colombo. Tr\u00e2nsito fluindo bem na regi\u00e3o.", "Regi\u00e3o do Aeroporto bastante movimentada nesta tarde. Sa\u00edda com mais tr\u00e2nsito pela Terceira Perimetral e Sert\u00f3rio. Chegada \u00e0 Capital ainda sem tranqueiras @GauchaZHpic.twitter.com/uCD6lquRpP", " ATEN\u00c7\u00c3O PARA BLOQUEIO pic.twitter.com/1S8bokO7rq", " ATEN\u00c7\u00c3O PARA BLOQUEIO pic.twitter.com/IAttzhDSkU", "O curso EAD \"Pedalando com seguran\u00e7a\" gratuito\n\nInscri\u00e7\u00f5es: https://goo.gl/8aPAmJ\u00a0pic.twitter.com/S0nLHY7eBB", "ATEN\u00c7\u00c3O!!!!https://twitter.com/PRF191RS/status/972483823580647427\u00a0\u2026", "https://gauchazh.clicrbs.com.br/esportes/gauchao/noticia/2018/03/bm-reforca-policiamento-no-entorno-do-beira-rio-e-orienta-deslocamento-de-torcidas-para-o-gre-nal-413-cjelfllns01xs01p46a7ljlfu.html\u00a0\u2026", "BM refor\u00e7a policiamento no entorno do Beira-Rio e orienta deslocamento de torcidas para o Gre-Nal 413. 
O esquema de seguran\u00e7a, tr\u00e2nsito e locais das concentra\u00e7\u00f5es de torcidas aqui: \nhttps://gauchazh.clicrbs.com.br/esportes/gauchao/noticia/2018/03/bm-reforca-policiamento-no-entorno-do-beira-rio-e-orienta-deslocamento-de-torcidas-para-o-gre-nal-413-cjelfllns01xs01p46a7ljlfu.html\u00a0\u2026 @GauchaZHpic.twitter.com/tYjCSS3umr", "concentra\u00e7\u00e3o na pra\u00e7a do canh\u00e3o, na marinha. sa\u00edda \u00e0s 15h pro est\u00e1dio", "Segundo a EPTC, a tarifa da lota\u00e7\u00e3o pode variar no m\u00ednimo 1,4 vezes o valor do \u00f4nibus. Com a passagem de \u00f4nibus a 4,30, d\u00e1 6,02 em 1,4x. Foi arrendondado pra mais, 6,05, porque se ficasse em 6 a\u00ed seria menos de 1,4x. Entende? hehe", "Pois\u00e9, estamos esclarecendo isso agora. Obrigada pelo toque!", "a\u00ed tem que ver com as torcidas organizadas. s\u00e3o eles que organizam esses transportes.", "N\u00e3o h\u00e1 ciclofaixa? Ande do lado direito junto ao meio fio.pic.twitter.com/plQ5kMuVgU", "Usu\u00e1rios t\u00eam at\u00e9 segunda para recarregar cart\u00e3o TRI sem o reajuste da passagem de \u00f4nibus em Porto Alegre. Mais esclarecimentos sobre as mudan\u00e7as na tarifa em @GauchaZH: https://gauchazh.clicrbs.com.br/porto-alegre/noticia/2018/03/usuarios-tem-ate-segunda-para-recarregar-cartao-tri-sem-o-reajuste-da-passagem-de-onibus-em-porto-alegre-cjelddax701y101r4lbpirec4.html\u00a0\u2026 pic.twitter.com/th6keq4zOy",
"a 10/12 1:2 c/i centro/bairro centro-bairro"]
for tweet in tweets:
print(" ".join([tkn for tkn in tt.tokenize(tweet)]))
| 102.489796
| 3,164
| 0.725408
| 734
| 5,022
| 4.889646
| 0.438692
| 0.025077
| 0.012538
| 0.016718
| 0.220396
| 0.197548
| 0.165784
| 0.138479
| 0.116188
| 0.116188
| 0
| 0.085101
| 0.155317
| 5,022
| 49
| 3,165
| 102.489796
| 0.760962
| 0
| 0
| 0.044444
| 0
| 0.288889
| 0.64344
| 0.072068
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0.022222
| 0.111111
| 0
| 0.333333
| 0.022222
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
2e9a7cbe81552d1fd36c3adbe156d256d2b8546b
| 193
|
py
|
Python
|
pingdomexport/tests/test_export.py
|
mattboston/pingdomexport
|
1cd7acbf813abee0b9a7865b9cd4a1e166d55c37
|
[
"MIT"
] | 4
|
2018-01-25T09:18:38.000Z
|
2021-02-12T18:36:08.000Z
|
pingdomexport/tests/test_export.py
|
mattboston/pingdomexport
|
1cd7acbf813abee0b9a7865b9cd4a1e166d55c37
|
[
"MIT"
] | 1
|
2018-12-04T18:42:06.000Z
|
2021-05-25T14:03:32.000Z
|
pingdomexport/tests/test_export.py
|
mattboston/pingdomexport
|
1cd7acbf813abee0b9a7865b9cd4a1e166d55c37
|
[
"MIT"
] | 3
|
2019-04-30T11:52:14.000Z
|
2021-03-24T20:58:04.000Z
|
import pytest
from pingdomexport import export
class TestExport:
    """Unit tests for export.Export construction."""

    def test_export_path_unrecognized(self):
        # Constructing an Export with an unknown path must raise ValueError.
        pytest.raises(ValueError, export.Export, "unrecognized")
| 24.125
| 44
| 0.735751
| 21
| 193
| 6.619048
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.196891
| 193
| 7
| 45
| 27.571429
| 0.896774
| 0
| 0
| 0
| 0
| 0
| 0.062176
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
2ea00266b8ce0afa65518c54a9e3e2b20a676e64
| 5,746
|
py
|
Python
|
environments/MiniGrid/loggingFunctions.py
|
vanstrn/RL_public
|
0e971e40e063b17918460e19728f95d7924af8db
|
[
"MIT"
] | 1
|
2021-03-19T17:57:51.000Z
|
2021-03-19T17:57:51.000Z
|
environments/MiniGrid/loggingFunctions.py
|
vanstrn/RL_public
|
0e971e40e063b17918460e19728f95d7924af8db
|
[
"MIT"
] | null | null | null |
environments/MiniGrid/loggingFunctions.py
|
vanstrn/RL_public
|
0e971e40e063b17918460e19728f95d7924af8db
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
import numpy as np
import itertools
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
def ConstructSampleMG4R(env, position):
    """Build a 2-channel grid observation with the agent placed at *position*.

    The flag cell (argmax of channel 0) is recoded to 8 in all channels, the
    agent cell is marked with 10 in channels 0 and 2, and only channels
    (0, 2) are returned.  Returns None when the requested cell's channel 1
    equals 5 (placement not allowed there).
    """
    grid = env.grid.encode()
    obj_layer = grid[:, :, 0]
    # Locate the flag as the maximum of the object-type channel.
    flag_row, flag_col = np.unravel_index(np.argmax(obj_layer, axis=None), obj_layer.shape)
    grid[flag_row, flag_col] = np.array([8, 8, 8])
    row, col = position[0], position[1]
    if grid[row, col, 1] == 5:
        return None
    grid[row, col, 0] = 10
    grid[row, col, 2] = 10
    # Keep only channels 0 and 2.
    return grid[:, :, np.r_[0, 2]]
def ConstructSampleMG4RP(env,position):
    """Render *env* with the agent moved to *position*.

    Returns None for cells the agent cannot occupy ("goal" or "lava"),
    otherwise the rendered frame.
    """
    # NOTE(review): grid.get() presumably returns None for empty cells, in
    # which case cell.type would raise AttributeError -- confirm callers
    # only pass occupied cells.
    cell = env.grid.get(*position)
    if cell.type in ["goal",'lava']:
        return None
    # Teleport the agent, then render.  The mode string looks deliberately
    # non-standard ("nah, No render") -- presumably to avoid the human
    # render path; verify against the env's render() implementation.
    env.agent_pos = position
    return env.render(mode = "nah, No render")
class ValuePredictionEvaluation(tf.keras.callbacks.Callback):
    """Keras callback that plots predicted value over all grid positions.

    Every `freq` super-epochs it sweeps the environment grid, predicts a
    value for each valid cell, and saves a heatmap PNG to `imageDir`.
    """

    def __init__(self,superEpochs,env,network,imageDir=None,freq=50):
        self.env = env
        # network is an indexable container; slot 3 holds the model used here.
        self.network=network[3]
        self.imageDir=imageDir
        self.freq = freq
        self.superEpochs = superEpochs

    def on_train_end(self, logs=None):
        # Only evaluate every `freq`-th super-epoch.
        if self.superEpochs%self.freq == 0:
            self.env.reset()
            rewardMap = np.zeros([self.env.width,self.env.height])
            for i,j in itertools.product(range(self.env.width),range(self.env.height)):
                grid = ConstructSampleMG4R(self.env,[i,j])
                # None means the cell cannot hold the agent; leave it at 0.
                if grid is None: continue
                value = self.network.predict(np.expand_dims(grid,0))
                rewardMap[i,j] = value
            fig=plt.figure(figsize=(5.5, 5.5))
            fig.add_subplot(1,1,1)
            plt.title("Value Prediction Epoch "+str(self.superEpochs))
            imgplot = plt.imshow(rewardMap)
            fig.colorbar(imgplot)
            plt.savefig(self.imageDir+"/ValuePred"+str(self.superEpochs)+".png")
            plt.close()
class StatePredictionEvaluation(tf.keras.callbacks.Callback):
    """Keras callback that saves an image of the predicted next state.

    Every `freq` epochs the reset state is pushed through the network and
    channel 0 of the predicted next-state grid is written to `imageDir`.
    """

    def __init__(self,env,network,imageDir=None,freq=50):
        self.env = env
        # network is an indexable container; slot 0 holds the model used here.
        self.network=network[0]
        self.imageDir = imageDir
        self.freq = freq

    def on_epoch_end(self,epoch, logs=None):
        # Only evaluate every `freq`-th epoch.
        if epoch%self.freq == 0:
            state = self.env.reset()
            # The model predicts both a next state and a reward; only the
            # state is plotted here.
            state_new,reward = self.network.predict(state)
            fig=plt.figure(figsize=(5.5, 5.5))
            fig.add_subplot(1,1,1)
            plt.title("Predicted Next State Epoch "+str(epoch))
            # Channel 0 of the batched prediction, fixed 0-10 colour scale.
            imgplot = plt.imshow(state_new[0,:,:,0],vmin=0, vmax=10)
            plt.savefig(self.imageDir+"/StatePredEpoch"+str(epoch)+".png")
            plt.close()
class StatePredictionEvaluation_action(tf.keras.callbacks.Callback):
    """Keras callback plotting predicted next states for each of 4 actions.

    Every `freq` epochs it renders the current state plus the network's
    predicted next state under each one-hot action in one 5-panel figure.
    """

    def __init__(self,env,network,imageDir=None,freq=50):
        self.env = env
        # network is an indexable container; slot 0 holds the model used here.
        self.network=network[0]
        self.imageDir = imageDir
        self.freq = freq

    def on_epoch_end(self,epoch, logs=None):
        # Only evaluate every `freq`-th epoch.
        if epoch%self.freq == 0:
            state = self.env.reset()
            fig=plt.figure(figsize=(17, 5.5))
            # Panel 1: the actual current state.
            fig.add_subplot(1,5,1)
            plt.title("State Epoch "+str(epoch))
            imgplot = plt.imshow(state[0,:,:,0],vmin=0, vmax=10)
            # Panels 2-5: predicted next state for each one-hot action.
            for i in range(4):
                act = np.zeros([1,4])
                act[0,i] = 1
                state_new,reward = self.network.predict([act,state])
                fig.add_subplot(1,5,i+2)
                plt.title("Predicted Next State Epoch "+str(epoch))
                imgplot = plt.imshow(state_new[0,:,:,0],vmin=0, vmax=10)
            plt.savefig(self.imageDir+"/StatePredEpoch"+str(epoch)+".png")
            plt.close()
class RewardPredictionEvaluation(tf.keras.callbacks.Callback):
    """Keras callback that plots predicted reward over all grid positions.

    Every `freq` epochs it sweeps the environment grid, predicts the reward
    for each valid cell, and saves a heatmap PNG to `imageDir`.
    """

    def __init__(self,env,network,imageDir=None,freq=50):
        self.env = env
        # network is an indexable container; slot 0 holds the model used here.
        self.network=network[0]
        self.imageDir = imageDir
        self.freq = freq

    def on_epoch_end(self,epoch, logs=None):
        # Only evaluate every `freq`-th epoch.
        if epoch%self.freq == 0:
            self.env.reset()
            rewardMap = np.zeros([self.env.width,self.env.height])
            for i,j in itertools.product(range(self.env.width),range(self.env.height)):
                grid = ConstructSampleMG4R(self.env,[i,j])
                # None means the cell cannot hold the agent; leave it at 0.
                if grid is None: continue
                # The model predicts both a next state and a reward; only
                # the reward is plotted here.
                state_new,reward = self.network.predict(np.expand_dims(grid,0))
                rewardMap[i,j] = reward
            fig=plt.figure(figsize=(5.5, 5.5))
            fig.add_subplot(1,1,1)
            plt.title("Reward Prediction Epoch "+str(epoch))
            imgplot = plt.imshow(rewardMap)
            fig.colorbar(imgplot)
            plt.savefig(self.imageDir+"/RewardPred"+str(epoch)+".png")
            plt.close()
class RewardPredictionEvaluation_action(tf.keras.callbacks.Callback):
    """Keras callback that plots predicted reward over all grid positions.

    Like RewardPredictionEvaluation, but for networks that additionally take
    an action input; an all-zero action vector is fed for every cell.
    """

    def __init__(self, env, network, imageDir=None, freq=50):
        self.env = env
        # network is an indexable container; slot 0 holds the model used here.
        self.network = network[0]
        self.imageDir = imageDir
        self.freq = freq

    def on_epoch_end(self, epoch, logs=None):
        # Only evaluate every `freq`-th epoch.
        if epoch % self.freq != 0:
            return
        self.env.reset()
        rewardMap = np.zeros([self.env.width, self.env.height])
        for i, j in itertools.product(range(self.env.width), range(self.env.height)):
            grid = ConstructSampleMG4R(self.env, [i, j])
            # None means the cell cannot hold the agent; leave it at 0.
            if grid is None:
                continue
            # Feed an all-zero (no-op) action alongside the observation.
            # (An unused one-hot `act` local and its dead commented-out
            # assignment were removed here.)
            state_new, reward = self.network.predict(
                [np.stack([[0, 0, 0, 0]]), np.expand_dims(grid, 0)]
            )
            rewardMap[i, j] = reward
        fig = plt.figure(figsize=(5.5, 5.5))
        fig.add_subplot(1, 1, 1)
        plt.title("Reward Prediction Epoch " + str(epoch))
        imgplot = plt.imshow(rewardMap)
        fig.colorbar(imgplot)
        plt.savefig(self.imageDir + "/RewardPred" + str(epoch) + ".png")
        plt.close()
| 38.563758
| 103
| 0.586147
| 749
| 5,746
| 4.429907
| 0.152203
| 0.061181
| 0.007233
| 0.025316
| 0.802592
| 0.757987
| 0.729958
| 0.697408
| 0.685654
| 0.685654
| 0
| 0.027585
| 0.274452
| 5,746
| 148
| 104
| 38.824324
| 0.76829
| 0.002088
| 0
| 0.653846
| 0
| 0
| 0.042045
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.092308
| false
| 0
| 0.038462
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
2ea1b5690a4c5c82af8619cc0e9a2fcfb34a9b8a
| 20
|
py
|
Python
|
example_snippets/multimenus_snippets/Snippets/NumPy/Vectorized (universal) functions/Sums, products, differences within array/cumsum Cumulative sum of the elements along a given axis.py
|
kuanpern/jupyterlab-snippets-multimenus
|
477f51cfdbad7409eab45abe53cf774cd70f380c
|
[
"BSD-3-Clause"
] | null | null | null |
example_snippets/multimenus_snippets/Snippets/NumPy/Vectorized (universal) functions/Sums, products, differences within array/cumsum Cumulative sum of the elements along a given axis.py
|
kuanpern/jupyterlab-snippets-multimenus
|
477f51cfdbad7409eab45abe53cf774cd70f380c
|
[
"BSD-3-Clause"
] | null | null | null |
example_snippets/multimenus_snippets/Snippets/NumPy/Vectorized (universal) functions/Sums, products, differences within array/cumsum Cumulative sum of the elements along a given axis.py
|
kuanpern/jupyterlab-snippets-multimenus
|
477f51cfdbad7409eab45abe53cf774cd70f380c
|
[
"BSD-3-Clause"
] | 1
|
2021-02-04T04:51:48.000Z
|
2021-02-04T04:51:48.000Z
|
# Cumulative sum of the elements of `a` along axis 0 (down the columns).
np.cumsum(a, axis=0)
| 20
| 20
| 0.7
| 5
| 20
| 2.8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.052632
| 0.05
| 20
| 1
| 20
| 20
| 0.684211
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
2eb0eedeada92d18cbeda5e7f5647f60fe40731b
| 121
|
py
|
Python
|
setup.py
|
neerajsharma9195/adversarial-recommendation-systems
|
46500d18a7175237f07df80af4f9b25e9d4c1188
|
[
"MIT"
] | 2
|
2021-03-05T17:12:53.000Z
|
2021-03-19T18:04:20.000Z
|
setup.py
|
neerajsharma9195/adversarial-recommendation-systems
|
46500d18a7175237f07df80af4f9b25e9d4c1188
|
[
"MIT"
] | 1
|
2021-03-06T00:58:56.000Z
|
2021-03-06T00:58:56.000Z
|
setup.py
|
neerajsharma9195/adversarial-recommendation-systems
|
46500d18a7175237f07df80af4f9b25e9d4c1188
|
[
"MIT"
] | null | null | null |
from setuptools import setup
from Cython.Build import cythonize
# Compile every Cython source under src/models into an extension module.
setup(
    ext_modules=cythonize("src/models/*.pyx"),
)
| 20.166667
| 47
| 0.760331
| 16
| 121
| 5.6875
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.140496
| 121
| 6
| 48
| 20.166667
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0.131148
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
2edfead133d3f7ee510af74ca2d8c57988538838
| 31
|
py
|
Python
|
Python/float.py
|
zSucrilhos/programming
|
aa0076a4a7084a6064e1e5df258ba0c90cf8ceeb
|
[
"MIT"
] | null | null | null |
Python/float.py
|
zSucrilhos/programming
|
aa0076a4a7084a6064e1e5df258ba0c90cf8ceeb
|
[
"MIT"
] | 4
|
2020-07-18T03:27:03.000Z
|
2020-07-18T03:28:37.000Z
|
Python/float.py
|
zSucrilhos/programming
|
aa0076a4a7084a6064e1e5df258ba0c90cf8ceeb
|
[
"MIT"
] | null | null | null |
# Float literal demo: print the value, then its type.
a = 1.23
print(a)
print(type(a))
| 7.75
| 14
| 0.645161
| 8
| 31
| 2.5
| 0.625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.107143
| 0.096774
| 31
| 3
| 15
| 10.333333
| 0.607143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.666667
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 4
|
2ef97251288ee2925580a4ac7956e57e50d63431
| 736
|
py
|
Python
|
Problems/Dynamic Programming/Easy/PascalTriangle/test_pascal_triangle.py
|
dolong2110/Algorithm-By-Problems-Python
|
31ecc7367aaabdd2b0ac0af7f63ca5796d70c730
|
[
"MIT"
] | 1
|
2021-08-16T14:52:05.000Z
|
2021-08-16T14:52:05.000Z
|
Problems/Dynamic Programming/Easy/PascalTriangle/test_pascal_triangle.py
|
dolong2110/Algorithm-By-Problems-Python
|
31ecc7367aaabdd2b0ac0af7f63ca5796d70c730
|
[
"MIT"
] | null | null | null |
Problems/Dynamic Programming/Easy/PascalTriangle/test_pascal_triangle.py
|
dolong2110/Algorithm-By-Problems-Python
|
31ecc7367aaabdd2b0ac0af7f63ca5796d70c730
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
from pascal_triangle import generate, getRow
class Test(TestCase):
    """Unit tests for pascal_triangle.generate and pascal_triangle.getRow."""

    # First five rows of Pascal's triangle, shared by both tests.
    _ROWS = [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1], [1, 4, 6, 4, 1]]

    def test_generate(self):
        # assertEqual (rather than assertTrue on ==) reports the differing
        # values when a case fails.
        for n in range(1, 6):
            self.assertEqual(generate(n), self._ROWS[:n])

    def test_get_row(self):
        for n in range(5):
            self.assertEqual(getRow(n), self._ROWS[n])
| 40.888889
| 95
| 0.53125
| 112
| 736
| 3.455357
| 0.196429
| 0.087855
| 0.310078
| 0.237726
| 0.426357
| 0.149871
| 0.05168
| 0.05168
| 0.05168
| 0.05168
| 0
| 0.106952
| 0.237772
| 736
| 17
| 96
| 43.294118
| 0.582888
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.666667
| 1
| 0.133333
| false
| 0
| 0.133333
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
2c0a17955f0b3a41b6d6d982db2b74f30e4c43af
| 215
|
py
|
Python
|
Django/api-basic2/accounts/views.py
|
sug5806/TIL
|
2309d8a270e4a7b8961268a40b6492c5db317e37
|
[
"MIT"
] | null | null | null |
Django/api-basic2/accounts/views.py
|
sug5806/TIL
|
2309d8a270e4a7b8961268a40b6492c5db317e37
|
[
"MIT"
] | 102
|
2020-02-12T00:10:33.000Z
|
2022-03-11T23:58:41.000Z
|
Django/api-basic2/accounts/views.py
|
sug5806/TIL
|
2309d8a270e4a7b8961268a40b6492c5db317e37
|
[
"MIT"
] | null | null | null |
from rest_framework import generics
from .serializers import *
# Create your views here.
class AccountLCAPI(generics.ListCreateAPIView):
    """DRF view exposing GET (list) and POST (create) for user accounts."""

    # All users of the configured user model (get_user_model comes in via
    # the star import from .serializers).
    queryset = get_user_model().objects.all()
    # NOTE(review): DRF expects a Serializer subclass here; `Account` reads
    # like a model name -- confirm it is actually a serializer.
    serializer_class = Account
| 26.875
| 47
| 0.786047
| 25
| 215
| 6.6
| 0.84
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.139535
| 215
| 8
| 48
| 26.875
| 0.891892
| 0.106977
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.4
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
2c1c9277947e14ab23b6c2966b1322b29eb10062
| 162
|
py
|
Python
|
problem0427.py
|
kmarcini/Project-Euler-Python
|
d644e8e1ec4fac70a9ab407ad5e1f0a75547c8d3
|
[
"BSD-3-Clause"
] | null | null | null |
problem0427.py
|
kmarcini/Project-Euler-Python
|
d644e8e1ec4fac70a9ab407ad5e1f0a75547c8d3
|
[
"BSD-3-Clause"
] | null | null | null |
problem0427.py
|
kmarcini/Project-Euler-Python
|
d644e8e1ec4fac70a9ab407ad5e1f0a75547c8d3
|
[
"BSD-3-Clause"
] | null | null | null |
###########################
#
# #427 n-sequences - Project Euler
# https://projecteuler.net/problem=427
#
# Code by Kevin Marciniak
#
###########################
| 18
| 38
| 0.462963
| 14
| 162
| 5.357143
| 0.928571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.041667
| 0.111111
| 162
| 8
| 39
| 20.25
| 0.479167
| 0.567901
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
2c294b7cb6cd4e6c15ae5e7c5d1cf3badafff066
| 189
|
py
|
Python
|
comments/non_persistent_models.py
|
taufactor/tau-factor
|
d7883156bd9502742e0ad5c798fa1b2c38c7ff60
|
[
"MIT"
] | null | null | null |
comments/non_persistent_models.py
|
taufactor/tau-factor
|
d7883156bd9502742e0ad5c798fa1b2c38c7ff60
|
[
"MIT"
] | null | null | null |
comments/non_persistent_models.py
|
taufactor/tau-factor
|
d7883156bd9502742e0ad5c798fa1b2c38c7ff60
|
[
"MIT"
] | 1
|
2021-05-18T19:01:14.000Z
|
2021-05-18T19:01:14.000Z
|
import typing
from courses import models as courses_models
class CreateCourseCommentParams(typing.NamedTuple):
    """Immutable parameter bundle for creating a comment on a course."""

    # Target course the comment is attached to.
    course: courses_models.Course
    # Comment headline.
    title: str
    # Comment body text.
    content: str
| 18.9
| 52
| 0.746032
| 21
| 189
| 6.619048
| 0.619048
| 0.18705
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.21164
| 189
| 9
| 53
| 21
| 0.932886
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
257c374a83306162a9cc67c4c3e292c420eab766
| 9,511
|
py
|
Python
|
rlkit/samplers/rollout_functions.py
|
AndrewPaulChester/rlkit
|
0743c713d60250013803f7f158a38b431f6c9fa9
|
[
"MIT"
] | null | null | null |
rlkit/samplers/rollout_functions.py
|
AndrewPaulChester/rlkit
|
0743c713d60250013803f7f158a38b431f6c9fa9
|
[
"MIT"
] | null | null | null |
rlkit/samplers/rollout_functions.py
|
AndrewPaulChester/rlkit
|
0743c713d60250013803f7f158a38b431f6c9fa9
|
[
"MIT"
] | null | null | null |
import numpy as np
from gym_craft.utils.representations import json_to_screen
def multitask_rollout(
    env,
    agent,
    max_path_length=np.inf,
    render=False,
    render_kwargs=None,
    observation_key=None,
    desired_goal_key=None,
    get_action_kwargs=None,
    return_dict_obs=False,
):
    """Roll out *agent* in a goal-conditioned *env* for one episode.

    The goal is read once from the first (dict) observation under
    ``desired_goal_key`` and concatenated onto every observation before it
    is fed to the agent.  Returns a dict of stacked trajectory arrays; when
    ``return_dict_obs`` is True the raw dict observations replace the
    extracted observation arrays.
    """
    if render_kwargs is None:
        render_kwargs = {}
    if get_action_kwargs is None:
        get_action_kwargs = {}
    dict_obs = []
    dict_next_obs = []
    observations = []
    actions = []
    rewards = []
    terminals = []
    agent_infos = []
    env_infos = []
    next_observations = []
    path_length = 0
    agent.reset()
    o = env.reset()
    if render:
        env.render(**render_kwargs)
    # The goal is fixed for the whole episode.
    goal = o[desired_goal_key]
    while path_length < max_path_length:
        dict_obs.append(o)
        if observation_key:
            o = o[observation_key]
        # Goal-conditioned input: observation and goal concatenated.
        new_obs = np.hstack((o, goal))
        a, agent_info = agent.get_action(new_obs, **get_action_kwargs)
        next_o, r, d, env_info = env.step(a)
        if render:
            env.render(**render_kwargs)
        observations.append(o)
        rewards.append(r)
        terminals.append(d)
        actions.append(a)
        next_observations.append(next_o)
        dict_next_obs.append(next_o)
        agent_infos.append(agent_info)
        env_infos.append(env_info)
        path_length += 1
        if d:
            break
        o = next_o
    actions = np.array(actions)
    if len(actions.shape) == 1:
        # Ensure actions are 2-D: (T, action_dim).
        actions = np.expand_dims(actions, 1)
    observations = np.array(observations)
    next_observations = np.array(next_observations)
    if return_dict_obs:
        observations = dict_obs
        next_observations = dict_next_obs
    return dict(
        observations=observations,
        actions=actions,
        rewards=np.array(rewards).reshape(-1, 1),
        next_observations=next_observations,
        terminals=np.array(terminals).reshape(-1, 1),
        agent_infos=agent_infos,
        env_infos=env_infos,
        # The episode goal repeated once per time step.
        goals=np.repeat(goal[None], path_length, 0),
        full_observations=dict_obs,
    )
def rollout(env, agent, max_path_length=np.inf, render=False, render_kwargs=None):
    """
    Roll out *agent* in *env* for a single episode.

    The following value for the following keys will be a 2D array, with the
    first dimension corresponding to the time dimension.
    - observations
    - actions
    - rewards
    - next_observations
    - terminals
    The next two elements will be lists of dictionaries, with the index into
    the list being the index into the time
    - agent_infos
    - env_infos
    """
    if render_kwargs is None:
        render_kwargs = {}
    observations = []
    actions = []
    explored = []
    rewards = []
    terminals = []
    agent_infos = []
    env_infos = []
    o = env.reset()
    agent.reset()
    next_o = None
    path_length = 0
    if render:
        env.render(**render_kwargs)
    while path_length < max_path_length:
        # The agent returns both an action and an exploration flag.
        (a, e), agent_info = agent.get_action(o)
        next_o, r, d, env_info = env.step(a)
        observations.append(o)
        rewards.append(r)
        terminals.append(d)
        actions.append(a)
        explored.append(e)
        agent_infos.append(agent_info)
        env_infos.append(env_info)
        # ADDED THIS SECTION TO HANDLE INTERMEDIATE EXPERIENCE
        # Extra steps executed inside the env also count against the
        # path-length budget.
        if "intermediate_experience" in env_info:
            path_length += len(env_info["intermediate_experience"])
        path_length += 1
        if d:
            break
        o = next_o
        if render:
            env.render(**render_kwargs)
    actions = np.array(actions)
    if len(actions.shape) == 1:
        # Ensure actions are 2-D: (T, action_dim).
        actions = np.expand_dims(actions, 1)
    observations = np.array(observations)
    if len(observations.shape) == 1:
        observations = np.expand_dims(observations, 1)
    # Next-observations are the observations shifted by one step, with the
    # final next_o appended at the end.
    next_o = np.array([next_o])
    next_observations = np.vstack((observations[1:, :], np.expand_dims(next_o, 0)))
    return dict(
        observations=observations,
        actions=actions,
        explored=np.array(explored).reshape(-1, 1),
        rewards=np.array(rewards, dtype=np.float32).reshape(-1, 1),
        next_observations=next_observations,
        terminals=np.array(terminals).reshape(-1, 1),
        agent_infos=agent_infos,
        env_infos=env_infos,
    )
def intermediate_rollout(
    env,
    agent,
    restart=True,
    starting_obs=None,
    max_path_length=np.inf,
    render=False,
    render_kwargs=None,
    experience_interval=1,
):
    """Roll out ``agent`` in ``env``, counting length every ``experience_interval`` steps.

    The value for each of the following keys in the returned path dict is a
    2D array whose first dimension is time:
     - observations
     - actions
     - rewards
     - next_observations
     - terminals
    The next two elements are lists of dictionaries, with the index into
    the list being the index into the time:
     - agent_infos
     - env_infos

    Args:
        env: environment exposing ``reset``/``step`` and, for string
            observations, ``observation_space.converter``.
        agent: policy exposing ``get_action``, ``reset`` and
            ``check_action_status``.
        restart: when True, reset env and agent; otherwise resume from
            ``starting_obs`` (which must then be provided).
        max_path_length: budget on *counted* steps.
        experience_interval: only every ``experience_interval``-th env step
            counts toward ``max_path_length``.

    Returns:
        ``(path_dict, (done, last_obs))``.
    """
    if render_kwargs is None:
        render_kwargs = {}
    observations = []
    actions = []
    explored = []
    rewards = []
    terminals = []
    agent_infos = []
    env_infos = []
    if restart:
        o = env.reset()
        agent.reset()
    else:
        o = starting_obs
    next_o = None
    path_length = 0
    i = 0
    if render:
        env.render(**render_kwargs)
    while path_length < max_path_length:
        (a, e), agent_info = agent.get_action(o)
        try:
            # Unwrap 0-d numpy arrays into plain Python scalars.
            a = a.item()
        except AttributeError:
            pass
        if isinstance(o, str):
            # String observations are converted before being stored.
            o = env.observation_space.converter(o)
        next_o, r, d, env_info = env.step(a)
        observations.append(o)
        rewards.append(r)
        terminals.append(d)
        actions.append(a)
        explored.append(e)
        agent_infos.append(agent_info)
        env_infos.append(env_info)
        if i % experience_interval == 0:
            path_length += 1
        i += 1
        step_timeout, step_complete, plan_ended = agent.check_action_status([next_o])
        if d or step_timeout[0] or plan_ended[0]:
            break
        o = next_o
        if render:
            env.render(**render_kwargs)
    # Bug fix: previously next_o_converted was only bound when the final
    # observation was a string, so any non-string observation raised a
    # NameError below. Fall back to the raw observation otherwise.
    if isinstance(next_o, str):
        next_o_converted = env.observation_space.converter(next_o)
    else:
        next_o_converted = next_o
    actions = np.array(actions)
    if len(actions.shape) == 1:
        actions = np.expand_dims(actions, 1)
    observations = np.array(observations)
    if len(observations.shape) == 1:
        observations = np.expand_dims(observations, 1)
    next_o_converted = np.array([next_o_converted])
    next_observations = np.vstack(
        (observations[1:, :], np.expand_dims(next_o_converted, 0))
    )
    return (
        dict(
            observations=observations,
            actions=actions,
            explored=np.array(explored).reshape(-1, 1),
            rewards=np.array(rewards, dtype=np.float32).reshape(-1, 1),
            next_observations=next_observations,
            terminals=np.array(terminals).reshape(-1, 1),
            agent_infos=agent_infos,
            env_infos=env_infos,
        ),
        (d, next_o),
    )
def hierarchical_rollout(
    env, agent, max_path_length=np.inf, render=False, render_kwargs=None
):
    """Roll out a hierarchical agent, recording one transition per subgoal.

    Transitions are stored only when ``agent_info`` carries a ``subgoal``;
    rewards of the intermediate env steps are accumulated and attributed to
    the previous subgoal step.

    The value for each of the following keys in the returned dict is a 2D
    array whose first dimension is time:
     - observations
     - actions
     - rewards
     - next_observations
     - terminals
    The next two elements are lists of dictionaries, with the index into
    the list being the index into the time:
     - agent_infos
     - env_infos
    """
    if render_kwargs is None:
        render_kwargs = {}
    observations = []
    actions = []
    explored = []
    rewards = []
    terminals = []
    agent_infos = []
    env_infos = []
    o = env.reset()
    agent.reset()
    next_o = None
    path_length = 0
    cumulative_reward = 0
    first_time = True
    if render:
        env.render(**render_kwargs)
    while path_length < max_path_length:
        (a, e), agent_info = agent.get_action(o, [0])
        next_o, r, d, env_info = env.step(a)
        if agent_info.get("subgoal") is not None:
            # A new subgoal marks the boundary of a high-level transition;
            # store the rendered observation and the subgoal as the "action".
            img = json_to_screen(o)
            observations.append(img)
            actions.append(agent_info["subgoal"])
            explored.append(e)
            agent_infos.append(agent_info)
            env_infos.append(env_info)
            path_length += 1
            # Rewards accumulated since the previous subgoal belong to that
            # previous step, so they are flushed one boundary late.
            if not first_time:
                rewards.append(cumulative_reward)
                terminals.append(d)
            first_time = False
            cumulative_reward = 0
        cumulative_reward += r
        if d:
            break
        o = next_o
        if render:
            env.render(**render_kwargs)
    # Flush the reward/terminal for the final subgoal step.
    rewards.append(cumulative_reward)
    terminals.append(d)
    actions = np.array(actions)
    if len(actions.shape) == 1:
        actions = np.expand_dims(actions, 1)
    observations = np.array(observations)
    if len(observations.shape) == 1:
        observations = np.expand_dims(observations, 1)
    # NOTE(review): next_o is wrapped in np.array([...]) *before* being passed
    # to json_to_screen, unlike the json_to_screen(o) call above — confirm
    # json_to_screen accepts the wrapped form.
    next_o = np.array([next_o])
    next_observations = np.vstack(
        (observations[1:, :], np.expand_dims(json_to_screen(next_o), 0))
    )
    return dict(
        observations=observations,
        actions=actions,
        explored=np.array(explored).reshape(-1, 1),
        rewards=np.array(rewards).reshape(-1, 1),
        next_observations=next_observations,
        terminals=np.array(terminals).reshape(-1, 1),
        agent_infos=agent_infos,
        env_infos=env_infos,
    )
| 28.996951
| 85
| 0.608453
| 1,166
| 9,511
| 4.759863
| 0.109777
| 0.024324
| 0.035135
| 0.035676
| 0.761441
| 0.753514
| 0.72
| 0.698018
| 0.698018
| 0.694775
| 0
| 0.009954
| 0.292293
| 9,511
| 327
| 86
| 29.085627
| 0.814589
| 0.112186
| 0
| 0.722846
| 0
| 0
| 0.007248
| 0.005557
| 0
| 0
| 0
| 0
| 0
| 1
| 0.014981
| false
| 0.003745
| 0.007491
| 0
| 0.037453
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
259ab6bb6481700cf4ae5e2a5b3a42d34a274d4f
| 1,212
|
py
|
Python
|
tests/_math/test_validations.py
|
antalszava/piquasso
|
7ebff83145cfab44929114437c250852dff5f9a5
|
[
"Apache-2.0"
] | 12
|
2021-09-12T15:51:45.000Z
|
2022-03-05T22:25:47.000Z
|
tests/_math/test_validations.py
|
antalszava/piquasso
|
7ebff83145cfab44929114437c250852dff5f9a5
|
[
"Apache-2.0"
] | 36
|
2021-09-13T08:01:27.000Z
|
2022-03-21T11:53:30.000Z
|
tests/_math/test_validations.py
|
antalszava/piquasso
|
7ebff83145cfab44929114437c250852dff5f9a5
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright 2021 Budapest Quantum Computing Group
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from piquasso._math.validations import is_natural, all_natural
def test_zero_is_natural():
    """Zero belongs to the natural numbers."""
    outcome = is_natural(0)
    assert outcome
def test_positive_integers_are_natural():
    """A positive integer counts as natural."""
    outcome = is_natural(2)
    assert outcome
def test_negative_integers_are_natural():
    """A negative integer is rejected (function name kept for compatibility)."""
    outcome = is_natural(-2)
    assert not outcome
def test_floats_close_to_integers_count_as_natural():
    """An integer-valued float is accepted as natural."""
    outcome = is_natural(2.0)
    assert outcome
def test_floats_NOT_close_to_integers_do_NOT_count_as_natural():
    """A non-integer float is rejected."""
    outcome = is_natural(2.5)
    assert not outcome
def test_all_natural_positive_case():
    """All elements natural -> truthy result."""
    values = [1, 1.0, 0.0, 2.0]
    assert all_natural(values)
def test_all_natural_negative_case():
    """One non-natural element -> falsy result."""
    values = [1, 1.0, 0.0, -2.0]
    assert not all_natural(values)
| 26.933333
| 74
| 0.766502
| 196
| 1,212
| 4.505102
| 0.443878
| 0.071348
| 0.0453
| 0.074745
| 0.165345
| 0.097395
| 0.038505
| 0.038505
| 0.038505
| 0
| 0
| 0.028293
| 0.15429
| 1,212
| 44
| 75
| 27.545455
| 0.833171
| 0.469472
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.466667
| 1
| 0.466667
| true
| 0
| 0.066667
| 0
| 0.533333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
25ca9531414896379d7eab5ac76c01a0db18d4d7
| 209
|
py
|
Python
|
back.py
|
winius/chat-bot
|
7d44745c4544413e612e5cffb740890d248dd030
|
[
"Apache-2.0"
] | null | null | null |
back.py
|
winius/chat-bot
|
7d44745c4544413e612e5cffb740890d248dd030
|
[
"Apache-2.0"
] | null | null | null |
back.py
|
winius/chat-bot
|
7d44745c4544413e612e5cffb740890d248dd030
|
[
"Apache-2.0"
] | null | null | null |
import random
def main(vk, peer_id):
    """Send the main-menu message with the default keyboard to *peer_id*.

    Fix: the keyboard JSON file was opened inline and never closed, leaking
    the file handle on every call; read it through a context manager instead.
    """
    with open("keyboards/default.json", "r", encoding="UTF-8") as fp:
        keyboard = fp.read()
    vk.messages.send(
        peer_id=peer_id,
        random_id=random.getrandbits(32),
        message='🌐 Главное меню',
        keyboard=keyboard,
    )
| 52.25
| 168
| 0.722488
| 33
| 209
| 4.484848
| 0.757576
| 0.121622
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.015873
| 0.095694
| 209
| 4
| 168
| 52.25
| 0.761905
| 0
| 0
| 0
| 0
| 0
| 0.202899
| 0.10628
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0
| 0.666667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
25f1b3c5db5a9ef4e305a00f822a09c5477cee27
| 3,341
|
py
|
Python
|
slaid/commons/ecvl.py
|
mdrio/slaid
|
67c85f0d1702bced1c089bfb3c20ba1cfbc9c225
|
[
"MIT"
] | null | null | null |
slaid/commons/ecvl.py
|
mdrio/slaid
|
67c85f0d1702bced1c089bfb3c20ba1cfbc9c225
|
[
"MIT"
] | null | null | null |
slaid/commons/ecvl.py
|
mdrio/slaid
|
67c85f0d1702bced1c089bfb3c20ba1cfbc9c225
|
[
"MIT"
] | 1
|
2022-02-11T15:54:47.000Z
|
2022-02-11T15:54:47.000Z
|
# NAPARI LAZY OPENSLIDE
# Copyright (c) 2020, Trevor Manz
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of napari-lazy-openslide nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
from typing import List, Tuple
import numpy as np
from pyecvl.ecvl import Image as EcvlImage
from pyecvl.ecvl import OpenSlideImage
from slaid.commons import Image as BaseImage
from slaid.commons import ImageInfo
import slaid.commons.base as base
logger = logging.getLogger('ecvl')
class Image(BaseImage):
    """Adapter exposing a pyecvl image through the slaid Image interface."""

    IMAGE_INFO = ImageInfo.create('rgb', 'yx', 'first')

    def __init__(self, image: EcvlImage):
        self._image = image

    def to_array(self, image_info: ImageInfo = None):
        """Return the pixel data, optionally converted to *image_info*."""
        # FIXME
        data = np.array(self._image).transpose(0, 2, 1)
        if image_info is None:
            return data
        return self.IMAGE_INFO.convert(data, image_info)

    @property
    def dimensions(self) -> Tuple[int, int]:
        """Dimensions as reported by the wrapped ecvl image."""
        return self._image.dims_
class BasicSlide(base.BasicSlide):
    """Whole-slide image backed by pyecvl's OpenSlideImage reader."""

    IMAGE_INFO = Image.IMAGE_INFO

    def __init__(self, filename: str):
        super().__init__(filename)
        self._slide = OpenSlideImage(filename)

    @property
    def dimensions(self) -> Tuple[int, int]:
        """Full-resolution (level 0) dimensions."""
        return tuple(self._slide.GetLevelsDimensions()[0])

    def read_region(self, location: Tuple[int, int], level,
                    size: Tuple[int, int]) -> Image:
        """Read a region at *level*; location and size are concatenated for ecvl."""
        region = self._slide.ReadRegion(level, location + size)
        return Image(region)

    def get_best_level_for_downsample(self, downsample: int):
        """Delegate level selection to the ecvl backend."""
        return self._slide.GetBestLevelForDownsample(downsample)

    @property
    def level_dimensions(self) -> List[Tuple[int, int]]:
        """Per-level dimensions as a list of tuples."""
        all_dims = self._slide.GetLevelsDimensions()
        return [tuple(dims) for dims in all_dims]

    @property
    def level_downsamples(self):
        """Per-level downsample factors from the ecvl backend."""
        return self._slide.GetLevelDownsamples()
def load(filename: str):
    """Open *filename* as a BasicSlide."""
    return BasicSlide(filename)
| 35.168421
| 81
| 0.724633
| 441
| 3,341
| 5.405896
| 0.421769
| 0.026426
| 0.02307
| 0.021393
| 0.123742
| 0.092282
| 0.092282
| 0.092282
| 0.057047
| 0.057047
| 0
| 0.002999
| 0.201437
| 3,341
| 94
| 82
| 35.542553
| 0.890555
| 0.459144
| 0
| 0.136364
| 0
| 0
| 0.007878
| 0
| 0
| 0
| 0
| 0.010638
| 0
| 1
| 0.227273
| false
| 0
| 0.181818
| 0.136364
| 0.681818
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 4
|
d3421ebfdadd50a661ab3ff3c339c3f73135af13
| 53
|
py
|
Python
|
ch02/__init__.py
|
laszlokiraly/LearningAlgorithms
|
032a3cc409546619cf41220821d081cde54bbcce
|
[
"MIT"
] | 74
|
2021-05-06T22:03:18.000Z
|
2022-03-25T04:37:51.000Z
|
ch02/__init__.py
|
laszlokiraly/LearningAlgorithms
|
032a3cc409546619cf41220821d081cde54bbcce
|
[
"MIT"
] | null | null | null |
ch02/__init__.py
|
laszlokiraly/LearningAlgorithms
|
032a3cc409546619cf41220821d081cde54bbcce
|
[
"MIT"
] | 19
|
2021-07-16T11:42:00.000Z
|
2022-03-22T00:25:49.000Z
|
"""
Module containing Python code for Chapter 2.
"""
| 13.25
| 44
| 0.698113
| 7
| 53
| 5.285714
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022727
| 0.169811
| 53
| 3
| 45
| 17.666667
| 0.818182
| 0.830189
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
d359f498acf6ccc425859c61a4c4bd9b13cd05f5
| 72
|
py
|
Python
|
Python/InvalidType.py
|
udaypandey/BubblyCode
|
675fbcdd32c80f685ddb10ed4a5a92e17d139795
|
[
"MIT"
] | null | null | null |
Python/InvalidType.py
|
udaypandey/BubblyCode
|
675fbcdd32c80f685ddb10ed4a5a92e17d139795
|
[
"MIT"
] | null | null | null |
Python/InvalidType.py
|
udaypandey/BubblyCode
|
675fbcdd32c80f685ddb10ed4a5a92e17d139795
|
[
"MIT"
] | null | null | null |
def say(message, foobar):
    """Print *message* repeated *foobar* times (sequence-repetition semantics)."""
    repeated = message * foobar
    print(repeated)


say("hello", 3)
| 12
| 27
| 0.638889
| 10
| 72
| 4.6
| 0.7
| 0.565217
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.017241
| 0.194444
| 72
| 5
| 28
| 14.4
| 0.775862
| 0
| 0
| 0
| 0
| 0
| 0.069444
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0
| 0.333333
| 0.333333
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
d368c91efbc90357720da91ab2b372a4d28b51b8
| 393
|
py
|
Python
|
log.py
|
ViggAlm/PasswordKit
|
b92f67f7b5b9623f1c63646003b684eee9e8912a
|
[
"BSD-2-Clause"
] | 1
|
2022-02-22T19:51:56.000Z
|
2022-02-22T19:51:56.000Z
|
log.py
|
ViggAlm/PasswordKit
|
b92f67f7b5b9623f1c63646003b684eee9e8912a
|
[
"BSD-2-Clause"
] | null | null | null |
log.py
|
ViggAlm/PasswordKit
|
b92f67f7b5b9623f1c63646003b684eee9e8912a
|
[
"BSD-2-Clause"
] | null | null | null |
from colorama import Fore
prefix = "[" + Fore.YELLOW + "PasswordKit" + Fore.WHITE + "]"
def result(text):
    """Print *text* as a green result line after the PasswordKit prefix."""
    print(f"{Fore.WHITE}{prefix}{Fore.GREEN}{text}")
def error(text):
    """Print *text* as a red error line after the PasswordKit prefix."""
    print(f"{Fore.WHITE}{prefix}{Fore.RED}{text}")
def general(text):
    """Print *text* in the default colour after the PasswordKit prefix."""
    print(f"{Fore.WHITE}{prefix}{text}")
def question(text):
    """Print *text* as a cyan question line after the PasswordKit prefix."""
    print(f"{Fore.WHITE}{prefix}{Fore.CYAN}{text}")
| 19.65
| 61
| 0.608142
| 55
| 393
| 4.345455
| 0.345455
| 0.188285
| 0.217573
| 0.301255
| 0.468619
| 0.468619
| 0.364017
| 0
| 0
| 0
| 0
| 0
| 0.183206
| 393
| 19
| 62
| 20.684211
| 0.744548
| 0
| 0
| 0
| 0
| 0
| 0.175573
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0.1
| 0.1
| 0
| 0.5
| 0.4
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 4
|
d37cdc52c145b2070b33a144edb7a4a7f04e5af4
| 641
|
py
|
Python
|
pomodorr/projects/apps.py
|
kamil559/pomodorr
|
232e6e98ff3481561dd1235794b3960066713210
|
[
"MIT"
] | null | null | null |
pomodorr/projects/apps.py
|
kamil559/pomodorr
|
232e6e98ff3481561dd1235794b3960066713210
|
[
"MIT"
] | 15
|
2020-04-11T18:30:57.000Z
|
2020-07-05T09:37:43.000Z
|
pomodorr/projects/apps.py
|
kamil559/pomodorr
|
232e6e98ff3481561dd1235794b3960066713210
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
from django.utils.translation import gettext_lazy as _
class ProjectsConfig(AppConfig):
    """App config for ``pomodorr.projects``; wires signal handlers on startup."""
    name = 'pomodorr.projects'
    # NOTE(review): Django's AppConfig expects ``verbose_name``; ``verbose``
    # looks like a typo — confirm before relying on the display name.
    verbose = _('Projects')

    def ready(self):
        # Imports are deferred until the app registry is ready so that models
        # and signal modules are not touched during Django startup.
        try:
            from pomodorr.projects.signals.dispatchers import notify_force_finish
            from pomodorr.projects.signals.handlers import task_completed_notify_channel
            # dispatch_uid prevents the handler from being connected twice.
            notify_force_finish.connect(receiver=task_completed_notify_channel,
                                        dispatch_uid='pomodorr.projects.signals.task_completed_notify_channel')
        except ImportError:
            pass  # noqa F401
| 33.736842
| 111
| 0.692668
| 67
| 641
| 6.373134
| 0.567164
| 0.149883
| 0.161593
| 0.18267
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006224
| 0.24805
| 641
| 18
| 112
| 35.611111
| 0.879668
| 0.014041
| 0
| 0
| 0
| 0
| 0.126984
| 0.087302
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0.076923
| 0.384615
| 0
| 0.692308
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
|
0
| 4
|
d383cd85a741d1d27af48818777ba3ae5e9dea05
| 537
|
py
|
Python
|
tests/pymath/test_visible_cubes.py
|
JASTYN/pythonmaster
|
46638ab09d28b65ce5431cd0759fe6df272fb85d
|
[
"Apache-2.0",
"MIT"
] | 3
|
2017-05-02T10:28:13.000Z
|
2019-02-06T09:10:11.000Z
|
tests/pymath/test_visible_cubes.py
|
JASTYN/pythonmaster
|
46638ab09d28b65ce5431cd0759fe6df272fb85d
|
[
"Apache-2.0",
"MIT"
] | 2
|
2017-06-21T20:39:14.000Z
|
2020-02-25T10:28:57.000Z
|
tests/pymath/test_visible_cubes.py
|
JASTYN/pythonmaster
|
46638ab09d28b65ce5431cd0759fe6df272fb85d
|
[
"Apache-2.0",
"MIT"
] | 2
|
2016-07-29T04:35:22.000Z
|
2017-01-18T17:05:36.000Z
|
import unittest
from pymath.visible_cubes import VisibleCubes
class VisibleCubesTest(unittest.TestCase):
    """Checks ``not_visible_cubes`` for small cube edge lengths."""

    def _check(self, edge, expected):
        # Shared assertion helper to avoid repeating the call shape.
        self.assertEqual(VisibleCubes.not_visible_cubes(edge), expected)

    def test_1(self):
        self._check(0, 0)

    def test_2(self):
        self._check(1, 0)

    def test_3(self):
        self._check(2, 0)

    def test_4(self):
        self._check(3, 1)

    def test_5(self):
        self._check(4, 8)
| 25.571429
| 62
| 0.72067
| 72
| 537
| 5.152778
| 0.305556
| 0.19407
| 0.256065
| 0.41779
| 0.619946
| 0.619946
| 0.619946
| 0
| 0
| 0
| 0
| 0.033937
| 0.176909
| 537
| 20
| 63
| 26.85
| 0.80543
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.384615
| 1
| 0.384615
| false
| 0
| 0.153846
| 0
| 0.615385
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
d3909bb5f50aa6a1cd42a85aa5f53b8ef59eb532
| 161
|
py
|
Python
|
m3gnet/trainers/__init__.py
|
materialsvirtuallab/m3gnet
|
94fb01d0c90d3b2bffcdc4514f7eb3cb8fab6c90
|
[
"BSD-3-Clause"
] | 1
|
2022-03-31T14:47:24.000Z
|
2022-03-31T14:47:24.000Z
|
m3gnet/trainers/__init__.py
|
materialsvirtuallab/m3gnet
|
94fb01d0c90d3b2bffcdc4514f7eb3cb8fab6c90
|
[
"BSD-3-Clause"
] | null | null | null |
m3gnet/trainers/__init__.py
|
materialsvirtuallab/m3gnet
|
94fb01d0c90d3b2bffcdc4514f7eb3cb8fab6c90
|
[
"BSD-3-Clause"
] | null | null | null |
"""M3GNet trainers"""
# -*- coding: utf-8 -*-
from ._potential import PotentialTrainer
from ._property import Trainer
__all__ = ["Trainer", "PotentialTrainer"]
| 23
| 41
| 0.720497
| 16
| 161
| 6.875
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014184
| 0.124224
| 161
| 6
| 42
| 26.833333
| 0.765957
| 0.236025
| 0
| 0
| 0
| 0
| 0.196581
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
6c96573e2a86c4d06cd5e786e6a9c65c803e3b54
| 486
|
py
|
Python
|
tests/__init__.py
|
pedromtorres/TigerShark
|
2790a7c03905a094b126b48387c7919c09cce238
|
[
"BSD-3-Clause"
] | 24
|
2015-03-18T10:15:20.000Z
|
2022-03-18T13:38:34.000Z
|
tests/__init__.py
|
tspannhw/TigerShark
|
5081641f1b189a43e9eab4813256598cc0a79f6f
|
[
"BSD-3-Clause"
] | 6
|
2015-03-27T12:36:57.000Z
|
2021-04-13T15:01:24.000Z
|
tests/__init__.py
|
tspannhw/TigerShark
|
5081641f1b189a43e9eab4813256598cc0a79f6f
|
[
"BSD-3-Clause"
] | 21
|
2015-11-21T09:19:47.000Z
|
2020-09-17T16:52:50.000Z
|
#!/usr/bin/env python
"""The test package contains test data files as well as unit tests.
run_tests
===========
.. automodule:: test.run_tests
:members:
test_navigation
===============
.. automodule:: test.test_navigation
:members:
test_parse
===========
.. automodule:: test.test_parse
:members:
test_wsClaims
==============
.. automodule:: test.test_wsClaims
:members:
test_convert
=============
.. automodule:: test.test_convert
:members:
"""
| 13.885714
| 67
| 0.596708
| 51
| 486
| 5.490196
| 0.411765
| 0.25
| 0.257143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.174897
| 486
| 34
| 68
| 14.294118
| 0.698254
| 0.979424
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
6c9da72f1e7cd9a44b07213708fdc729bbcbffaf
| 33
|
py
|
Python
|
Boolean/Boolean1.py
|
liyuanyuan11/Python
|
d94cc7ab39e56c6e24bfc741a30da77590d1d220
|
[
"MIT"
] | null | null | null |
Boolean/Boolean1.py
|
liyuanyuan11/Python
|
d94cc7ab39e56c6e24bfc741a30da77590d1d220
|
[
"MIT"
] | null | null | null |
Boolean/Boolean1.py
|
liyuanyuan11/Python
|
d94cc7ab39e56c6e24bfc741a30da77590d1d220
|
[
"MIT"
] | null | null | null |
# Demonstrates assigning the boolean literal ``True`` and printing it.
thisIsBool=True
print(thisIsBool)
| 16.5
| 17
| 0.878788
| 4
| 33
| 7.25
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.030303
| 33
| 2
| 17
| 16.5
| 0.90625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.5
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 4
|
6c9ee4047b3a967591fcf09f72904d1bcbe40877
| 272
|
py
|
Python
|
Python POO/Getters e Setters/exemplo03/alarm.py
|
luccasocastro/Curso-Python
|
7ad2b980bb2f95f833811291273d6ca1beb0fe77
|
[
"MIT"
] | null | null | null |
Python POO/Getters e Setters/exemplo03/alarm.py
|
luccasocastro/Curso-Python
|
7ad2b980bb2f95f833811291273d6ca1beb0fe77
|
[
"MIT"
] | null | null | null |
Python POO/Getters e Setters/exemplo03/alarm.py
|
luccasocastro/Curso-Python
|
7ad2b980bb2f95f833811291273d6ca1beb0fe77
|
[
"MIT"
] | null | null | null |
class Alarme:
    """Simple alarm whose on/off state is read and written through accessors."""

    def __init__(self, estado: bool) -> None:
        self.__estado = estado

    def getEstado(self) -> bool:
        """Return the current alarm state."""
        return self.__estado

    def setEstado(self, valor: bool) -> None:
        """Update the state; non-boolean values are silently ignored."""
        if not isinstance(valor, bool):
            return
        self.__estado = valor
| 22.666667
| 45
| 0.599265
| 31
| 272
| 4.935484
| 0.451613
| 0.261438
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.294118
| 272
| 11
| 46
| 24.727273
| 0.796875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.375
| false
| 0
| 0
| 0.125
| 0.625
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 4
|
6cc0464d548268e9ae806041a26f69d2278a7df3
| 1,559
|
py
|
Python
|
qiskit/_openquantumcompiler.py
|
christians94/qiskit-sdk-py
|
5c1c68a5aa3dcccdf5c10f9eb307383ebb40826b
|
[
"Apache-2.0"
] | null | null | null |
qiskit/_openquantumcompiler.py
|
christians94/qiskit-sdk-py
|
5c1c68a5aa3dcccdf5c10f9eb307383ebb40826b
|
[
"Apache-2.0"
] | 1
|
2018-08-08T17:56:06.000Z
|
2018-08-08T17:56:06.000Z
|
qiskit/_openquantumcompiler.py
|
christians94/qiskit-sdk-py
|
5c1c68a5aa3dcccdf5c10f9eb307383ebb40826b
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# pylint: disable=redefined-builtin
# Copyright 2017 IBM RESEARCH. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tools for compiling Quantum Programs."""
from qiskit.unroll import DagUnroller, JsonBackend
# TODO: This is here for backward compatibility with QISKit Developer Challenge
# Once the challenge is finished, we have to remove this entire module.
def dag2json(dag_circuit, basis_gates='u1,u2,u3,cx,id'):
    """Make a Json representation of the circuit.

    Takes a circuit dag and returns json circuit obj. This is an internal
    function.

    Args:
        dag_circuit (QuantumCircuit): a dag representation of the circuit.
        basis_gates (str): a comma seperated string and are the base gates,
            which by default are: u1,u2,u3,cx,id

    Returns:
        json: the json version of the dag
    """
    backend = JsonBackend(basis_gates.split(","))
    unroller = DagUnroller(dag_circuit, backend)
    return unroller.execute()
| 42.135135
| 82
| 0.686337
| 214
| 1,559
| 4.971963
| 0.593458
| 0.056391
| 0.024436
| 0.030075
| 0.018797
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012658
| 0.189224
| 1,559
| 36
| 83
| 43.305556
| 0.829114
| 0.817832
| 0
| 0
| 0
| 0
| 0.068493
| 0
| 0
| 0
| 0
| 0.027778
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
6cd7b3f78c6ea020b669ed58c3c399d8c2e66207
| 590
|
py
|
Python
|
letter/models.py
|
HilmiZul/epkl3
|
63df215eb1676cf5ab2f36f2f20436b19b540b9a
|
[
"MIT"
] | 6
|
2019-02-15T07:15:33.000Z
|
2021-01-05T12:18:21.000Z
|
letter/models.py
|
HilmiZul/epkl3
|
63df215eb1676cf5ab2f36f2f20436b19b540b9a
|
[
"MIT"
] | 6
|
2019-09-14T14:47:48.000Z
|
2022-03-12T00:56:51.000Z
|
letter/models.py
|
HilmiZul/epkl3
|
63df215eb1676cf5ab2f36f2f20436b19b540b9a
|
[
"MIT"
] | null | null | null |
from django.db import models
from master.models import Siswa, Instansi
# Create your models here.
class Permohonan(models.Model):
    """Application record linking a student (Siswa) to a company (Instansi)."""
    # Deleting the referenced Siswa/Instansi cascades to this record.
    nama_siswa = models.ForeignKey(Siswa, on_delete=models.CASCADE)
    nama_instansi = models.ForeignKey(Instansi, on_delete=models.CASCADE)

    def __str__(self):
        # Shown in admin listings: the related company's name.
        return self.nama_instansi.nama
# class PermohonanTKJ(models.Model):
# nama_siswa = models.ForeignKey(Siswa, on_delete=models.CASCADE)
# nama_instansi = models.ForeignKey(InstansiTKJ, on_delete=models.CASCADE)
# def __str__(self):
# return self.nama_instansi.nama
| 32.777778
| 78
| 0.750847
| 75
| 590
| 5.666667
| 0.333333
| 0.150588
| 0.131765
| 0.197647
| 0.691765
| 0.691765
| 0.691765
| 0.691765
| 0.691765
| 0.691765
| 0
| 0
| 0.154237
| 590
| 17
| 79
| 34.705882
| 0.851703
| 0.450847
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.285714
| 0.142857
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 4
|
6ce67f80393f8088cf44c387c9e1c5262727136a
| 581
|
py
|
Python
|
model/digamma.py
|
OpenXAIProject/Beta-Bernoulli-Dependent-Dropout
|
723df2d2392ec16eca3452d4afb81d54c4a2f841
|
[
"Apache-2.0"
] | 13
|
2018-11-29T05:56:11.000Z
|
2018-12-05T02:47:23.000Z
|
model/digamma.py
|
OpenXAIProject/Beta-Bernoulli-Dependent-Dropout
|
723df2d2392ec16eca3452d4afb81d54c4a2f841
|
[
"Apache-2.0"
] | null | null | null |
model/digamma.py
|
OpenXAIProject/Beta-Bernoulli-Dependent-Dropout
|
723df2d2392ec16eca3452d4afb81d54c4a2f841
|
[
"Apache-2.0"
] | 8
|
2018-11-30T00:42:27.000Z
|
2018-12-04T10:11:08.000Z
|
import tensorflow as tf
# @MISC {1446110,
# TITLE = {Approximating the Digamma function},
# AUTHOR = {njuffa (https://math.stackexchange.com/users/114200/njuffa)},
# HOWPUBLISHED = {Mathematics Stack Exchange},
# NOTE = {URL:https://math.stackexchange.com/q/1446110 (version: 2015-09-22)},
# EPRINT = {https://math.stackexchange.com/q/1446110},
# URL = {https://math.stackexchange.com/q/1446110}}
def digamma_approx(x):
    """Approximate the digamma function (njuffa's approximation, see header).

    Uses the recurrence psi(x) = psi(x + 1) - 1/x together with a closed-form
    approximation valid for arguments >= 1.
    """
    shifted = x + 1
    psi_over_one = tf.log(shifted + 0.4849142940227510) \
        - 1 / (1.0271785180163817 * shifted)
    return psi_over_one - 1. / x
| 38.733333
| 78
| 0.686747
| 77
| 581
| 5.116883
| 0.545455
| 0.091371
| 0.22335
| 0.253807
| 0.266497
| 0.266497
| 0.182741
| 0
| 0
| 0
| 0
| 0.159919
| 0.149742
| 581
| 14
| 79
| 41.5
| 0.637652
| 0.616179
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.166667
| 0.166667
| 0.833333
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 4
|
6cf35020ab985340d7a20cd326ad9113518513ba
| 91
|
py
|
Python
|
_downloads/plot_cubist_lena.py
|
dolfly/scipy-lectures.github.com
|
0e5babdf839754b075ded5d986f767be35fbbe65
|
[
"CC-BY-3.0"
] | null | null | null |
_downloads/plot_cubist_lena.py
|
dolfly/scipy-lectures.github.com
|
0e5babdf839754b075ded5d986f767be35fbbe65
|
[
"CC-BY-3.0"
] | null | null | null |
_downloads/plot_cubist_lena.py
|
dolfly/scipy-lectures.github.com
|
0e5babdf839754b075ded5d986f767be35fbbe65
|
[
"CC-BY-3.0"
] | null | null | null |
import numpy as np
from scipy import misc
import matplotlib.pyplot as plt
# Load the classic "Lena" sample image bundled with old SciPy.
# NOTE(review): scipy.misc.lena was removed in modern SciPy — confirm the
# pinned SciPy version or migrate to scipy.misc.face()/an external image.
l = misc.lena()
| 15.166667
| 31
| 0.769231
| 16
| 91
| 4.375
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.175824
| 91
| 5
| 32
| 18.2
| 0.933333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.75
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
9f03534c24da605d0a327352c085084a1edd3ca6
| 484
|
py
|
Python
|
odk2stata/dataset/__init__.py
|
PMA-2020/odk2stata
|
5178736eeedd4bad93bd35044a57cf071f282b4b
|
[
"MIT"
] | 2
|
2019-12-04T10:30:04.000Z
|
2022-03-23T11:07:07.000Z
|
odk2stata/dataset/__init__.py
|
PMA-2020/odk2stata
|
5178736eeedd4bad93bd35044a57cf071f282b4b
|
[
"MIT"
] | 3
|
2019-10-11T17:28:51.000Z
|
2022-01-09T06:07:08.000Z
|
odk2stata/dataset/__init__.py
|
jkpr/odk2stata
|
361b88d7fdd5751f16cd3ed5ceb7acdbde3bb82c
|
[
"MIT"
] | 3
|
2019-07-10T23:33:44.000Z
|
2021-12-18T06:25:53.000Z
|
"""A module to describe the dataset based on an ODK file.
This module describes three primary abstractions:
- DatasetCollection
- Dataset
- Column
An ODK file can have repeat groups. When the data are exported to CSV,
then those repeat groups become their own datasets. Therefore, the top
level is the DatasetCollection, which comprises the primary Dataset
and repeat group Datasets. Each Dataset has Columns.
"""
from .dataset_collection import DatasetCollection
| 28.470588
| 70
| 0.778926
| 68
| 484
| 5.529412
| 0.705882
| 0.026596
| 0.047872
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 484
| 16
| 71
| 30.25
| 0.949495
| 0.876033
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
9f1f74457ba290ef289d78ba38eca46ee30f0a62
| 533
|
py
|
Python
|
utils/app_decorators.py
|
Omeramsc/hakesh-trom
|
bede21b96ea807ed78d5169ac5b4a917c15ce286
|
[
"MIT"
] | null | null | null |
utils/app_decorators.py
|
Omeramsc/hakesh-trom
|
bede21b96ea807ed78d5169ac5b4a917c15ce286
|
[
"MIT"
] | 4
|
2020-06-19T09:58:55.000Z
|
2022-02-13T16:20:28.000Z
|
utils/app_decorators.py
|
Omeramsc/hakesh-trom
|
bede21b96ea807ed78d5169ac5b4a917c15ce286
|
[
"MIT"
] | 1
|
2020-03-18T18:38:49.000Z
|
2020-03-18T18:38:49.000Z
|
from functools import wraps
from flask import redirect, abort
from flask_login import current_user
def admin_access(f):
    """View decorator: only admin users may proceed; others get HTTP 403."""
    @wraps(f)
    def guarded(*args, **kwargs):
        if current_user.is_admin:
            return f(*args, **kwargs)
        abort(403)
    return guarded
def user_access(f):
    """View decorator: only non-admin users may proceed; admins get HTTP 403."""
    @wraps(f)
    def guarded(*args, **kwargs):
        if not current_user.is_admin:
            return f(*args, **kwargs)
        abort(403)
    return guarded
| 22.208333
| 45
| 0.628518
| 66
| 533
| 4.893939
| 0.348485
| 0.210526
| 0.074303
| 0.080495
| 0.687307
| 0.687307
| 0.687307
| 0.687307
| 0.687307
| 0.687307
| 0
| 0.015707
| 0.283302
| 533
| 23
| 46
| 23.173913
| 0.829843
| 0
| 0
| 0.588235
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.235294
| false
| 0
| 0.176471
| 0
| 0.647059
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
9f2f032fefbb445ef3a91589293fd83015bc8ef0
| 1,641
|
py
|
Python
|
Apartment_Renting_App/backend/views/about_page.py
|
sushilkplassar/csc848_LiveGator
|
e95d861679f1dfb8f547d5b9f7d9b7a4fa530c8d
|
[
"MIT"
] | null | null | null |
Apartment_Renting_App/backend/views/about_page.py
|
sushilkplassar/csc848_LiveGator
|
e95d861679f1dfb8f547d5b9f7d9b7a4fa530c8d
|
[
"MIT"
] | 1
|
2019-11-24T06:30:44.000Z
|
2019-11-24T06:30:44.000Z
|
Apartment_Renting_App/backend/views/about_page.py
|
pancreaspinch/LiveGator
|
680592aaf7a6c1603c0ae798a8094ca5f3ff250f
|
[
"MIT"
] | null | null | null |
####################################
# File name: about_page.py          #
# Description: "About" page routes  #
# Author: Team-13                    #
# Submission: Spring-2019            #
# Instructor: Dragutin Petkovic      #
####################################
from flask import Flask, Blueprint, request, flash, url_for, redirect, render_template
from flask_login import login_user, logout_user, current_user , login_required
from werkzeug.security import check_password_hash, generate_password_hash
# Blueprint grouping every public "/about" route; registered by the app.
# NOTE(review): the per-member handlers below are structurally identical
# (render one static template each); a single parameterized route or an
# add_url_rule loop would remove the duplication, but would change the
# Flask endpoint names that url_for() relies on -- left as-is.
about_page_endpoints = Blueprint('about_page_endpoints', __name__)
# Main "about" landing page.
@about_page_endpoints.route('/about', methods=['GET', 'POST'])
def about():
    return render_template('about.html')
# Individual team-member pages; each renders its own static template.
@about_page_endpoints.route('/about/AmarisAboutMe', methods=['GET', 'POST'])
def aboutAmarisAboutMe():
    return render_template('about_AmarisAboutMe.html')
@about_page_endpoints.route('/about/kim', methods=['GET', 'POST'])
def aboutKim():
    return render_template('about_Kim.html')
@about_page_endpoints.route('/about/sushil', methods=['GET', 'POST'])
def aboutSushil():
    return render_template('about_sushil.html')
@about_page_endpoints.route('/about/Kurtis', methods=['GET', 'POST'])
def aboutKurtis():
    return render_template('about_Kurtis.html')
@about_page_endpoints.route('/about/Adeel', methods=['GET', 'POST'])
def aboutAdeel():
    return render_template('about_Adeel.html')
@about_page_endpoints.route('/about/simon', methods=['GET', 'POST'])
def aboutSimon():
    return render_template('about_simon.html')
@about_page_endpoints.route('/about/brian', methods=['GET', 'POST'])
def aboutBrian():
    return render_template('about_brian.html')
| 30.388889
| 86
| 0.697136
| 191
| 1,641
| 5.722513
| 0.308901
| 0.090576
| 0.164684
| 0.168344
| 0.230558
| 0.204941
| 0
| 0
| 0
| 0
| 0
| 0.004141
| 0.117002
| 1,641
| 54
| 87
| 30.388889
| 0.750173
| 0.087751
| 0
| 0
| 0
| 0
| 0.215297
| 0.016997
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| false
| 0.035714
| 0.107143
| 0.285714
| 0.678571
| 0.071429
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 4
|
9f4c781ed13aa5ab9d4df3bec8dc6e44debfb804
| 51
|
py
|
Python
|
model/efficientNet/__init__.py
|
ztt0810/general_img_cls_template
|
2ae164d14e1abca1cdcf327acf306dd0415fd3ac
|
[
"MIT"
] | 1
|
2021-03-02T15:05:24.000Z
|
2021-03-02T15:05:24.000Z
|
model/efficientNet/__init__.py
|
ztt0810/general_img_cls_template
|
2ae164d14e1abca1cdcf327acf306dd0415fd3ac
|
[
"MIT"
] | null | null | null |
model/efficientNet/__init__.py
|
ztt0810/general_img_cls_template
|
2ae164d14e1abca1cdcf327acf306dd0415fd3ac
|
[
"MIT"
] | null | null | null |
from .efficientNet import *
from .utils import *
| 17
| 28
| 0.72549
| 6
| 51
| 6.166667
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.196078
| 51
| 2
| 29
| 25.5
| 0.902439
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
9f558eee486a98a8b26462c909884d369ed07d81
| 170
|
py
|
Python
|
FacebookChatPhisher/secretDirectory/secret.py
|
AshrafTaifour/Private-Facebook-Scraper
|
0fc72414792ec7a04770364cb036a0f14767069c
|
[
"MIT"
] | 2
|
2021-04-05T05:17:57.000Z
|
2021-06-27T07:46:32.000Z
|
FacebookChatPhisher/secretDirectory/secret.py
|
AshrafTaifour/Private-Facebook-Scraper
|
0fc72414792ec7a04770364cb036a0f14767069c
|
[
"MIT"
] | 2
|
2021-05-16T21:16:36.000Z
|
2021-07-30T14:37:38.000Z
|
FacebookChatPhisher/secretDirectory/secret.py
|
AshrafTaifour/Private-Facebook-Scraper
|
0fc72414792ec7a04770364cb036a0f14767069c
|
[
"MIT"
] | null | null | null |
EMAIL = "YOUR_FACEBOOK_EMAIL"
UNAME = "YOUR_FB_USER_NAME"
passw = r"YOUR_FACBOOK_PASSWORD"
torBrowserPath = 'PATH_TO_TORBROWSERDIRECTORY'
exePath = 'PATH_TO_DRIVER_EXE'
| 24.285714
| 46
| 0.817647
| 23
| 170
| 5.521739
| 0.782609
| 0.094488
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.094118
| 170
| 6
| 47
| 28.333333
| 0.824675
| 0
| 0
| 0
| 0
| 0
| 0.60355
| 0.284024
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.2
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 4
|
9f56a63924ec5c746a985d2d77f5a5ae47777582
| 231
|
py
|
Python
|
rename.py
|
samlarkin/bin
|
cc2dcf22b37e28f19a237ce4f073aabb539114cd
|
[
"BSD-3-Clause"
] | null | null | null |
rename.py
|
samlarkin/bin
|
cc2dcf22b37e28f19a237ce4f073aabb539114cd
|
[
"BSD-3-Clause"
] | null | null | null |
rename.py
|
samlarkin/bin
|
cc2dcf22b37e28f19a237ce4f073aabb539114cd
|
[
"BSD-3-Clause"
] | null | null | null |
import os

# Normalize '.wiki' filenames in the current directory: drop every '.wiki'
# occurrence and every remaining dot from the name, then restore a single
# trailing '.wiki' extension. Names without '.wiki' are left untouched.
for filename in os.listdir():
    if '.wiki' not in filename:
        continue
    cleaned = filename.replace('.wiki', '').replace('.', '')
    os.replace(filename, cleaned + '.wiki')
| 23.1
| 44
| 0.506494
| 35
| 231
| 3.114286
| 0.285714
| 0.366972
| 0.385321
| 0.275229
| 0.311927
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.316017
| 231
| 9
| 45
| 25.666667
| 0.689873
| 0
| 0
| 0
| 0
| 0
| 0.069264
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.125
| 0
| 0.125
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
9f69d1c4d6e28dbe64a9d4ed81782a9a6530a5e0
| 29
|
py
|
Python
|
connection_notifier/__init__.py
|
NNRepos/connection_alerter
|
463a4225618cc4155cd97d27f84b28930f68a175
|
[
"MIT"
] | null | null | null |
connection_notifier/__init__.py
|
NNRepos/connection_alerter
|
463a4225618cc4155cd97d27f84b28930f68a175
|
[
"MIT"
] | 1
|
2019-11-01T19:33:48.000Z
|
2021-11-14T19:36:43.000Z
|
connection_notifier/__init__.py
|
NNRepos/connection_alerter
|
463a4225618cc4155cd97d27f84b28930f68a175
|
[
"MIT"
] | null | null | null |
name = "connection_notifier"
| 14.5
| 28
| 0.793103
| 3
| 29
| 7.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.103448
| 29
| 1
| 29
| 29
| 0.846154
| 0
| 0
| 0
| 0
| 0
| 0.655172
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
9f8184849b20efb42772fe5c324d73b990a92874
| 265
|
py
|
Python
|
mindsdb/libs/data_types/tester_response.py
|
aykuttasil/mindsdb
|
2c36b6f75f13d7104fe4d3dbb7ca307fa84f45ad
|
[
"MIT"
] | 1
|
2022-03-14T00:32:53.000Z
|
2022-03-14T00:32:53.000Z
|
mindsdb/libs/data_types/tester_response.py
|
aykuttasil/mindsdb
|
2c36b6f75f13d7104fe4d3dbb7ca307fa84f45ad
|
[
"MIT"
] | null | null | null |
mindsdb/libs/data_types/tester_response.py
|
aykuttasil/mindsdb
|
2c36b6f75f13d7104fe4d3dbb7ca307fa84f45ad
|
[
"MIT"
] | null | null | null |
class TesterResponse():
    """Container for the outcome of a model test run.

    Attributes:
        error: error measure of the run (default 0).
        accuracy: accuracy measure of the run (default 0).
        predicted_targets: mapping of predicted target values.
        real_targets: mapping of ground-truth target values.
    """

    def __init__(self, error=0, accuracy=0, predicted_targets=None, real_targets=None):
        # None sentinels replace the original mutable `{}` defaults: a dict
        # default is evaluated once and shared by every instance created
        # without arguments, so mutating one response would leak into all.
        self.error = error
        self.accuracy = accuracy
        self.predicted_targets = {} if predicted_targets is None else predicted_targets
        self.real_targets = {} if real_targets is None else real_targets
| 26.5
| 85
| 0.671698
| 29
| 265
| 5.793103
| 0.37931
| 0.285714
| 0.214286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009804
| 0.230189
| 265
| 9
| 86
| 29.444444
| 0.813725
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
9f823af966072fa5ca7e3673da51ceeb153a05cf
| 624
|
py
|
Python
|
src/metrics/abstract_scorer.py
|
Hazoom/sede
|
2bf261248feb330889a9e84f74e33ff7df0d6a86
|
[
"Apache-2.0"
] | 65
|
2021-06-06T09:54:43.000Z
|
2022-02-28T08:15:02.000Z
|
src/metrics/abstract_scorer.py
|
Hazoom/sede
|
2bf261248feb330889a9e84f74e33ff7df0d6a86
|
[
"Apache-2.0"
] | null | null | null |
src/metrics/abstract_scorer.py
|
Hazoom/sede
|
2bf261248feb330889a9e84f74e33ff7df0d6a86
|
[
"Apache-2.0"
] | 10
|
2021-06-29T11:04:50.000Z
|
2022-02-12T08:15:49.000Z
|
from typing import List, Dict
from abc import ABC, abstractmethod
class AbstractScorer(ABC):
    """Interface for metric scorers over (prediction, target) line pairs.

    The `reset` parameter on get_metric suggests implementations
    accumulate statistics across __call__ invocations -- confirm in
    concrete subclasses.
    """
    @abstractmethod
    def get_name(self) -> str:
        """Return this scorer's identifying name."""
        raise NotImplementedError()
    @abstractmethod
    def __call__(self, pred_lns: List[str], tgt_lns: List[str]) -> None:
        """Consume a batch of predicted lines and target lines."""
        raise NotImplementedError()
    @abstractmethod
    def get_metric(self, reset: bool = False) -> Dict[str, float]:
        """Return current metric values by name; optionally reset state."""
        raise NotImplementedError()
    @abstractmethod
    def reset(self) -> None:
        """Clear any accumulated state."""
        raise NotImplementedError()
    @abstractmethod
    def get_metric_names(self) -> List[str]:
        """Return the metric names this scorer reports."""
        raise NotImplementedError()
| 24.96
| 72
| 0.674679
| 66
| 624
| 6.227273
| 0.409091
| 0.206813
| 0.36983
| 0.399027
| 0.262774
| 0.262774
| 0.262774
| 0
| 0
| 0
| 0
| 0
| 0.227564
| 624
| 24
| 73
| 26
| 0.852697
| 0
| 0
| 0.555556
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.277778
| false
| 0
| 0.111111
| 0
| 0.444444
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
4c7d156f26e5e8a4d0712c00077b28beeea31a16
| 39
|
py
|
Python
|
pycotacao/__init__.py
|
CaioWzy/PyCotacao
|
6a536f35fad4c38db8ae116d29eb7a4ab4735778
|
[
"MIT"
] | 3
|
2020-02-08T05:44:39.000Z
|
2020-10-29T14:10:22.000Z
|
pycotacao/__init__.py
|
CaioWzy/PyCotacao
|
6a536f35fad4c38db8ae116d29eb7a4ab4735778
|
[
"MIT"
] | null | null | null |
pycotacao/__init__.py
|
CaioWzy/PyCotacao
|
6a536f35fad4c38db8ae116d29eb7a4ab4735778
|
[
"MIT"
] | 1
|
2021-03-16T01:48:59.000Z
|
2021-03-16T01:48:59.000Z
|
from .api import *
# Package version string -- keep in sync with the distribution metadata.
__version__ = "1.1"
| 13
| 19
| 0.666667
| 6
| 39
| 3.666667
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.0625
| 0.179487
| 39
| 3
| 19
| 13
| 0.625
| 0
| 0
| 0
| 0
| 0
| 0.075
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
4c82fabe7b2f200f060085e375884907e7a22229
| 54
|
py
|
Python
|
riscv_mini/const.py
|
cdonovick/magma_riscv_mini
|
b7f39e09df28c6dde26ec427aae54aa9b88f1d11
|
[
"BSD-3-Clause"
] | 3
|
2021-04-13T18:52:09.000Z
|
2022-01-05T07:18:03.000Z
|
riscv_mini/const.py
|
cdonovick/magma_riscv_mini
|
b7f39e09df28c6dde26ec427aae54aa9b88f1d11
|
[
"BSD-3-Clause"
] | 1
|
2020-09-01T23:46:05.000Z
|
2020-09-09T19:13:08.000Z
|
riscv_mini/const.py
|
cdonovick/magma_riscv_mini
|
b7f39e09df28c6dde26ec427aae54aa9b88f1d11
|
[
"BSD-3-Clause"
] | 2
|
2021-04-16T17:15:18.000Z
|
2021-09-17T21:09:37.000Z
|
class Const:
    """Core-wide address constants for the RISC-V mini processor."""
    PC_START = 0x200  # initial program counter -- presumably the reset vector; confirm against the core
    PC_EVEC = 0x100  # exception vector address -- presumably; confirm against CSR/exception logic
| 13.5
| 20
| 0.648148
| 8
| 54
| 4.125
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.210526
| 0.296296
| 54
| 3
| 21
| 18
| 0.657895
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.185185
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
4c94bef0d115389da735a1e29841834c3e9e1548
| 29,691
|
py
|
Python
|
test/test_socks.py
|
glowatsk/txtorcon
|
db8de75a4568561b118be11299bda61f3fb84719
|
[
"MIT"
] | 180
|
2015-01-12T23:57:06.000Z
|
2022-03-17T00:24:35.000Z
|
test/test_socks.py
|
glowatsk/txtorcon
|
db8de75a4568561b118be11299bda61f3fb84719
|
[
"MIT"
] | 262
|
2015-01-16T21:14:50.000Z
|
2022-02-25T01:33:42.000Z
|
test/test_socks.py
|
glowatsk/txtorcon
|
db8de75a4568561b118be11299bda61f3fb84719
|
[
"MIT"
] | 61
|
2015-01-05T01:10:57.000Z
|
2022-01-04T08:13:39.000Z
|
from six import BytesIO, text_type
from mock import Mock, patch
from twisted.trial import unittest
from twisted.internet import defer
from twisted.internet.address import IPv4Address
from twisted.internet.protocol import Protocol
from twisted.internet.interfaces import IStreamClientEndpoint
from twisted.test import proto_helpers
from twisted.test.iosim import connect, FakeTransport
from zope.interface import directlyProvides
from txtorcon import socks
class SocksStateMachine(unittest.TestCase):
    """Tests for txtorcon.socks._SocksMachine: constructor validation,
    SOCKS5 handshake failure modes, and end-to-end request/relay byte
    sequences (checked against exact wire bytes)."""
    def test_illegal_request(self):
        """An unknown request type is rejected at construction time."""
        with self.assertRaises(ValueError) as ctx:
            socks._SocksMachine('FOO_RESOLVE', u'meejah.ca', 443)
        self.assertTrue(
            'Unknown request type' in str(ctx.exception)
        )
    def test_illegal_host(self):
        """A non-string host is rejected at construction time."""
        with self.assertRaises(ValueError) as ctx:
            socks._SocksMachine('RESOLVE', 1234, 443)
        self.assertTrue(
            "'host' must be" in str(ctx.exception)
        )
    def test_illegal_ip_addr(self):
        """_create_ip_address also rejects a non-string host."""
        with self.assertRaises(ValueError) as ctx:
            socks._create_ip_address(1234, 443)
        self.assertTrue(
            "'host' must be" in str(ctx.exception)
        )
    def test_connect_but_no_creator(self):
        """CONNECT requires a create_connection callable."""
        with self.assertRaises(ValueError) as ctx:
            socks._SocksMachine(
                'CONNECT', u'foo.bar',
            )
        self.assertTrue(
            "create_connection function required" in str(ctx.exception)
        )
    @defer.inlineCallbacks
    def test_connect_socks_illegal_packet(self):
        """Server replying with an illegal method byte disconnects both sides."""
        # Fake server: answer the method-negotiation with an invalid byte.
        class BadSocksServer(Protocol):
            def __init__(self):
                self._buffer = b''
            def dataReceived(self, data):
                self._buffer += data
                if len(self._buffer) == 3:
                    assert self._buffer == b'\x05\x01\x00'
                    self._buffer = b''
                    self.transport.write(b'\x05\x01\x01')
        factory = socks._TorSocksFactory(u'meejah.ca', 1234, 'CONNECT', Mock())
        server_proto = BadSocksServer()
        server_transport = FakeTransport(server_proto, isServer=True)
        client_proto = factory.buildProtocol('ignored')
        client_transport = FakeTransport(client_proto, isServer=False)
        pump = yield connect(
            server_proto, server_transport,
            client_proto, client_transport,
        )
        self.assertTrue(server_proto.transport.disconnected)
        self.assertTrue(client_proto.transport.disconnected)
        pump.flush()
    @defer.inlineCallbacks
    def test_connect_socks_unknown_version(self):
        """Server replying with SOCKS version 0xff disconnects both sides."""
        class BadSocksServer(Protocol):
            def __init__(self):
                self._buffer = b''
                self._recv_stack = [
                    (b'\x05\x01\x00', b'\x05\xff'),
                ]
            def dataReceived(self, data):
                self._buffer += data
                if len(self._recv_stack) == 0:
                    # NOTE(review): asserting a non-empty string literal is
                    # always true; likely meant `assert False, "...".format(...)`.
                    assert "not expecting any more data, got {}".format(repr(self._buffer))
                    return
                expecting, to_send = self._recv_stack.pop(0)
                got = self._buffer[:len(expecting)]
                self._buffer = self._buffer[len(expecting):]
                assert got == expecting, "wanted {} but got {}".format(repr(expecting), repr(got))
                self.transport.write(to_send)
        factory = socks._TorSocksFactory(u'1.2.3.4', 1234, 'CONNECT', Mock())
        server_proto = BadSocksServer()
        server_transport = FakeTransport(server_proto, isServer=True)
        client_proto = factory.buildProtocol('ignored')
        client_transport = FakeTransport(client_proto, isServer=False)
        # returns IOPump
        yield connect(
            server_proto, server_transport,
            client_proto, client_transport,
        )
        self.assertTrue(server_proto.transport.disconnected)
        self.assertTrue(client_proto.transport.disconnected)
    @defer.inlineCallbacks
    def test_connect_socks_unknown_reply_code(self):
        """An out-of-spec reply code surfaces as an 'Unknown SOCKS error-code'."""
        class BadSocksServer(Protocol):
            def __init__(self):
                self._buffer = b''
                self._recv_stack = [
                    (b'\x05\x01\x00', b'\x05\x00'),
                    # the \xff is an invalid reply-code
                    (b'\x05\x01\x00\x01\x01\x02\x03\x04\x04\xd2', b'\x05\xff\x00\x04\x01\x01\x01\x01'),
                ]
            def dataReceived(self, data):
                self._buffer += data
                if len(self._recv_stack) == 0:
                    # NOTE(review): always-true assert on a non-empty string.
                    assert "not expecting any more data, got {}".format(repr(self._buffer))
                    return
                expecting, to_send = self._recv_stack.pop(0)
                got = self._buffer[:len(expecting)]
                self._buffer = self._buffer[len(expecting):]
                assert got == expecting, "wanted {} but got {}".format(repr(expecting), repr(got))
                self.transport.write(to_send)
        factory = socks._TorSocksFactory(u'1.2.3.4', 1234, 'CONNECT', Mock())
        server_proto = BadSocksServer()
        server_transport = FakeTransport(server_proto, isServer=True)
        client_proto = factory.buildProtocol('ignored')
        client_transport = FakeTransport(client_proto, isServer=False)
        d = client_proto._machine.when_done()
        # returns IOPump
        yield connect(
            server_proto, server_transport,
            client_proto, client_transport,
        )
        with self.assertRaises(Exception) as ctx:
            yield d
        self.assertIn('Unknown SOCKS error-code', str(ctx.exception))
    @defer.inlineCallbacks
    def test_socks_relay_data(self):
        """After a successful CONNECT handshake, client bytes are relayed."""
        class BadSocksServer(Protocol):
            def __init__(self):
                self._buffer = b''
                self._recv_stack = [
                    (b'\x05\x01\x00', b'\x05\x00'),
                    (b'\x05\x01\x00\x01\x01\x02\x03\x04\x04\xd2', b'\x05\x00\x00\x01\x01\x02\x03\x04\x12\x34'),
                ]
            def dataReceived(self, data):
                self._buffer += data
                if len(self._recv_stack) == 0:
                    # NOTE(review): always-true assert on a non-empty string.
                    assert "not expecting any more data, got {}".format(repr(self._buffer))
                    return
                expecting, to_send = self._recv_stack.pop(0)
                got = self._buffer[:len(expecting)]
                self._buffer = self._buffer[len(expecting):]
                assert got == expecting, "wanted {} but got {}".format(repr(expecting), repr(got))
                self.transport.write(to_send)
        factory = socks._TorSocksFactory(u'1.2.3.4', 1234, 'CONNECT', Mock())
        server_proto = BadSocksServer()
        server_transport = FakeTransport(server_proto, isServer=True)
        client_proto = factory.buildProtocol('ignored')
        client_transport = FakeTransport(client_proto, isServer=False)
        pump = yield connect(
            server_proto, server_transport,
            client_proto, client_transport,
        )
        # should be relaying now, try sending some datas
        client_proto.transport.write(b'abcdef')
        pump.flush()
        self.assertEqual(b'abcdef', server_proto._buffer)
    @defer.inlineCallbacks
    def test_socks_ipv6(self):
        """CONNECT to an IPv6 host uses address-type 0x04 and still relays."""
        class BadSocksServer(Protocol):
            def __init__(self):
                self._buffer = b''
                self._recv_stack = [
                    (b'\x05\x01\x00', b'\x05\x00'),
                    (b'\x05\x01\x00\x04\x20\x02\x44\x93\x04\xd2',
                     b'\x05\x00\x00\x04' + (b'\x00' * 16) + b'\xbe\xef'),
                ]
            def dataReceived(self, data):
                self._buffer += data
                if len(self._recv_stack) == 0:
                    # NOTE(review): always-true assert on a non-empty string.
                    assert "not expecting any more data, got {}".format(repr(self._buffer))
                    return
                expecting, to_send = self._recv_stack.pop(0)
                got = self._buffer[:len(expecting)]
                self._buffer = self._buffer[len(expecting):]
                assert got == expecting, "wanted {} but got {}".format(repr(expecting), repr(got))
                self.transport.write(to_send)
        factory = socks._TorSocksFactory(u'2002:4493:5105::a299:9bff:fe0e:4471', 1234, 'CONNECT', Mock())
        server_proto = BadSocksServer()
        expected_address = object()
        server_transport = FakeTransport(server_proto, isServer=True)
        client_proto = factory.buildProtocol(u'ignored')
        client_transport = FakeTransport(client_proto, isServer=False, hostAddress=expected_address)
        pump = yield connect(
            server_proto, server_transport,
            client_proto, client_transport,
        )
        # should be relaying now, try sending some datas
        client_proto.transport.write(b'abcdef')
        addr = yield factory._get_address()
        # FIXME how shall we test for IPv6-ness?
        assert addr is expected_address
        pump.flush()
        self.assertEqual(b'abcdef', server_proto._buffer)
    def test_end_to_end_wrong_method(self):
        """A server method other than 0/2 triggers an on_disconnect error."""
        dis = []
        def on_disconnect(error_message):
            dis.append(error_message)
        sm = socks._SocksMachine('RESOLVE', u'meejah.ca', 443, on_disconnect=on_disconnect)
        sm.connection()
        sm.feed_data(b'\x05')
        sm.feed_data(b'\x01')
        # we should have sent the request to the server, and nothing
        # else (because we disconnected)
        data = BytesIO()
        sm.send_data(data.write)
        self.assertEqual(
            b'\x05\x01\x00',
            data.getvalue(),
        )
        self.assertEqual(1, len(dis))
        self.assertEqual("Wanted method 0 or 2, got 1", dis[0])
    def test_end_to_end_wrong_version(self):
        """A non-5 SOCKS version byte triggers an on_disconnect error."""
        dis = []
        def on_disconnect(error_message):
            dis.append(error_message)
        sm = socks._SocksMachine('RESOLVE', u'meejah.ca', 443, on_disconnect=on_disconnect)
        sm.connection()
        sm.feed_data(b'\x06')
        sm.feed_data(b'\x00')
        # we should have sent the request to the server, and nothing
        # else (because we disconnected)
        data = BytesIO()
        sm.send_data(data.write)
        self.assertEqual(
            b'\x05\x01\x00',
            data.getvalue(),
        )
        self.assertEqual(1, len(dis))
        self.assertEqual("Expected version 5, got 6", dis[0])
    def test_end_to_end_connection_refused(self):
        """Reply code 0x05 maps to ConnectionRefusedError's message."""
        dis = []
        def on_disconnect(error_message):
            dis.append(error_message)
        sm = socks._SocksMachine(
            'CONNECT', u'1.2.3.4', 443,
            on_disconnect=on_disconnect,
            create_connection=lambda a, p: None,
        )
        sm.connection()
        sm.feed_data(b'\x05')
        sm.feed_data(b'\x00')
        # reply with 'connection refused'
        sm.feed_data(b'\x05\x05\x00\x01\x00\x00\x00\x00\xff\xff')
        self.assertEqual(1, len(dis))
        self.assertEqual(socks.ConnectionRefusedError.message, dis[0])
    def test_end_to_end_successful_relay(self):
        """Successful CONNECT relays data into the created protocol and
        propagates disconnection reasons to connectionLost."""
        class Proto(object):
            data = b''
            lost = []
            def dataReceived(self, d):
                self.data = self.data + d
            def connectionLost(self, reason):
                self.lost.append(reason)
        the_proto = Proto()
        dis = []
        def on_disconnect(error_message):
            dis.append(error_message)
        sm = socks._SocksMachine(
            'CONNECT', u'1.2.3.4', 443,
            on_disconnect=on_disconnect,
            create_connection=lambda a, p: the_proto,
        )
        sm.connection()
        sm.feed_data(b'\x05')
        sm.feed_data(b'\x00')
        # reply with success, port 0x1234
        sm.feed_data(b'\x05\x00\x00\x01\x00\x00\x00\x00\x12\x34')
        # now some data that should get relayed
        sm.feed_data(b'this is some relayed data')
        # should *not* have disconnected
        self.assertEqual(0, len(dis))
        # NOTE(review): assertTrue(x, msg) treats the 2nd arg as a failure
        # message; assertEqual was probably intended here.
        self.assertTrue(the_proto.data, b"this is some relayed data")
        sm.disconnected(socks.SocksError("it's fine"))
        self.assertEqual(1, len(Proto.lost))
        self.assertTrue("it's fine" in str(Proto.lost[0]))
    def test_end_to_end_success(self):
        """RESOLVE emits the method-negotiation plus the resolve request."""
        sm = socks._SocksMachine('RESOLVE', u'meejah.ca', 443)
        sm.connection()
        sm.feed_data(b'\x05')
        sm.feed_data(b'\x00')
        # now we check we got the right bytes out the other side
        data = BytesIO()
        sm.send_data(data.write)
        self.assertEqual(
            b'\x05\x01\x00'
            b'\x05\xf0\x00\x03\tmeejah.ca\x00\x00',
            data.getvalue(),
        )
    def test_end_to_end_connect_and_relay(self):
        """CONNECT emits negotiation + connect request for an IPv4 host."""
        sm = socks._SocksMachine(
            'CONNECT', u'1.2.3.4', 443,
            create_connection=lambda a, p: None,
        )
        sm.connection()
        sm.feed_data(b'\x05')
        sm.feed_data(b'\x00')
        sm.feed_data(b'some relayed data')
        # now we check we got the right bytes out the other side
        data = BytesIO()
        sm.send_data(data.write)
        self.assertEqual(
            b'\x05\x01\x00'
            b'\x05\x01\x00\x01\x01\x02\x03\x04\x01\xbb',
            data.getvalue(),
        )
    def test_resolve(self):
        """RESOLVE request bytes after an accepted version reply."""
        # kurt: most things use (host, port) tuples, this probably
        # should too
        sm = socks._SocksMachine('RESOLVE', u'meejah.ca', 443)
        sm.connection()
        sm.version_reply(0x00)
        data = BytesIO()
        sm.send_data(data.write)
        self.assertEqual(
            b'\x05\x01\x00'
            b'\x05\xf0\x00\x03\tmeejah.ca\x00\x00',
            data.getvalue(),
        )
    @defer.inlineCallbacks
    def test_resolve_with_reply(self):
        """A domain-name reply split across feeds is buffered correctly."""
        # kurt: most things use (host, port) tuples, this probably
        # should too
        sm = socks._SocksMachine('RESOLVE', u'meejah.ca', 443)
        sm.connection()
        sm.version_reply(0x00)
        # make sure the state-machine wanted to send out the correct
        # request.
        data = BytesIO()
        sm.send_data(data.write)
        self.assertEqual(
            b'\x05\x01\x00'
            b'\x05\xf0\x00\x03\tmeejah.ca\x00\x00',
            data.getvalue(),
        )
        # now feed it a reply (but not enough to parse it yet!)
        d = sm.when_done()
        # ...we have to send at least 8 bytes, but NOT the entire hostname
        sm.feed_data(b'\x05\x00\x00\x03')
        sm.feed_data(b'\x06meeja')
        self.assertTrue(not d.called)
        # now send the rest, checking the buffering in _parse_domain_name_reply
        sm.feed_data(b'h\x00\x00')
        self.assertTrue(d.called)
        answer = yield d
        # XXX answer *should* be not-bytes, though I think
        self.assertEqual(b'meejah', answer)
    @defer.inlineCallbacks
    def test_unknown_response_type(self):
        """An unexpected address/response type (0xaf) raises SocksError."""
        # kurt: most things use (host, port) tuples, this probably
        # should too
        sm = socks._SocksMachine('RESOLVE', u'meejah.ca', 443)
        sm.connection()
        # don't actually support username/password (which is version 0x02) yet
        # sm.version_reply(0x02)
        sm.version_reply(0)
        # make sure the state-machine wanted to send out the correct
        # request.
        data = BytesIO()
        sm.send_data(data.write)
        self.assertEqual(
            b'\x05\x01\x00'
            b'\x05\xf0\x00\x03\tmeejah.ca\x00\x00',
            data.getvalue(),
        )
        sm.feed_data(b'\x05\x00\x00\xaf\x00\x00\x00\x00')
        with self.assertRaises(socks.SocksError) as ctx:
            yield sm.when_done()
        self.assertTrue('Unexpected response type 175' in str(ctx.exception))
    @defer.inlineCallbacks
    def test_resolve_ptr(self):
        """RESOLVE_PTR request bytes, and the reply parses to a dotted quad."""
        sm = socks._SocksMachine('RESOLVE_PTR', u'1.2.3.4', 443)
        sm.connection()
        sm.version_reply(0x00)
        data = BytesIO()
        sm.send_data(data.write)
        self.assertEqual(
            b'\x05\x01\x00'
            b'\x05\xf1\x00\x01\x01\x02\x03\x04\x00\x00',
            data.getvalue(),
        )
        sm.feed_data(
            b'\x05\x00\x00\x01\x00\x01\x02\xff\x12\x34'
        )
        addr = yield sm.when_done()
        self.assertEqual('0.1.2.255', addr)
    def test_connect(self):
        """CONNECT request bytes after an accepted version reply."""
        sm = socks._SocksMachine(
            'CONNECT', u'1.2.3.4', 443,
            create_connection=lambda a, p: None,
        )
        sm.connection()
        sm.version_reply(0x00)
        data = BytesIO()
        sm.send_data(data.write)
        self.assertEqual(
            b'\x05\x01\x00'
            b'\x05\x01\x00\x01\x01\x02\x03\x04\x01\xbb',
            data.getvalue(),
        )
# XXX should re-write (at LEAST) these to use Twisted's IOPump
class SocksConnectTests(unittest.TestCase):
    """Tests for socks.TorSocksEndpoint.connect(): handshake success with
    and without TLS, error replies, and _get_address behavior."""
    @defer.inlineCallbacks
    def test_connect_no_tls(self):
        """A plain (no-TLS) connect completes and yields the built protocol."""
        socks_ep = Mock()
        transport = proto_helpers.StringTransport()
        # Stand-in for the proxy endpoint: drive a successful SOCKS5
        # handshake directly against the client protocol.
        def connect(factory):
            factory.startFactory()
            proto = factory.buildProtocol("addr")
            proto.makeConnection(transport)
            self.assertEqual(b'\x05\x01\x00', transport.value())
            proto.dataReceived(b'\x05\x00')
            proto.dataReceived(b'\x05\x00\x00\x01\x00\x00\x00\x00\x00\x00')
            return proto
        socks_ep.connect = connect
        protocol = Mock()
        factory = Mock()
        factory.buildProtocol = Mock(return_value=protocol)
        ep = socks.TorSocksEndpoint(socks_ep, u'meejah.ca', 443)
        proto = yield ep.connect(factory)
        self.assertEqual(proto, protocol)
    @defer.inlineCallbacks
    def test_connect_deferred_proxy(self):
        """socks_endpoint may be a Deferred resolving to an endpoint."""
        socks_ep = Mock()
        directlyProvides(socks_ep, IStreamClientEndpoint)
        transport = proto_helpers.StringTransport()
        def connect(factory):
            factory.startFactory()
            proto = factory.buildProtocol("addr")
            proto.makeConnection(transport)
            self.assertEqual(b'\x05\x01\x00', transport.value())
            proto.dataReceived(b'\x05\x00')
            proto.dataReceived(b'\x05\x00\x00\x01\x00\x00\x00\x00\x00\x00')
            return proto
        socks_ep.connect = connect
        protocol = Mock()
        factory = Mock()
        factory.buildProtocol = Mock(return_value=protocol)
        ep = socks.TorSocksEndpoint(
            socks_endpoint=defer.succeed(socks_ep),
            host=u'meejah.ca',
            port=443,
        )
        proto = yield ep.connect(factory)
        self.assertEqual(proto, protocol)
    @defer.inlineCallbacks
    def test_connect_deferred_proxy_wrong_return(self):
        """A Deferred resolving to a non-endpoint raises ValueError."""
        class NotAnEndpoint(object):
            "definitely doesn't implement IStreamClientEndpoint"
        not_an_endpoint = NotAnEndpoint()
        factory = Mock()
        ep = socks.TorSocksEndpoint(
            socks_endpoint=defer.succeed(not_an_endpoint),
            host=u'meejah.ca',
            port=443,
        )
        with self.assertRaises(ValueError) as ctx:
            yield ep.connect(factory)
        self.assertIn(
            "must resolve to an IStreamClientEndpoint provider",
            str(ctx.exception),
        )
    @defer.inlineCallbacks
    def test_connect_tls(self):
        """tls=True still completes the handshake and yields the protocol."""
        socks_ep = Mock()
        transport = proto_helpers.StringTransport()
        def connect(factory):
            factory.startFactory()
            proto = factory.buildProtocol("addr")
            proto.makeConnection(transport)
            self.assertEqual(b'\x05\x01\x00', transport.value())
            proto.dataReceived(b'\x05\x00')
            proto.dataReceived(b'\x05\x00\x00\x01\x00\x00\x00\x00\x00\x00')
            return proto
        socks_ep.connect = connect
        protocol = Mock()
        factory = Mock()
        factory.buildProtocol = Mock(return_value=protocol)
        ep = socks.TorSocksEndpoint(socks_ep, u'meejah.ca', 443, tls=True)
        proto = yield ep.connect(factory)
        self.assertEqual(proto, protocol)
    @defer.inlineCallbacks
    def test_connect_tls_context(self):
        """tls may be a certificate-options object providing getContext()."""
        socks_ep = Mock()
        transport = proto_helpers.StringTransport()
        def connect(factory):
            factory.startFactory()
            proto = factory.buildProtocol("addr")
            proto.makeConnection(transport)
            self.assertEqual(b'\x05\x01\x00', transport.value())
            proto.dataReceived(b'\x05\x00')
            proto.dataReceived(b'\x05\x00\x00\x01\x00\x00\x00\x00\x00\x00')
            return proto
        socks_ep.connect = connect
        protocol = Mock()
        factory = Mock()
        factory.buildProtocol = Mock(return_value=protocol)
        from OpenSSL import SSL
        class CertificateOptions(object):
            def getContext(self, *args, **kw):
                return SSL.Context(SSL.TLSv1_METHOD)
        ep = socks.TorSocksEndpoint(socks_ep, u'meejah.ca', 443, tls=CertificateOptions())
        proto = yield ep.connect(factory)
        self.assertEqual(proto, protocol)
    @defer.inlineCallbacks
    def test_connect_socks_error(self):
        """Reply code 0x01 surfaces as GeneralServerFailureError."""
        socks_ep = Mock()
        transport = proto_helpers.StringTransport()
        def connect(factory):
            factory.startFactory()
            proto = factory.buildProtocol("addr")
            proto.makeConnection(transport)
            self.assertEqual(b'\x05\x01\x00', transport.value())
            proto.dataReceived(b'\x05\x00')
            proto.dataReceived(b'\x05\x01\x00\x01\x00\x00\x00\x00')
            return proto
        socks_ep.connect = connect
        protocol = Mock()
        factory = Mock()
        factory.buildProtocol = Mock(return_value=protocol)
        ep = socks.TorSocksEndpoint(socks_ep, u'meejah.ca', 443, tls=True)
        with self.assertRaises(Exception) as ctx:
            yield ep.connect(factory)
        self.assertTrue(isinstance(ctx.exception,
                                   socks.GeneralServerFailureError))
    @defer.inlineCallbacks
    def test_connect_socks_error_unknown(self):
        """An out-of-spec reply code (0xff) raises 'Unknown SOCKS error-code'."""
        socks_ep = Mock()
        transport = proto_helpers.StringTransport()
        def connect(factory):
            factory.startFactory()
            proto = factory.buildProtocol("addr")
            proto.makeConnection(transport)
            self.assertEqual(b'\x05\x01\x00', transport.value())
            proto.dataReceived(b'\x05\x00')
            proto.dataReceived(b'\x05\xff\x00\x01\x00\x00\x00\x00')
            return proto
        socks_ep.connect = connect
        protocol = Mock()
        factory = Mock()
        factory.buildProtocol = Mock(return_value=protocol)
        ep = socks.TorSocksEndpoint(socks_ep, u'meejah.ca', 443, tls=True)
        with self.assertRaises(Exception) as ctx:
            yield ep.connect(factory)
        self.assertTrue('Unknown SOCKS error-code' in str(ctx.exception))
    @defer.inlineCallbacks
    def test_connect_socks_illegal_byte(self):
        """NOTE(review): this test body is byte-for-byte identical to
        test_connect_socks_error; the 'illegal byte' case probably meant
        to feed different reply bytes."""
        socks_ep = Mock()
        transport = proto_helpers.StringTransport()
        def connect(factory):
            factory.startFactory()
            proto = factory.buildProtocol("addr")
            proto.makeConnection(transport)
            self.assertEqual(b'\x05\x01\x00', transport.value())
            proto.dataReceived(b'\x05\x00')
            proto.dataReceived(b'\x05\x01\x00\x01\x00\x00\x00\x00')
            return proto
        socks_ep.connect = connect
        protocol = Mock()
        factory = Mock()
        factory.buildProtocol = Mock(return_value=protocol)
        ep = socks.TorSocksEndpoint(socks_ep, u'meejah.ca', 443, tls=True)
        with self.assertRaises(Exception) as ctx:
            yield ep.connect(factory)
        self.assertTrue(isinstance(ctx.exception,
                                   socks.GeneralServerFailureError))
    @defer.inlineCallbacks
    def test_get_address_endpoint(self):
        """_get_address Deferreds fire after connect; each call is distinct."""
        socks_ep = Mock()
        transport = proto_helpers.StringTransport()
        delayed_addr = []
        def connect(factory):
            delayed_addr.append(factory._get_address())
            delayed_addr.append(factory._get_address())
            factory.startFactory()
            proto = factory.buildProtocol("addr")
            proto.makeConnection(transport)
            self.assertEqual(b'\x05\x01\x00', transport.value())
            proto.dataReceived(b'\x05\x00')
            proto.dataReceived(b'\x05\x00\x00\x01\x00\x00\x00\x00\x00\x00')
            return proto
        socks_ep.connect = connect
        protocol = Mock()
        factory = Mock()
        factory.buildProtocol = Mock(return_value=protocol)
        ep = socks.TorSocksEndpoint(socks_ep, u'meejah.ca', 443, tls=True)
        yield ep.connect(factory)
        addr = yield ep._get_address()
        self.assertEqual(addr, IPv4Address('TCP', '10.0.0.1', 12345))
        self.assertEqual(2, len(delayed_addr))
        self.assertTrue(delayed_addr[0] is not delayed_addr[1])
        self.assertTrue(all([d.called for d in delayed_addr]))
    @defer.inlineCallbacks
    def test_get_address(self):
        """_get_address fires on _did_connect; later calls fire immediately."""
        # normally, ._get_address is only called via the
        # attach_stream() method on Circuit
        addr = object()
        factory = socks._TorSocksFactory()
        d = factory._get_address()
        self.assertFalse(d.called)
        factory._did_connect(addr)
        maybe_addr = yield d
        self.assertEqual(addr, maybe_addr)
        # if we do it a second time, should be immediate
        d = factory._get_address()
        self.assertTrue(d.called)
        self.assertEqual(d.result, addr)
class SocksResolveTests(unittest.TestCase):
@defer.inlineCallbacks
def test_resolve(self):
socks_ep = Mock()
transport = proto_helpers.StringTransport()
def connect(factory):
factory.startFactory()
proto = factory.buildProtocol("addr")
proto.makeConnection(transport)
# XXX sadness: we probably "should" just feed the right
# bytes to the protocol to convince it a connection is
# made ... *or* we can cheat and just do the callback
# directly...
proto._machine._when_done.fire("the dns answer")
return proto
socks_ep.connect = connect
hn = yield socks.resolve(socks_ep, u'meejah.ca')
self.assertEqual(hn, "the dns answer")
@defer.inlineCallbacks
def test_resolve_ptr(self):
socks_ep = Mock()
transport = proto_helpers.StringTransport()
def connect(factory):
factory.startFactory()
proto = factory.buildProtocol("addr")
proto.makeConnection(transport)
# XXX sadness: we probably "should" just feed the right
# bytes to the protocol to convince it a connection is
# made ... *or* we can cheat and just do the callback
# directly...
proto._machine._when_done.fire(u"the dns answer")
return proto
socks_ep.connect = connect
hn = yield socks.resolve_ptr(socks_ep, u'meejah.ca')
self.assertEqual(hn, "the dns answer")
@patch('txtorcon.socks._TorSocksFactory')
def test_resolve_ptr_str(self, fac):
socks_ep = Mock()
d = socks.resolve_ptr(socks_ep, 'meejah.ca')
self.assertEqual(1, len(fac.mock_calls))
self.assertTrue(
isinstance(fac.mock_calls[0][1][0], text_type)
)
return d
@patch('txtorcon.socks._TorSocksFactory')
def test_resolve_str(self, fac):
socks_ep = Mock()
d = socks.resolve(socks_ep, 'meejah.ca')
self.assertEqual(1, len(fac.mock_calls))
self.assertTrue(
isinstance(fac.mock_calls[0][1][0], text_type)
)
return d
@patch('txtorcon.socks._TorSocksFactory')
def test_resolve_ptr_bytes(self, fac):
socks_ep = Mock()
d = socks.resolve_ptr(socks_ep, b'meejah.ca')
self.assertEqual(1, len(fac.mock_calls))
self.assertTrue(
isinstance(fac.mock_calls[0][1][0], text_type)
)
return d
@patch('txtorcon.socks._TorSocksFactory')
def test_resolve_bytes(self, fac):
socks_ep = Mock()
d = socks.resolve(socks_ep, b'meejah.ca')
self.assertEqual(1, len(fac.mock_calls))
self.assertTrue(
isinstance(fac.mock_calls[0][1][0], text_type)
)
return d
class SocksErrorTests(unittest.TestCase):
def _check_error(self, error, cls_, code, message):
self.assertTrue(isinstance(error, cls_))
self.assertEqual(error.code, code)
self.assertEqual(error.message, message)
self.assertEqual(str(error), message)
def test_error_factory(self):
for cls in socks.SocksError.__subclasses__():
error = socks._create_socks_error(cls.code)
self._check_error(error, cls, cls.code, cls.message)
def test_custom_error(self):
code = 0xFF
message = 'Custom error message'
self._check_error(socks.SocksError(message),
socks.SocksError, None, message)
self._check_error(socks.SocksError(message=message),
socks.SocksError, None, message)
self._check_error(socks.SocksError(code=code),
socks.SocksError, code, '')
self._check_error(socks.SocksError(message, code=code),
socks.SocksError, code, message)
self._check_error(socks.SocksError(message=message, code=code),
socks.SocksError, code, message)
| 35.686298
| 111
| 0.596376
| 3,414
| 29,691
| 5.039836
| 0.099004
| 0.015343
| 0.016738
| 0.016855
| 0.79141
| 0.772696
| 0.746019
| 0.707834
| 0.687086
| 0.66843
| 0
| 0.041999
| 0.294298
| 29,691
| 831
| 112
| 35.729242
| 0.779172
| 0.063049
| 0
| 0.689498
| 0
| 0
| 0.099475
| 0.039567
| 0
| 0
| 0.000719
| 0.001203
| 0.141553
| 1
| 0.100457
| false
| 0
| 0.018265
| 0.001522
| 0.165906
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
4cb04dfc726d4ac1800ff0dd0d01f5c28f942726
| 364
|
py
|
Python
|
python/testData/inspections/PyDataclassInspection/comparisonForManuallyOrdered.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 2
|
2019-04-28T07:48:50.000Z
|
2020-12-11T14:18:08.000Z
|
python/testData/inspections/PyDataclassInspection/comparisonForManuallyOrdered.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 173
|
2018-07-05T13:59:39.000Z
|
2018-08-09T01:12:03.000Z
|
python/testData/inspections/PyDataclassInspection/comparisonForManuallyOrdered.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 2
|
2020-03-15T08:57:37.000Z
|
2020-04-07T04:48:14.000Z
|
from dataclasses import dataclass
@dataclass(order=False)
class Test1:
def __gt__(self, other):
pass
@dataclass
class Test2:
def __gt__(self, other):
pass
print(Test1() < Test1())
print(Test2() < Test2())
print(Test1() > Test1())
print(Test2() > Test2())
print(Test1 < Test1)
print(Test2 < Test2)
print(Test1 > Test1)
print(Test2 > Test2)
| 15.826087
| 33
| 0.664835
| 46
| 364
| 5.086957
| 0.326087
| 0.17094
| 0.25641
| 0.34188
| 0.666667
| 0.512821
| 0.512821
| 0.512821
| 0.512821
| 0.512821
| 0
| 0.060606
| 0.184066
| 364
| 23
| 34
| 15.826087
| 0.727273
| 0
| 0
| 0.235294
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.117647
| false
| 0.117647
| 0.058824
| 0
| 0.294118
| 0.470588
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
|
0
| 4
|
4cb8191cdc92d1a5f9c909f5adfaf89d0827d41c
| 93
|
py
|
Python
|
test/test_getfiles.py
|
Zhu-Jianwei/LaTeX-helper
|
322bf686d1aee32804013d813d46bd27a4a8325f
|
[
"MIT"
] | 4
|
2022-03-13T12:02:38.000Z
|
2022-03-13T15:30:20.000Z
|
test/test_getfiles.py
|
Zhu-Jianwei/LaTeX-helper
|
322bf686d1aee32804013d813d46bd27a4a8325f
|
[
"MIT"
] | null | null | null |
test/test_getfiles.py
|
Zhu-Jianwei/LaTeX-helper
|
322bf686d1aee32804013d813d46bd27a4a8325f
|
[
"MIT"
] | 3
|
2022-02-06T08:05:37.000Z
|
2022-02-07T08:26:48.000Z
|
from utils.fileio import *
if __name__ == '__main__':
print(get_tex_list_recursive('.'))
| 23.25
| 38
| 0.709677
| 12
| 93
| 4.583333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.139785
| 93
| 4
| 38
| 23.25
| 0.6875
| 0
| 0
| 0
| 0
| 0
| 0.095745
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0.333333
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
4cc547610eb48bfe2850a07303a37c73843e903c
| 17
|
py
|
Python
|
src/passpredict/_version.py
|
samtx/pass-predictor
|
6577f75cd7d64bd3c12a9512880d4b29c2682b4c
|
[
"MIT"
] | 4
|
2017-01-31T07:12:48.000Z
|
2018-12-02T21:30:14.000Z
|
layer-atlas/version.gradle
|
netguru/Atlas-Android
|
7a3ca807c8d641346bcb73811cb0d308dca16efe
|
[
"Apache-2.0"
] | null | null | null |
layer-atlas/version.gradle
|
netguru/Atlas-Android
|
7a3ca807c8d641346bcb73811cb0d308dca16efe
|
[
"Apache-2.0"
] | null | null | null |
version = '0.5.0'
| 17
| 17
| 0.588235
| 4
| 17
| 2.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 0.117647
| 17
| 1
| 17
| 17
| 0.466667
| 0
| 0
| 0
| 0
| 0
| 0.277778
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
4cee5af5f6e26e8e12d1a1f706296d1d58e7b45d
| 162
|
py
|
Python
|
checkers/5Go/client/__init__.py
|
C4T-BuT-S4D/stay-home-ctf-2022
|
7084f53a69e3e25d62a1aaad8b7dfe1cd663a4ec
|
[
"WTFPL"
] | 4
|
2022-02-06T19:56:46.000Z
|
2022-02-12T15:21:47.000Z
|
services/5Go/client/__init__.py
|
C4T-BuT-S4D/stay-home-ctf-2022
|
7084f53a69e3e25d62a1aaad8b7dfe1cd663a4ec
|
[
"WTFPL"
] | null | null | null |
services/5Go/client/__init__.py
|
C4T-BuT-S4D/stay-home-ctf-2022
|
7084f53a69e3e25d62a1aaad8b7dfe1cd663a4ec
|
[
"WTFPL"
] | 2
|
2022-02-07T09:59:47.000Z
|
2022-02-07T10:22:20.000Z
|
from pathlib import Path
import sys
sys.path.insert(0, str(Path(__file__).absolute().parent / 'proto'))
from .daeh5 import Daeh5 # noqa
__all__ = ('Daeh5',)
| 16.2
| 67
| 0.703704
| 23
| 162
| 4.608696
| 0.652174
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.028986
| 0.148148
| 162
| 9
| 68
| 18
| 0.73913
| 0.024691
| 0
| 0
| 0
| 0
| 0.064103
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.6
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
4cef4c98ae09635815699f63ee6fc37238126027
| 165
|
py
|
Python
|
tests/extmod/ure_debug.py
|
TG-Techie/circuitpython
|
390295dd218fb705fe652de77132dea472adf1ed
|
[
"MIT",
"BSD-3-Clause",
"MIT-0",
"Unlicense"
] | 3
|
2020-01-09T21:50:22.000Z
|
2020-01-15T08:27:48.000Z
|
tests/extmod/ure_debug.py
|
TG-Techie/circuitpython
|
390295dd218fb705fe652de77132dea472adf1ed
|
[
"MIT",
"BSD-3-Clause",
"MIT-0",
"Unlicense"
] | null | null | null |
tests/extmod/ure_debug.py
|
TG-Techie/circuitpython
|
390295dd218fb705fe652de77132dea472adf1ed
|
[
"MIT",
"BSD-3-Clause",
"MIT-0",
"Unlicense"
] | 1
|
2020-01-11T12:42:41.000Z
|
2020-01-11T12:42:41.000Z
|
# test printing debugging info when compiling
try:
import ure
except ImportError:
print("SKIP")
raise SystemExit
ure.compile("^a|b[0-9]\w$", ure.DEBUG)
| 18.333333
| 45
| 0.690909
| 24
| 165
| 4.75
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014815
| 0.181818
| 165
| 8
| 46
| 20.625
| 0.82963
| 0.260606
| 0
| 0
| 0
| 0
| 0.133333
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0.166667
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
4cf5f417e64e0bebe31c66283110f507586b44ab
| 444
|
py
|
Python
|
Handlers/BaseHandler.py
|
Nuit-De-L-Info-2016-STRI-DL/Backend
|
418f8411cd556c6fc96c7f19c976560e773e35f0
|
[
"MIT"
] | null | null | null |
Handlers/BaseHandler.py
|
Nuit-De-L-Info-2016-STRI-DL/Backend
|
418f8411cd556c6fc96c7f19c976560e773e35f0
|
[
"MIT"
] | 3
|
2016-12-01T20:37:56.000Z
|
2017-09-28T11:02:52.000Z
|
Handlers/BaseHandler.py
|
Nuit-De-L-Info-2016-STRI-DL/Backend
|
418f8411cd556c6fc96c7f19c976560e773e35f0
|
[
"MIT"
] | null | null | null |
import tornado
class BaseHandler(tornado.web.RequestHandler):
"""Superclass for Handlers which require a connected user
"""
@property
def redis_client(self):
return self.application.redis_client
def get_current_user(self):
"""Get current connected user
:return: current connected user
"""
return self.get_secure_cookie("user")
def get(self):
self.render('404.html')
| 21.142857
| 61
| 0.653153
| 51
| 444
| 5.568627
| 0.54902
| 0.137324
| 0.140845
| 0.183099
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009036
| 0.252252
| 444
| 20
| 62
| 22.2
| 0.846386
| 0.265766
| 0
| 0
| 0
| 0
| 0.040268
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.111111
| 0.111111
| 0.777778
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 4
|
980ae279fc6ff666cb86472b0e41e07b61f192a0
| 219
|
py
|
Python
|
master/routers/spiders.py
|
sdulsj/spider_platform
|
8850fdfc1bb12817ef47da89856da68a7a52fd2e
|
[
"MIT"
] | 7
|
2018-08-17T09:04:18.000Z
|
2021-10-05T17:02:28.000Z
|
master/routers/spiders.py
|
parker-pu/spider_platform
|
8850fdfc1bb12817ef47da89856da68a7a52fd2e
|
[
"MIT"
] | 1
|
2021-06-04T05:50:43.000Z
|
2021-06-04T05:50:43.000Z
|
master/routers/spiders.py
|
parker-pu/spider_platform
|
8850fdfc1bb12817ef47da89856da68a7a52fd2e
|
[
"MIT"
] | 3
|
2019-03-21T09:40:45.000Z
|
2020-05-09T13:27:59.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
# @Date : 2018/8/15
# @Author: lsj
# @File : views.py
# @Desc :
默认Python版本支持:3.6
"""
from app.routers import blueprint_spider
from flask_login import login_required
| 18.25
| 40
| 0.6621
| 32
| 219
| 4.4375
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.054645
| 0.164384
| 219
| 11
| 41
| 19.909091
| 0.721311
| 0.579909
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0.5
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
|
0
| 4
|
e24923e9e8b668ee62513b6705dcec9a67c54140
| 138
|
py
|
Python
|
ldndc2nc/__main__.py
|
deekaey/ldndc2nc
|
43dcf4ec1175fe9535c0182cefc1553db18f7472
|
[
"BSD-3-Clause"
] | null | null | null |
ldndc2nc/__main__.py
|
deekaey/ldndc2nc
|
43dcf4ec1175fe9535c0182cefc1553db18f7472
|
[
"BSD-3-Clause"
] | null | null | null |
ldndc2nc/__main__.py
|
deekaey/ldndc2nc
|
43dcf4ec1175fe9535c0182cefc1553db18f7472
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""ldndc2nc.__main__: executed when ldndc2nc directory is called as script."""
from .ldndc2nc import main
main()
| 23
| 78
| 0.702899
| 18
| 138
| 5.166667
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.033898
| 0.144928
| 138
| 5
| 79
| 27.6
| 0.754237
| 0.688406
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
e24ae2ac9ca6b814c6d97818e0f21f67872cd6dc
| 81
|
py
|
Python
|
blog_embed/__init__.py
|
insaction-dev/markdown-blog-embed
|
a45fd7641d600a203b778c9d4d6e7b07460bbfe3
|
[
"MIT"
] | null | null | null |
blog_embed/__init__.py
|
insaction-dev/markdown-blog-embed
|
a45fd7641d600a203b778c9d4d6e7b07460bbfe3
|
[
"MIT"
] | null | null | null |
blog_embed/__init__.py
|
insaction-dev/markdown-blog-embed
|
a45fd7641d600a203b778c9d4d6e7b07460bbfe3
|
[
"MIT"
] | null | null | null |
"""Allows custom embeds of other websites' data through simple link copy-paste"""
| 81
| 81
| 0.777778
| 12
| 81
| 5.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.123457
| 81
| 1
| 81
| 81
| 0.887324
| 0.925926
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
e286c0d52f3f7354d99fd6d4b897ffa7143497f8
| 87
|
py
|
Python
|
apps/zabbix/apps.py
|
ykyk1229/TurtleDove
|
074ed03396d603a920bc382ee916fc0f3adab6ea
|
[
"MIT"
] | 3
|
2020-06-11T10:57:22.000Z
|
2021-03-25T02:45:05.000Z
|
apps/zabbix/apps.py
|
ykyk1229/TurtleDove
|
074ed03396d603a920bc382ee916fc0f3adab6ea
|
[
"MIT"
] | 2
|
2020-08-05T07:59:45.000Z
|
2020-08-05T08:00:48.000Z
|
apps/zabbix/apps.py
|
sunfan666/cmdb
|
71722a8dddf5e337d7658328cfcac0c9108b067c
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class ZabbixConfig(AppConfig):
name = 'zabbix'
| 14.5
| 33
| 0.747126
| 10
| 87
| 6.5
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.172414
| 87
| 5
| 34
| 17.4
| 0.902778
| 0
| 0
| 0
| 0
| 0
| 0.068966
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
2c33286bf46b60906134beaba1829ed0697c48cd
| 2,426
|
py
|
Python
|
libs/python/test/test_imaged_object_api.py
|
Scripta-Qumranica-Electronica/SQE_API_Connectors
|
aaa9b9eb8709d4257c32ea57321a179c6b1e041a
|
[
"MIT"
] | null | null | null |
libs/python/test/test_imaged_object_api.py
|
Scripta-Qumranica-Electronica/SQE_API_Connectors
|
aaa9b9eb8709d4257c32ea57321a179c6b1e041a
|
[
"MIT"
] | null | null | null |
libs/python/test/test_imaged_object_api.py
|
Scripta-Qumranica-Electronica/SQE_API_Connectors
|
aaa9b9eb8709d4257c32ea57321a179c6b1e041a
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
SQE API
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import qumranica_api_connector
from qumranica_api_connector.api.imaged_object_api import ImagedObjectApi # noqa: E501
from qumranica_api_connector.rest import ApiException
class TestImagedObjectApi(unittest.TestCase):
"""ImagedObjectApi unit test stubs"""
def setUp(self):
self.api = qumranica_api_connector.api.imaged_object_api.ImagedObjectApi() # noqa: E501
def tearDown(self):
pass
def test_v1_editions_edition_id_imaged_objects_get(self):
"""Test case for v1_editions_edition_id_imaged_objects_get
Provides a listing of imaged objects related to the specified edition, can include images and also their masks with optional. # noqa: E501
"""
pass
def test_v1_editions_edition_id_imaged_objects_imaged_object_id_get(self):
"""Test case for v1_editions_edition_id_imaged_objects_imaged_object_id_get
Provides information for the specified imaged object related to the specified edition, can include images and also their masks with optional. # noqa: E501
"""
pass
def test_v1_imaged_objects_imaged_object_id_get(self):
"""Test case for v1_imaged_objects_imaged_object_id_get
Provides information for the specified imaged object. # noqa: E501
"""
pass
def test_v1_imaged_objects_imaged_object_id_text_fragments_get(self):
"""Test case for v1_imaged_objects_imaged_object_id_text_fragments_get
Provides a list of all text fragments that should correspond to the imaged object. # noqa: E501
"""
pass
def test_v1_imaged_objects_institutions_get(self):
"""Test case for v1_imaged_objects_institutions_get
Provides a list of all institutional image providers. # noqa: E501
"""
pass
def test_v1_imaged_objects_institutions_institution_name_get(self):
"""Test case for v1_imaged_objects_institutions_institution_name_get
Provides a list of all institutional image providers. # noqa: E501
"""
pass
if __name__ == '__main__':
unittest.main()
| 31.921053
| 164
| 0.731245
| 317
| 2,426
| 5.252366
| 0.268139
| 0.101502
| 0.072072
| 0.046847
| 0.67027
| 0.67027
| 0.659459
| 0.592793
| 0.592793
| 0.467868
| 0
| 0.021432
| 0.211459
| 2,426
| 75
| 165
| 32.346667
| 0.848928
| 0.5169
| 0
| 0.291667
| 1
| 0
| 0.007929
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0.291667
| 0.208333
| 0
| 0.583333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 4
|
2c4e3c128a7b528636324830f29b2f4c8c54768f
| 138
|
py
|
Python
|
training/unsupervised/ndim/operations.py
|
purijs/Sentinel-Training-Data-Generation
|
6d707cf197b7c822076c9cb58ca9b79c5ffdb297
|
[
"MIT"
] | null | null | null |
training/unsupervised/ndim/operations.py
|
purijs/Sentinel-Training-Data-Generation
|
6d707cf197b7c822076c9cb58ca9b79c5ffdb297
|
[
"MIT"
] | null | null | null |
training/unsupervised/ndim/operations.py
|
purijs/Sentinel-Training-Data-Generation
|
6d707cf197b7c822076c9cb58ca9b79c5ffdb297
|
[
"MIT"
] | null | null | null |
class NdComputations(object):
def __init__(self, lerp, nd_distance):
self.lerp = lerp
self.nd_distance = nd_distance
| 23
| 42
| 0.673913
| 17
| 138
| 5.058824
| 0.529412
| 0.348837
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.23913
| 138
| 6
| 43
| 23
| 0.819048
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
2c71c76dbc5cf3746ed57a3d539725bfb3b8bd30
| 60
|
py
|
Python
|
python/testData/resolve/TypeDunderDocNewStyleClass.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 2
|
2018-12-29T09:53:39.000Z
|
2018-12-29T09:53:42.000Z
|
python/testData/resolve/TypeDunderDocNewStyleClass.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 173
|
2018-07-05T13:59:39.000Z
|
2018-08-09T01:12:03.000Z
|
python/testData/resolve/TypeDunderDocNewStyleClass.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 2
|
2020-03-15T08:57:37.000Z
|
2020-04-07T04:48:14.000Z
|
class A(object):
pass
print(A.__doc__)
# <ref>
| 10
| 16
| 0.533333
| 8
| 60
| 3.5
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.316667
| 60
| 6
| 17
| 10
| 0.682927
| 0.083333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0
| 0
| 0.333333
| 0.333333
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 4
|
2ca3c7bc970d88cb2f150a75cd389256ab450ff4
| 383
|
py
|
Python
|
0-python-tutorial/02-comments01.py
|
luis2ra/py3-00-w3schools
|
6bb851837f8ef9520491d13fa2c909047c9b18cf
|
[
"MIT"
] | null | null | null |
0-python-tutorial/02-comments01.py
|
luis2ra/py3-00-w3schools
|
6bb851837f8ef9520491d13fa2c909047c9b18cf
|
[
"MIT"
] | null | null | null |
0-python-tutorial/02-comments01.py
|
luis2ra/py3-00-w3schools
|
6bb851837f8ef9520491d13fa2c909047c9b18cf
|
[
"MIT"
] | null | null | null |
# @author: https://github.com/luis2ra from https://www.w3schools.com/python/python_comments.asp
'''
Python Comments
Comments can be used to explain Python code.
Comments can be used to make the code more readable.
Comments can be used to prevent execution when testing code.
Comments starts with a #, and Python will ignore them.
'''
# This is a comment.
print("Hello, World!")
| 23.9375
| 95
| 0.751958
| 60
| 383
| 4.783333
| 0.633333
| 0.114983
| 0.135889
| 0.1777
| 0.198606
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006173
| 0.154047
| 383
| 15
| 96
| 25.533333
| 0.87963
| 0.906005
| 0
| 0
| 0
| 0
| 0.481481
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 4
|
e2c836e650ec6b90ea6e834a95a542a893d342de
| 27
|
py
|
Python
|
src/pydictionaria/__init__.py
|
clld/pydictionaria
|
e86f849ee732e11c82830dd10cf29fcbc455ced3
|
[
"Apache-2.0"
] | 1
|
2022-02-23T10:35:21.000Z
|
2022-02-23T10:35:21.000Z
|
src/pydictionaria/__init__.py
|
dictionaria/pydictionaria
|
4c18edc3b3bb95bf44dd3b2b910aaff3fcd14045
|
[
"Apache-2.0"
] | 17
|
2019-05-10T07:47:25.000Z
|
2022-03-05T23:53:11.000Z
|
src/pydictionaria/__init__.py
|
clld/pydictionaria
|
e86f849ee732e11c82830dd10cf29fcbc455ced3
|
[
"Apache-2.0"
] | null | null | null |
#
__version__ = '2.2.dev0'
| 9
| 24
| 0.62963
| 4
| 27
| 3.25
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.130435
| 0.148148
| 27
| 2
| 25
| 13.5
| 0.434783
| 0
| 0
| 0
| 0
| 0
| 0.307692
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
e2e6e109535f8bb2e122bb20dd4f01afe3c28c67
| 10,248
|
py
|
Python
|
papertalk/utils/mendeley/example.py
|
karissa/papertalk
|
79d77383b1a6fb41e9fef8b9c4f619df97cc8a7c
|
[
"MIT"
] | null | null | null |
papertalk/utils/mendeley/example.py
|
karissa/papertalk
|
79d77383b1a6fb41e9fef8b9c4f619df97cc8a7c
|
[
"MIT"
] | 2
|
2016-02-04T23:59:58.000Z
|
2016-02-04T23:59:58.000Z
|
papertalk/utils/mendeley/example.py
|
karissa/papertalk
|
79d77383b1a6fb41e9fef8b9c4f619df97cc8a7c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""
Mendeley Open API Example Client
Copyright (c) 2010, Mendeley Ltd. <copyright@mendeley.com>
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
For details of the Mendeley Open API see http://dev.mendeley.com/
Example usage:
python example.py
"""
from pprint import pprint
from mendeley_client import *
import os
import sys
# edit config.json first
mendeley = create_client()
########################################
######## Public Resources Tests ########
########################################
print """
-----------------------------------------------------
Canonical document details
-----------------------------------------------------"""
response = mendeley.details('cbcca150-6cff-11df-a2b2-0026b95e3eb7')
pprint(response)
print """
-----------------------------------------------------
Canonical document details DOI look up
-----------------------------------------------------"""
response = mendeley.details('10.1371%2Fjournal.ppat.1000281', type='doi')
pprint(response)
print """
-----------------------------------------------------
Canonical document details PubMed Id look up
-----------------------------------------------------"""
response = mendeley.details('19910365', type='pmid')
pprint(response)
print """
-----------------------------------------------------
Categories
-----------------------------------------------------"""
response = mendeley.categories()
pprint(response)
print """
-----------------------------------------------------
Subcategories
-----------------------------------------------------"""
response = mendeley.subcategories(3)
pprint(response)
print """
-----------------------------------------------------
Search
-----------------------------------------------------"""
response = mendeley.search('phiC31', items=10)
pprint(response)
print """
-----------------------------------------------------
Tagged 'modularity'
-----------------------------------------------------"""
response = mendeley.tagged('modularity', items=5)
pprint(response)
print """
-----------------------------------------------------
Tagged 'test' in category 14
-----------------------------------------------------"""
response = mendeley.tagged('test', cat=14)
pprint(response)
print """
-----------------------------------------------------
Tagged 'modularity' in subcategory 'Bioinformatics'
-----------------------------------------------------"""
response = mendeley.tagged('modularity', subcat=455)
pprint(response)
print """
-----------------------------------------------------
Related
-----------------------------------------------------"""
response = mendeley.related('91df2740-6d01-11df-a2b2-0026b95e3eb7')
pprint(response)
print """
-----------------------------------------------------
Authored by 'Ann Cowan'
-----------------------------------------------------"""
response = mendeley.authored('Ann Cowan', items=5)
pprint(response)
print """
-----------------------------------------------------
Public groups
-----------------------------------------------------"""
response = mendeley.public_groups()
pprint(response)
groupId = '536181'
print """
-----------------------------------------------------
Public group details
-----------------------------------------------------"""
response = mendeley.public_group_details(groupId)
pprint(response)
print """
-----------------------------------------------------
Public group documents
-----------------------------------------------------"""
response = mendeley.public_group_docs(groupId)
pprint(response)
print """
-----------------------------------------------------
Public group people
-----------------------------------------------------"""
response = mendeley.public_group_people(groupId)
pprint(response)
print """
-----------------------------------------------------
Author statistics
-----------------------------------------------------"""
response = mendeley.author_stats()
pprint(response)
print """
-----------------------------------------------------
Papers statistics
-----------------------------------------------------"""
response = mendeley.paper_stats()
pprint(response)
print """
-----------------------------------------------------
Publications outlets statistics
-----------------------------------------------------"""
response = mendeley.publication_stats()
pprint(response)
###############################################
######## User Specific Resources Tests ########
###############################################
print """
-----------------------------------------------------
My Library authors statistics
-----------------------------------------------------"""
response = mendeley.library_author_stats()
pprint(response)
print """
-----------------------------------------------------
My Library tag statistics
-----------------------------------------------------"""
response = mendeley.library_tag_stats()
pprint(response)
print """
-----------------------------------------------------
My Library publication statistics
-----------------------------------------------------"""
response = mendeley.library_publication_stats()
pprint(response)
### Library ###
print 'Library'
print """
-----------------------------------------------------
My Library documents
-----------------------------------------------------"""
documents = mendeley.library()
pprint(documents)
print """
-----------------------------------------------------
Create a new library document
-----------------------------------------------------"""
response = mendeley.create_document(document={'type' : 'Book','title': 'Document creation test', 'year': 2008})
pprint(response)
documentId = response['document_id']
print """
-----------------------------------------------------
Document details
-----------------------------------------------------"""
response = mendeley.document_details(documentId)
pprint(response)
print """
-----------------------------------------------------
Delete library document
-----------------------------------------------------"""
response = mendeley.delete_library_document(documentId)
pprint(response)
print """
-----------------------------------------------------
Documents authored
-----------------------------------------------------"""
response = mendeley.documents_authored()
pprint(response)
print """
-----------------------------------------------------
Create new folder
-----------------------------------------------------"""
response = mendeley.create_folder(folder={'name': 'Test folder creation'})
pprint(response)
folderId = response['folder_id']
print """
-----------------------------------------------------
Create new child folder
-----------------------------------------------------"""
response = mendeley.create_folder(folder={'name': 'Test child folder creation', 'parent':folderId})
pprint(response)
print """
-----------------------------------------------------
List folders
-----------------------------------------------------"""
folders = mendeley.folders()
pprint(folders)
print """
-----------------------------------------------------
Delete folder
-----------------------------------------------------"""
response = mendeley.delete_folder(folderId)
pprint(response)
print """
-----------------------------------------------------
Create public open group
-----------------------------------------------------"""
response = mendeley.create_group(group={'name':'My awesome public group', 'type': 'open'})
pprint(response)
groupId = response["group_id"]
print """
-----------------------------------------------------
Delete public group
-----------------------------------------------------"""
response = mendeley.delete_group(groupId)
pprint(response)
print """
-----------------------------------------------------
Create private group
-----------------------------------------------------"""
response = mendeley.create_group(group={'name':'Private group test', 'type': 'private'})
pprint(response)
groupId = response['group_id']
print """
-----------------------------------------------------
Create new group folder
-----------------------------------------------------"""
response = mendeley.create_group_folder(groupId, folder={'name': 'Test folder creation'})
pprint(response)
folderId = response['folder_id']
print """
-----------------------------------------------------
Create new child group folder
-----------------------------------------------------"""
response = mendeley.create_group_folder(groupId, folder={'name': 'Test child folder creation', 'parent':folderId})
pprint(response)
print """
-----------------------------------------------------
List group folders
-----------------------------------------------------"""
folders = mendeley.group_folders(groupId)
pprint(folders)
print """
-----------------------------------------------------
Delete group folder
-----------------------------------------------------"""
response = mendeley.delete_group_folder(groupId, folderId)
pprint(response)
print """
-----------------------------------------------------
Delete private group
-----------------------------------------------------"""
response = mendeley.delete_group(groupId)
pprint(response)
print """
-----------------------------------------------------
Current user's profile info
-----------------------------------------------------"""
response = mendeley.my_profile_info()
pprint(response)
print """
-----------------------------------------------------
Current user's contacts
-----------------------------------------------------"""
response = mendeley.contacts()
pprint(response)
| 26.968421
| 114
| 0.419594
| 703
| 10,248
| 6.054054
| 0.26458
| 0.139098
| 0.125
| 0.030545
| 0.305921
| 0.255639
| 0.178571
| 0.140038
| 0.12406
| 0.096805
| 0
| 0.009196
| 0.076795
| 10,248
| 379
| 115
| 27.039578
| 0.440651
| 0.010343
| 0
| 0.646825
| 0
| 0
| 0.634349
| 0.481108
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.015873
| null | null | 0.325397
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
e2e9fd5c53d5066b87e3c2911fa1333d6225513f
| 253
|
py
|
Python
|
lstchain/visualization/tests/test_plot_dl2.py
|
labsaha/cta-lstchain
|
7e65e2cd9c42fbc827b0dcf1bccc7141203ebd22
|
[
"BSD-3-Clause"
] | null | null | null |
lstchain/visualization/tests/test_plot_dl2.py
|
labsaha/cta-lstchain
|
7e65e2cd9c42fbc827b0dcf1bccc7141203ebd22
|
[
"BSD-3-Clause"
] | null | null | null |
lstchain/visualization/tests/test_plot_dl2.py
|
labsaha/cta-lstchain
|
7e65e2cd9c42fbc827b0dcf1bccc7141203ebd22
|
[
"BSD-3-Clause"
] | 3
|
2021-06-25T14:20:17.000Z
|
2021-06-25T16:01:33.000Z
|
import pandas as pd
from lstchain.visualization import plot_dl2
from lstchain.tests.test_lstchain import dl2_file, dl2_params_lstcam_key
def test_plot_disp():
    """Smoke test: plot_disp accepts a DL2 dataframe loaded from the test file."""
    # Load the DL2 parameters table produced by the lstchain test fixtures.
    dataframe = pd.read_hdf(dl2_file, key=dl2_params_lstcam_key)
    plot_dl2.plot_disp(dataframe)
| 31.625
| 72
| 0.822134
| 44
| 253
| 4.340909
| 0.454545
| 0.125654
| 0.157068
| 0.188482
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.035714
| 0.114625
| 253
| 8
| 73
| 31.625
| 0.816964
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.5
| 0
| 0.666667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 4
|
3949f89dbcad477050d6fca188d02fd75c7d5286
| 917
|
py
|
Python
|
importio2/extractor_data.py
|
import-io/import-io-api-python
|
5c838a357742233e714b2ccfd19d25c18531cfa3
|
[
"Apache-2.0"
] | 1
|
2021-08-18T03:27:40.000Z
|
2021-08-18T03:27:40.000Z
|
importio2/extractor_data.py
|
import-io/import-io-api-python
|
5c838a357742233e714b2ccfd19d25c18531cfa3
|
[
"Apache-2.0"
] | null | null | null |
importio2/extractor_data.py
|
import-io/import-io-api-python
|
5c838a357742233e714b2ccfd19d25c18531cfa3
|
[
"Apache-2.0"
] | 2
|
2021-09-13T14:28:50.000Z
|
2021-09-27T17:56:21.000Z
|
#
# Copyright 2016 Import.io
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
class ExtractorData(object):
    """Marker base class for the data objects returned by an extractor run."""
class CSVData(ExtractorData):
    """CSV extractor output: an optional header plus indexable data rows.

    Supports ``len()`` and item access, both delegated to the stored rows.
    """

    def __init__(self, header=None, data=None):
        # Stored as given — no copying or validation is performed.
        self._header, self._data = header, data

    def __getitem__(self, item):
        # Indexing/slicing is forwarded straight to the underlying rows.
        rows = self._data
        return rows[item]

    def __len__(self):
        rows = self._data
        return len(rows)
class LogData(ExtractorData):
    """Placeholder type for log output produced by an extractor run."""
| 24.131579
| 74
| 0.714286
| 129
| 917
| 4.953488
| 0.612403
| 0.093897
| 0.040689
| 0.050078
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010989
| 0.206107
| 917
| 37
| 75
| 24.783784
| 0.866758
| 0.59542
| 0
| 0.166667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0.166667
| 0
| 0.166667
| 0.666667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 4
|
1a3c439ef6960c58744d8611e7ba305dc3cc1c62
| 202
|
py
|
Python
|
images/python_analysis/src/venv/lib/python3.7/site-packages/elasticsearch/_async/__init__.py
|
Jael24/TB_ElasticStack
|
f9aad11eda69045140a90f28739b558bf077d877
|
[
"MIT"
] | 2
|
2021-05-01T05:40:55.000Z
|
2021-06-25T13:34:46.000Z
|
images/python_analysis/src/venv/lib/python3.7/site-packages/elasticsearch/_async/__init__.py
|
Jael24/TB_ElasticStack
|
f9aad11eda69045140a90f28739b558bf077d877
|
[
"MIT"
] | 2
|
2021-02-22T14:55:01.000Z
|
2021-03-23T12:42:33.000Z
|
images/python_analysis/src/venv/lib/python3.7/site-packages/elasticsearch/_async/__init__.py
|
Jael24/TB_ElasticStack
|
f9aad11eda69045140a90f28739b558bf077d877
|
[
"MIT"
] | 1
|
2020-05-06T01:31:18.000Z
|
2020-05-06T01:31:18.000Z
|
# Licensed to Elasticsearch B.V under one or more agreements.
# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
# See the LICENSE file in the project root for more information
| 50.5
| 75
| 0.787129
| 35
| 202
| 4.542857
| 0.714286
| 0.176101
| 0.188679
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011976
| 0.173267
| 202
| 3
| 76
| 67.333333
| 0.94012
| 0.965347
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
1ab86d302051275aaf3796d4a4cb1e5d91b522c6
| 164
|
py
|
Python
|
eddrit/routes/api/__init__.py
|
corenting/eddrit
|
640db842d48afa8f8b379f7412c90ad9216312df
|
[
"MIT"
] | 9
|
2020-10-16T20:29:05.000Z
|
2022-03-06T08:07:06.000Z
|
eddrit/routes/api/__init__.py
|
corenting/eddrit
|
640db842d48afa8f8b379f7412c90ad9216312df
|
[
"MIT"
] | 7
|
2020-10-16T16:34:57.000Z
|
2022-01-19T17:30:29.000Z
|
eddrit/routes/api/__init__.py
|
corenting/eddrit
|
640db842d48afa8f8b379f7412c90ad9216312df
|
[
"MIT"
] | null | null | null |
from starlette.routing import Mount
from eddrit.routes.api.comments import routes as comments_routes
# Top-level API route table: mounts the comments sub-router (defined in
# eddrit.routes.api.comments) under the /comments path prefix.
routes = [
Mount("/comments", routes=comments_routes),
]
| 20.5
| 64
| 0.77439
| 21
| 164
| 5.952381
| 0.47619
| 0.336
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.134146
| 164
| 7
| 65
| 23.428571
| 0.880282
| 0
| 0
| 0
| 0
| 0
| 0.054878
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 4
|
1acbe268f731437128a0884f2c6149516b56478e
| 170
|
py
|
Python
|
Language Skills/Python/Unit 08 Loops/02 Practice makes perfect/Fun with Numbers/4-digit_sum.py
|
rhyep/Python_tutorials
|
f5c8a64b91802b005dfe7dd9035f8d8daae8c3e3
|
[
"MIT"
] | 346
|
2016-02-22T20:21:10.000Z
|
2022-01-27T20:55:53.000Z
|
Language Skills/Python/Unit 8/2-Practice makes perfect/Fun with Numbers/4-digit_sum.py
|
vpstudios/Codecademy-Exercise-Answers
|
ebd0ee8197a8001465636f52c69592ea6745aa0c
|
[
"MIT"
] | 55
|
2016-04-07T13:58:44.000Z
|
2020-06-25T12:20:24.000Z
|
Language Skills/Python/Unit 8/2-Practice makes perfect/Fun with Numbers/4-digit_sum.py
|
vpstudios/Codecademy-Exercise-Answers
|
ebd0ee8197a8001465636f52c69592ea6745aa0c
|
[
"MIT"
] | 477
|
2016-02-21T06:17:02.000Z
|
2021-12-22T10:08:01.000Z
|
def digit_sum(n):
    """Return the sum of the decimal digits of the integer ``n``.

    Generalized to accept negative integers by summing the digits of
    ``abs(n)`` (the original raised ValueError on the '-' character).
    For non-negative inputs the result is unchanged.

    >>> digit_sum(1234)
    10
    """
    # str() exposes each decimal digit as one character; sum their int values.
    # The original built a list with a manual append loop and then had an
    # unreachable Python-2 `print sum(b)` after the return — both removed.
    return sum(int(digit) for digit in str(abs(n)))
| 18.888889
| 27
| 0.341176
| 24
| 170
| 2.375
| 0.541667
| 0.140351
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.541176
| 170
| 8
| 28
| 21.25
| 0.730769
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0.125
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
46d41f15f46cf2b1d80352d88c0c09e2a4750a52
| 68,387
|
py
|
Python
|
test/test_flow_class.py
|
RViMLab/oflibpytorch
|
5a6573fe25e174a6d3bd678320b6c20cccfc9b0a
|
[
"MIT"
] | 5
|
2021-08-08T21:03:44.000Z
|
2022-02-23T21:50:08.000Z
|
test/test_flow_class.py
|
RViMLab/oflibpytorch
|
5a6573fe25e174a6d3bd678320b6c20cccfc9b0a
|
[
"MIT"
] | null | null | null |
test/test_flow_class.py
|
RViMLab/oflibpytorch
|
5a6573fe25e174a6d3bd678320b6c20cccfc9b0a
|
[
"MIT"
] | 1
|
2021-08-08T21:03:46.000Z
|
2021-08-08T21:03:46.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright: 2021, Claudio S. Ravasio
# License: MIT (https://opensource.org/licenses/MIT)
# Author: Claudio S. Ravasio, PhD student at University College London (UCL), research assistant at King's College
# London (KCL), supervised by:
# Dr Christos Bergeles, PI of the Robotics and Vision in Medicine (RViM) lab in the School of Biomedical Engineering &
# Imaging Sciences (BMEIS) at King's College London (KCL)
# Prof Lyndon Da Cruz, consultant ophthalmic surgeon, Moorfields Eye Hospital, London UK
#
# This file is part of oflibpytorch
import unittest
import torch
import cv2
import numpy as np
import math
import sys
sys.path.append('..')
from src.oflibpytorch.flow_class import Flow
from src.oflibpytorch.utils import to_numpy, apply_flow, matrix_from_transforms, resize_flow
class FlowTest(unittest.TestCase):
def test_flow(self):
if torch.cuda.is_available():
expected_device_list = ['cpu', 'cuda', 'cpu']
else:
expected_device_list = ['cpu', 'cpu', 'cpu']
vecs_np_2hw = np.zeros((2, 100, 200))
vecs_np_hw2 = np.zeros((100, 200, 2))
vecs_pt_2hw = torch.zeros((2, 100, 200))
vecs_pt_hw2 = torch.zeros((100, 200, 2))
mask_empty = None
mask_np = np.ones((100, 200), 'bool')
mask_pt = torch.ones(100, 200).to(torch.bool)
for vecs in [vecs_np_2hw, vecs_np_hw2, vecs_pt_2hw, vecs_pt_hw2]:
for ref, ref_expected in zip(['t', 's', None], ['t', 's', 't']):
for mask in [mask_empty, mask_np, mask_pt]:
for device, device_expected in zip(['cpu', 'cuda', None], expected_device_list):
flow = Flow(vecs, ref=ref, mask=mask, device=device)
self.assertIsNone(np.testing.assert_equal(to_numpy(flow.vecs), vecs_np_2hw))
self.assertIsNone(np.testing.assert_equal(flow.vecs_numpy, vecs_np_hw2))
self.assertEqual(flow.ref, ref_expected)
self.assertIsNone(np.testing.assert_equal(to_numpy(flow.mask), mask_np))
self.assertEqual(flow.device, device_expected)
self.assertEqual(flow.vecs.device.type, device_expected)
self.assertEqual(flow.mask.device.type, device_expected)
# tensor to cuda, test cuda
if torch.cuda.is_available():
expected_device_list = ['cpu', 'cuda', 'cuda']
else:
expected_device_list = ['cpu', 'cpu', 'cpu']
vecs_pt_cuda = torch.zeros((2, 100, 200)).to('cuda')
for ref, ref_expected in zip(['t', 's', None], ['t', 's', 't']):
for mask in [mask_empty, mask_np, mask_pt]:
for device, device_expected in zip(['cpu', 'cuda', None], expected_device_list):
flow = Flow(vecs_pt_cuda, ref=ref, mask=mask, device=device)
self.assertIsNone(np.testing.assert_equal(to_numpy(flow.vecs), vecs_np_2hw))
self.assertIsNone(np.testing.assert_equal(flow.vecs_numpy, vecs_np_hw2))
self.assertEqual(flow.ref, ref_expected)
self.assertIsNone(np.testing.assert_equal(to_numpy(flow.mask), mask_np))
self.assertEqual(flow.device, device_expected)
self.assertEqual(flow.vecs.device.type, device_expected)
self.assertEqual(flow.mask.device.type, device_expected)
# Wrong flow vector type or shape
with self.assertRaises(TypeError):
Flow('test')
with self.assertRaises(ValueError):
Flow(np.zeros((2, 100, 200, 1)))
with self.assertRaises(ValueError):
Flow(torch.ones(2, 100, 200, 1))
with self.assertRaises(ValueError):
Flow(np.zeros((3, 100, 200)))
with self.assertRaises(ValueError):
Flow(torch.ones(3, 100, 200))
# Invalid flow vector values
vectors = np.random.rand(100, 200, 2)
vectors[10, 10] = np.NaN
vectors[20, 20] = np.Inf
vectors[30, 30] = -np.Inf
with self.assertRaises(ValueError):
Flow(vectors)
vectors = torch.tensor(vectors)
with self.assertRaises(ValueError):
Flow(vectors)
# Wrong mask shape
vecs = torch.zeros((2, 100, 200))
with self.assertRaises(TypeError):
Flow(vecs, mask='test')
with self.assertRaises(ValueError):
Flow(vecs, mask=np.zeros((2, 100, 200)))
with self.assertRaises(ValueError):
Flow(vecs, mask=torch.ones(2, 100, 200))
with self.assertRaises(ValueError):
Flow(vecs, mask=np.zeros((101, 200)))
with self.assertRaises(ValueError):
Flow(vecs, mask=torch.ones(100, 201))
with self.assertRaises(ValueError):
Flow(vecs, mask=np.ones((100, 200)) * 20)
with self.assertRaises(ValueError):
Flow(vecs, mask=torch.ones(100, 200) * 10)
def test_zero(self):
if torch.cuda.is_available():
expected_device_list = ['cpu', 'cuda']
else:
expected_device_list = ['cpu', 'cpu']
shape = [200, 300]
zero_flow = Flow.zero(shape)
self.assertIsNone(np.testing.assert_equal(zero_flow.shape, shape))
self.assertIsNone(np.testing.assert_equal(zero_flow.vecs_numpy, 0))
self.assertIs(zero_flow.ref, 't')
zero_flow = Flow.zero(shape, 's')
self.assertIs(zero_flow.ref, 's')
for device, expected_device in zip(['cpu', 'cuda'], expected_device_list):
flow = Flow.zero(shape, device=device)
self.assertEqual(flow.vecs.device.type, expected_device)
self.assertEqual(flow.mask.device.type, expected_device)
def test_from_matrix(self):
# With reference 's', this simply corresponds to using flow_from_matrix, tested in test_utils.
# With reference 't':
# Rotation of 30 degrees clockwise around point [10, 50] (hor, ver)
matrix_np = np.array([[math.sqrt(3) / 2, -.5, 26.3397459622],
[.5, math.sqrt(3) / 2, 1.69872981078],
[0, 0, 1]])
matrix_pt = torch.tensor(matrix_np)
shape = [200, 300]
matrix_device_list = ['cpu', 'cuda']
flow_device_list = ['cpu', 'cuda', None]
if torch.cuda.is_available():
flow_expected_device_list = ['cpu', 'cuda', None]
else:
flow_expected_device_list = ['cpu', 'cpu', 'cpu']
for matrix in [matrix_pt, matrix_np]:
for matrix_device in matrix_device_list:
for flow_device, flow_expected_device in zip(flow_device_list, flow_expected_device_list):
if isinstance(matrix, torch.Tensor):
matrix = matrix.to(matrix_device)
flow = Flow.from_matrix(matrix, shape, 't', device=flow_device)
if flow_expected_device is None: # If no device passed, expect same device as the matrix passed in
flow_expected_device = matrix.device.type if isinstance(matrix, torch.Tensor) else 'cpu'
self.assertEqual(flow.device, flow_expected_device)
self.assertIsNone(np.testing.assert_allclose(flow.vecs_numpy[50, 10], [0, 0], atol=1e-4))
self.assertIsNone(np.testing.assert_allclose(flow.vecs_numpy[50, 299], [38.7186583063, 144.5],
atol=1e-4, rtol=1e-4))
self.assertIsNone(np.testing.assert_allclose(flow.vecs_numpy[199, 10], [-74.5, 19.9622148361],
atol=1e-4, rtol=1e-4))
self.assertIsNone(np.testing.assert_equal(flow.shape, shape))
def test_from_transforms(self):
shape = [10, 20]
transforms = [['rotation', 5, 10, -30]]
for device in ['cpu', 'cuda', None]:
flow = Flow.from_transforms(transforms, shape, device=device)
expected_device = device if torch.cuda.is_available() and device is not None else 'cpu'
self.assertEqual(flow.device, expected_device)
def test_from_kitti(self):
path = 'kitti.png'
f = Flow.from_kitti(path, load_valid=True)
desired_flow = np.arange(0, 10)[:, np.newaxis] * np.arange(0, 20)[np.newaxis, :]
self.assertIsNone(np.testing.assert_equal(f.vecs_numpy[..., 0], desired_flow))
self.assertIsNone(np.testing.assert_equal(f.vecs_numpy[..., 1], 0))
self.assertIsNone(np.testing.assert_equal(f.mask_numpy[:, 0], True))
self.assertIsNone(np.testing.assert_equal(f.mask_numpy[:, 10], False))
f = Flow.from_kitti(path, load_valid=False)
self.assertIsNone(np.testing.assert_equal(f.mask_numpy, True))
with self.assertRaises(TypeError): # Wrong load_valid type
Flow.from_kitti(path, load_valid='test')
with self.assertRaises(ValueError): # Wrong path
Flow.from_kitti('test')
with self.assertRaises(ValueError): # Wrong flow shape
Flow.from_kitti('kitti_wrong.png')
def test_from_sintel(self):
path = 'sintel.flo'
f = Flow.from_sintel(path)
desired_flow = np.arange(0, 10)[:, np.newaxis] * np.arange(0, 20)[np.newaxis, :]
self.assertIsNone(np.testing.assert_equal(f.vecs_numpy[..., 0], desired_flow))
self.assertIsNone(np.testing.assert_equal(f.mask_numpy, True))
f = Flow.from_sintel(path, 'sintel_invalid.png')
self.assertIsNone(np.testing.assert_equal(f.mask_numpy[:, 0], True))
self.assertIsNone(np.testing.assert_equal(f.mask_numpy[:, 10], False))
with self.assertRaises(ValueError): # Wrong tag
Flow.from_sintel('sintel_wrong.flo')
with self.assertRaises(ValueError): # Wrong mask path
Flow.from_sintel(path, 'test.png')
with self.assertRaises(ValueError): # Wrong mask shape
Flow.from_sintel(path, 'sintel_invalid_wrong.png')
def test_copy(self):
vectors = np.random.rand(200, 200, 2)
mask = np.random.rand(200, 200) > 0.5
for ref in ['t', 's']:
for device in ['cpu', 'cuda']:
flow = Flow(vectors, ref, mask, device)
flow_copy = flow.copy()
self.assertIsNone(np.testing.assert_equal(flow.vecs_numpy, flow_copy.vecs_numpy))
self.assertIsNone(np.testing.assert_equal(flow.mask_numpy, flow_copy.mask_numpy))
self.assertEqual(flow.ref, flow_copy.ref)
self.assertEqual(flow.device, flow_copy.device)
self.assertNotEqual(id(flow), id(flow_copy))
def test_to_device(self):
vectors = np.random.rand(200, 200, 2)
mask = np.random.rand(200, 200) > 0.5
for ref in ['t', 's']:
for start_device in ['cpu', 'cuda']:
for target_device in ['cpu', 'cuda']:
flow = Flow(vectors, ref, mask, start_device)
f = flow.to_device(target_device)
self.assertIsNone(np.testing.assert_equal(flow.vecs_numpy, f.vecs_numpy))
self.assertIsNone(np.testing.assert_equal(flow.mask_numpy, f.mask_numpy))
self.assertEqual(flow.ref, f.ref)
self.assertEqual(f.device, target_device)
def test_str(self):
flow = Flow.zero(shape=(100, 200), ref='s', device='cuda')
self.assertEqual(str(flow)[:54],
"Flow object, reference s, shape 100*200, device cuda; ")
def test_getitem(self):
vectors = np.random.rand(200, 200, 2)
flow = Flow(vectors)
indices = np.random.randint(0, 150, size=(20, 2))
for i in indices:
# Cutting a number of elements
self.assertIsNone(np.testing.assert_allclose(flow[i].vecs_numpy, vectors[i]))
# Cutting a specific item
self.assertIsNone(np.testing.assert_allclose(flow[i[0]:i[0] + 1, i[1]:i[1] + 1].vecs_numpy,
vectors[i[0]:i[0] + 1, i[1]:i[1] + 1]))
# Cutting an area
self.assertIsNone(np.testing.assert_allclose(flow[i[0]:i[0] + 40, i[1]:i[1] + 40].vecs_numpy,
vectors[i[0]:i[0] + 40, i[1]:i[1] + 40]))
# Make sure the device hasn't changed
for device in ['cpu', 'cuda']:
flow = Flow(vectors, device=device)
expected_device = device if torch.cuda.is_available() else 'cpu'
self.assertEqual(flow[10:20].device, expected_device)
def test_add(self):
mask1 = np.ones((100, 200), 'bool')
mask1[:40] = 0
mask2 = np.ones((100, 200), 'bool')
mask2[60:] = 0
vecs1 = np.random.rand(100, 200, 2)
vecs2 = np.random.rand(100, 200, 2)
vecs2_np_2hw = np.random.rand(2, 100, 200)
vecs2_pt_2hw = torch.rand(2, 100, 200)
vecs2_pt_hw2 = torch.rand(100, 200, 2)
vec_list = [vecs2, vecs2_np_2hw, vecs2_pt_2hw, vecs2_pt_hw2]
if torch.cuda.is_available():
vec_list.append(torch.rand(2, 100, 200).to('cuda'))
vec_list.append(torch.rand(100, 200, 2).to('cuda'))
vecs3 = np.random.rand(200, 200, 2)
flow1 = Flow(vecs1, mask=mask1)
flow2 = Flow(vecs2, mask=mask2)
flow3 = Flow(vecs3)
# Addition
for vecs in vec_list:
if isinstance(vecs, torch.Tensor):
v = to_numpy(vecs)
else:
v = vecs
if v.shape[0] == 2:
v = np.moveaxis(v, 0, -1)
self.assertIsNone(np.testing.assert_allclose((flow1 + vecs).vecs_numpy, vecs1 + v,
rtol=1e-6, atol=1e-6))
self.assertEqual((flow1 + vecs).device, flow1.vecs.device.type)
self.assertEqual((flow1 + vecs).device, flow1.mask.device.type)
self.assertIsNone(np.testing.assert_allclose((flow1 + flow2).vecs_numpy, vecs1 + vecs2, rtol=1e-6, atol=1e-6))
self.assertIsNone(np.testing.assert_equal(np.sum(to_numpy((flow1 + flow2).mask)), (60 - 40) * 200))
with self.assertRaises(TypeError):
flow1 + 'test'
with self.assertRaises(ValueError):
flow1 + flow3
with self.assertRaises(ValueError):
flow1 + vecs3
def test_sub(self):
mask1 = np.ones((100, 200), 'bool')
mask1[:40] = 0
mask2 = np.ones((100, 200), 'bool')
mask2[60:] = 0
vecs1 = np.random.rand(100, 200, 2)
vecs2 = np.random.rand(100, 200, 2)
vecs2_np_2hw = np.random.rand(2, 100, 200)
vecs2_pt_2hw = torch.rand(2, 100, 200)
vecs2_pt_hw2 = torch.rand(100, 200, 2)
vec_list = [vecs2, vecs2_np_2hw, vecs2_pt_2hw, vecs2_pt_hw2]
if torch.cuda.is_available():
vec_list.append(torch.rand(2, 100, 200).to('cuda'))
vec_list.append(torch.rand(100, 200, 2).to('cuda'))
vecs3 = np.random.rand(200, 200, 2)
flow1 = Flow(vecs1, mask=mask1)
flow2 = Flow(vecs2, mask=mask2)
flow3 = Flow(vecs3)
# Subtraction
for vecs in vec_list:
if isinstance(vecs, torch.Tensor):
v = to_numpy(vecs)
else:
v = vecs
if v.shape[0] == 2:
v = np.moveaxis(v, 0, -1)
self.assertIsNone(np.testing.assert_allclose((flow1 - vecs).vecs_numpy, vecs1 - v,
rtol=1e-6, atol=1e-6))
self.assertEqual((flow1 + vecs).device, flow1.vecs.device.type)
self.assertEqual((flow1 + vecs).device, flow1.mask.device.type)
self.assertIsNone(np.testing.assert_allclose((flow1 - flow2).vecs_numpy, vecs1 - vecs2, rtol=1e-6, atol=1e-6))
self.assertIsNone(np.testing.assert_equal(np.sum(to_numpy((flow1 - flow2).mask)), (60 - 40) * 200))
with self.assertRaises(TypeError):
flow1 - 'test'
with self.assertRaises(ValueError):
flow1 - flow3
with self.assertRaises(ValueError):
flow1 - vecs3
def test_mul(self):
vecs1 = np.random.rand(100, 200, 2)
vecs2 = np.random.rand(100, 200, 2)
flow1 = Flow(vecs1)
# Multiplication
ints = np.random.randint(-10, 10, 100)
floats = (np.random.rand(100) - .5) * 20
# ... using ints and floats
for i, f in zip(ints, floats):
self.assertIsNone(np.testing.assert_allclose((flow1 * i).vecs_numpy, vecs1 * i, rtol=1e-6, atol=1e-6))
self.assertIsNone(np.testing.assert_allclose((flow1 * f).vecs_numpy, vecs1 * f, rtol=1e-6, atol=1e-6))
# ... using a list of length 2
int_list = np.random.randint(-10, 10, (100, 2))
for li in int_list:
v = vecs1.astype('f')
v[..., 0] *= li[0]
v[..., 1] *= li[1]
self.assertIsNone(np.testing.assert_allclose((flow1 * list(li)).vecs_numpy, v, rtol=1e-6, atol=1e-6))
# ... using a numpy array of size 2
int_list = np.random.randint(-10, 10, (100, 2))
for li in int_list:
v = vecs1.astype('f')
v[..., 0] *= li[0]
v[..., 1] *= li[1]
self.assertIsNone(np.testing.assert_allclose((flow1 * li).vecs_numpy, v, rtol=1e-6, atol=1e-6))
# ... using a numpy array and torch tensor of the same shape as the flow
vecs2_pt_hw2 = torch.rand(100, 200, 2)
vecs_list = [vecs2, vecs2_pt_hw2]
if torch.cuda.is_available():
vecs_list.append(torch.rand(100, 200, 2).to('cuda'))
for vecs in vecs_list:
if isinstance(vecs, torch.Tensor):
v = to_numpy(vecs)
else:
v = vecs
self.assertIsNone(np.testing.assert_allclose((flow1 * vecs[..., 0]).vecs_numpy, vecs1 * v[..., :1],
rtol=1e-6, atol=1e-6))
self.assertEqual((flow1 * vecs[..., 0]).device, flow1.vecs.device.type)
self.assertEqual((flow1 * vecs[..., 0]).device, flow1.mask.device.type)
# ... using numpy arrays and torch tensors of the same shape as the flow vectors
vecs2_np_2hw = np.random.rand(2, 100, 200)
vecs2_pt_2hw = torch.rand(2, 100, 200)
vecs2_pt_hw2 = torch.rand(100, 200, 2)
vecs_list = [vecs2, vecs2_np_2hw, vecs2_pt_2hw, vecs2_pt_hw2]
if torch.cuda.is_available():
vecs_list.append(torch.rand(2, 100, 200).to('cuda'))
vecs_list.append(torch.rand(100, 200, 2).to('cuda'))
for vecs in vecs_list:
if isinstance(vecs, torch.Tensor):
v = to_numpy(vecs)
else:
v = vecs
if v.shape[0] == 2:
v = np.moveaxis(v, 0, -1)
self.assertIsNone(np.testing.assert_allclose((flow1 * vecs).vecs_numpy, vecs1 * v,
rtol=1e-6, atol=1e-6))
self.assertEqual((flow1 * vecs).device, flow1.vecs.device.type)
self.assertEqual((flow1 * vecs).device, flow1.mask.device.type)
# ... using a list of the wrong length
with self.assertRaises(ValueError):
flow1 * [0, 1, 2]
# ... using a numpy array of the wrong size
with self.assertRaises(ValueError):
flow1 * np.array([0, 1, 2])
# ... using a numpy array of the wrong shape
with self.assertRaises(ValueError):
flow1 * np.random.rand(200, 200)
# ... using a numpy array of the wrong shape
with self.assertRaises(ValueError):
flow1 * np.random.rand(200, 200, 2)
# ... using a numpy array of the wrong shape
with self.assertRaises(ValueError):
flow1 * np.random.rand(100, 200, 3)
# ... using a numpy array of the wrong shape
with self.assertRaises(ValueError):
flow1 * np.random.rand(200, 200, 2, 1)
def test_div(self):
vecs1 = np.random.rand(100, 200, 2) + .5
vecs2 = -np.random.rand(100, 200, 2) - .5
flow1 = Flow(vecs1)
# Division
ints = np.random.randint(-10, 10, 100)
floats = (np.random.rand(100) - .5) * 20
# ... using ints and floats
for i, f in zip(ints, floats):
if i < -1e-5 or i > 1e-5:
self.assertIsNone(np.testing.assert_allclose((flow1 / i).vecs_numpy, vecs1 / i, rtol=1e-6, atol=1e-6))
if f < -1e-5 or f > 1e-5:
self.assertIsNone(np.testing.assert_allclose((flow1 / f).vecs_numpy, vecs1 / f, rtol=1e-6, atol=1e-6))
# ... using a list of length 2
int_list = np.random.randint(-10, 10, (100, 2))
for li in int_list:
if li[0] != 0 and li[1] != 0:
v = vecs1.astype('f')
v[..., 0] /= li[0]
v[..., 1] /= li[1]
self.assertIsNone(np.testing.assert_allclose((flow1 / list(li)).vecs_numpy, v, rtol=1e-6, atol=1e-6))
# ... using a numpy array of size 2
int_list = np.random.randint(-10, 10, (100, 2))
for li in int_list:
if li[0] != 0 and li[1] != 0:
v = vecs1.astype('f')
v[..., 0] /= li[0]
v[..., 1] /= li[1]
self.assertIsNone(np.testing.assert_allclose((flow1 / li).vecs_numpy, v, rtol=1e-6, atol=1e-6))
# ... using a numpy array and torch tensor of the same shape as the flow
vecs2_pt_hw2 = torch.rand(100, 200, 2) + .5
vecs_list = [vecs2, vecs2_pt_hw2]
if torch.cuda.is_available():
vecs_list.append(torch.rand(100, 200, 2).to('cuda') + .5)
for vecs in vecs_list:
if isinstance(vecs, torch.Tensor):
v = to_numpy(vecs)
else:
v = vecs
self.assertIsNone(np.testing.assert_allclose((flow1 / vecs[..., 0]).vecs_numpy, vecs1 / v[..., :1],
rtol=1e-6, atol=1e-6))
self.assertEqual((flow1 / vecs[..., 0]).device, flow1.vecs.device.type)
self.assertEqual((flow1 / vecs[..., 0]).device, flow1.mask.device.type)
# ... using numpy arrays and torch tensors of the same shape as the flow vectors
vecs2_np_2hw = np.random.rand(2, 100, 200) + .5
vecs2_pt_2hw = torch.rand(2, 100, 200) + .5
vecs2_pt_hw2 = torch.rand(100, 200, 2) + .5
vecs_list = [vecs2, vecs2_np_2hw, vecs2_pt_2hw, vecs2_pt_hw2]
if torch.cuda.is_available():
vecs_list.append(torch.rand(2, 100, 200).to('cuda') + .5)
vecs_list.append(torch.rand(100, 200, 2).to('cuda') + .5)
for vecs in vecs_list:
if isinstance(vecs, torch.Tensor):
v = to_numpy(vecs)
else:
v = vecs
if v.shape[0] == 2:
v = np.moveaxis(v, 0, -1)
self.assertIsNone(np.testing.assert_allclose((flow1 / vecs).vecs_numpy, vecs1 / v,
rtol=1e-6, atol=1e-6))
self.assertEqual((flow1 / vecs).device, flow1.vecs.device.type)
self.assertEqual((flow1 / vecs).device, flow1.mask.device.type)
# ... using a list of the wrong length
with self.assertRaises(ValueError):
flow1 / [1, 2, 3]
# ... using a numpy array of the wrong size
with self.assertRaises(ValueError):
flow1 / np.array([1, 2, 3])
# ... using a numpy array of the wrong shape
with self.assertRaises(ValueError):
flow1 / np.ones((200, 200))
# ... using a numpy array of the wrong shape
with self.assertRaises(ValueError):
flow1 / np.ones((200, 200, 2))
# ... using a numpy array of the wrong shape
with self.assertRaises(ValueError):
flow1 / np.ones((100, 200, 3))
# ... using a numpy array of the wrong shape
with self.assertRaises(ValueError):
flow1 / np.ones((200, 200, 2, 1))
def test_pow(self):
vecs1 = np.random.rand(100, 200, 2)
vecs2 = np.random.rand(100, 200, 2)
flow1 = Flow(vecs1)
# Exponentiation
ints = np.random.randint(-2, 2, 100)
floats = (np.random.rand(100) - .5) * 4
# ... using ints and floats
for i, f in zip(ints, floats):
self.assertIsNone(np.testing.assert_allclose((flow1 ** i).vecs_numpy, vecs1 ** i, rtol=1e-6, atol=1e-6))
self.assertIsNone(np.testing.assert_allclose((flow1 ** f).vecs_numpy, vecs1 ** f, rtol=1e-6, atol=1e-6))
# ... using a list of length 2
int_list = np.random.randint(-5, 5, (100, 2))
for li in int_list:
v = vecs1.astype('f')
v[..., 0] **= li[0]
v[..., 1] **= li[1]
self.assertIsNone(np.testing.assert_allclose((flow1 ** list(li)).vecs_numpy, v, rtol=1e-6, atol=1e-6))
# ... using a numpy array of size 2
int_list = np.random.randint(-5, 5, (100, 2))
for li in int_list:
v = vecs1.astype('f')
v[..., 0] **= li[0]
v[..., 1] **= li[1]
self.assertIsNone(np.testing.assert_allclose((flow1 ** li).vecs_numpy, v, rtol=1e-6, atol=1e-6))
# ... using a numpy array and torch tensor of the same shape as the flow
vecs2_pt_hw2 = torch.rand(100, 200, 2)
vecs_list = [vecs2, vecs2_pt_hw2]
if torch.cuda.is_available():
vecs_list.append(torch.rand(100, 200, 2).to('cuda'))
for vecs in vecs_list:
if isinstance(vecs, torch.Tensor):
v = to_numpy(vecs)
else:
v = vecs
self.assertIsNone(np.testing.assert_allclose((flow1 ** vecs[..., 0]).vecs_numpy, vecs1 ** v[..., :1],
rtol=1e-6, atol=1e-6))
self.assertEqual((flow1 ** vecs[..., 0]).device, flow1.vecs.device.type)
self.assertEqual((flow1 ** vecs[..., 0]).device, flow1.mask.device.type)
# ... using numpy arrays and torch tensors of the same shape as the flow vectors
vecs2_np_2hw = np.random.rand(2, 100, 200)
vecs2_pt_2hw = torch.rand(2, 100, 200)
vecs2_pt_hw2 = torch.rand(100, 200, 2)
vecs_list = [vecs2, vecs2_np_2hw, vecs2_pt_2hw, vecs2_pt_hw2]
if torch.cuda.is_available():
vecs_list.append(torch.rand(2, 100, 200).to('cuda'))
vecs_list.append(torch.rand(100, 200, 2).to('cuda'))
for vecs in vecs_list:
if isinstance(vecs, torch.Tensor):
v = to_numpy(vecs)
else:
v = vecs
if v.shape[0] == 2:
v = np.moveaxis(v, 0, -1)
self.assertIsNone(np.testing.assert_allclose((flow1 ** vecs).vecs_numpy, vecs1 ** v,
rtol=1e-6, atol=1e-6))
self.assertEqual((flow1 ** vecs).device, flow1.vecs.device.type)
self.assertEqual((flow1 ** vecs).device, flow1.mask.device.type)
# ... using a list of the wrong length
with self.assertRaises(ValueError):
flow1 ** [0, 1, 2]
# ... using a numpy array of the wrong size
with self.assertRaises(ValueError):
flow1 ** np.array([0, 1, 2])
# ... using a numpy array of the wrong shape
with self.assertRaises(ValueError):
flow1 ** np.random.rand(200, 200)
# ... using a numpy array of the wrong shape
with self.assertRaises(ValueError):
flow1 ** np.random.rand(200, 200, 2)
# ... using a numpy array of the wrong shape
with self.assertRaises(ValueError):
flow1 ** np.random.rand(100, 200, 3)
# ... using a numpy array of the wrong shape
with self.assertRaises(ValueError):
flow1 ** np.random.rand(200, 200, 2, 1)
def test_neg(self):
vecs1 = np.random.rand(100, 200, 2)
flow1 = Flow(vecs1)
self.assertIsNone(np.testing.assert_allclose((-flow1).vecs_numpy, -vecs1))
def test_resize(self):
shape = [20, 10]
ref = 's'
flow = Flow.from_transforms([['rotation', 30, 50, 30]], shape, ref)
# Different scales
scales = [.2, .5, 1, 1.5, 2, 10]
for scale in scales:
self.assertIsNone(np.testing.assert_equal(flow.resize(scale).vecs_numpy,
to_numpy(resize_flow(flow.vecs, scale), switch_channels=True)))
# Scale mask
shape_small = (20, 40)
shape_large = (30, 80)
mask_small = np.ones(shape_small, 'bool')
mask_small[:6, :20] = 0
mask_large = np.ones(shape_large, 'bool')
mask_large[:9, :40] = 0
flow_small = Flow.from_transforms([['rotation', 0, 0, 30]], shape_small, 't', mask_small)
flow_large = flow_small.resize((1.5, 2))
self.assertIsNone(np.testing.assert_equal(to_numpy(flow_large.mask), mask_large))
# Check scaling is performed correctly based on the actual flow field
ref = 't'
flow_small = Flow.from_transforms([['rotation', 0, 0, 30]], (50, 80), ref)
flow_large = Flow.from_transforms([['rotation', 0, 0, 30]], (150, 240), ref)
flow_resized = flow_large.resize(1/3)
self.assertIsNone(np.testing.assert_allclose(flow_resized.vecs_numpy, flow_small.vecs_numpy, atol=1, rtol=.1))
def test_pad(self):
shape = [100, 80]
for ref in ['t', 's']:
flow = Flow.zero(shape, ref, np.ones(shape, 'bool'))
flow = flow.pad([10, 20, 30, 40])
self.assertIsNone(np.testing.assert_equal(flow.shape[:2], [shape[0] + 10 + 20, shape[1] + 30 + 40]))
self.assertIsNone(np.testing.assert_equal(flow.vecs_numpy, 0))
self.assertIsNone(np.testing.assert_equal(to_numpy(flow[10:-20, 30:-40].mask), 1))
flow.mask[10:-20, 30:-40] = 0
self.assertIsNone(np.testing.assert_equal(to_numpy(flow.mask), 0))
self.assertIs(flow.ref, ref)
# 'Replicate' padding
flow = Flow.from_transforms([['rotation', 30, 50, 30]], shape, ref)
padded_flow = flow.pad([10, 10, 20, 20], mode='replicate')
self.assertIsNone(np.testing.assert_equal(padded_flow.vecs_numpy[0, 20:-20], flow.vecs_numpy[0]))
self.assertIsNone(np.testing.assert_equal(padded_flow.vecs_numpy[10:-10, 0], flow.vecs_numpy[:, 0]))
# 'Reflect' padding
padded_flow = flow.pad([10, 10, 20, 20], mode='reflect')
self.assertIsNone(np.testing.assert_equal(padded_flow.vecs_numpy[0, 20:-20], flow.vecs_numpy[10]))
self.assertIsNone(np.testing.assert_equal(padded_flow.vecs_numpy[10:-10, 0], flow.vecs_numpy[:, 20]))
# Invalid padding mode
with self.assertRaises(ValueError):
flow.pad([10, 10, 20, 20], mode='test')
def test_apply(self):
    """Apply: flow.apply must match calling apply_flow directly, across both
    references, mask consideration on/off, devices, 2D/3D tensor targets and
    Flow-object targets; also padded ("smaller flow on larger target")
    application and validation of the padding/cut arguments.

    NOTE(review): nesting reconstructed from variable use (indentation was
    lost); the 'cuda' branches require a CUDA-capable device.
    """
    # Image loaded channel-first: (C, H, W)
    img_np = np.moveaxis(cv2.imread('smudge.png'), -1, 0)
    img_pt = torch.tensor(img_np)
    # Check flow.apply results in the same as using apply_flow directly
    for ref in ['t', 's']:
        for consider_mask in [True, False]:
            for device in ['cpu', 'cuda']:
                for img in [img_pt.to('cpu'), img_pt.to('cuda')]:
                    mask = torch.ones(img_pt.shape[1:], dtype=torch.bool)
                    mask[400:] = False  # invalidate the lower image part
                    flow = Flow.from_transforms([['rotation', 30, 50, 30]], img.shape[1:], ref, mask, device)
                    # Target is a 3D torch tensor
                    warped_img_desired = apply_flow(flow.vecs, img, ref, mask if consider_mask else None)
                    warped_img_actual = flow.apply(img, consider_mask=consider_mask)
                    # Result follows the flow's device, not the image's
                    self.assertEqual(flow.device, warped_img_actual.device.type)
                    self.assertIsNone(np.testing.assert_equal(to_numpy(warped_img_actual),
                                                              to_numpy(warped_img_desired)))
                    warped_img_actual, _ = flow.apply(img, mask, True, consider_mask=consider_mask)
                    self.assertIsNone(np.testing.assert_equal(to_numpy(warped_img_actual),
                                                              to_numpy(warped_img_desired)))
                    # Target is a 2D torch tensor
                    warped_img_desired = apply_flow(flow.vecs, img[0], ref, mask if consider_mask else None)
                    warped_img_actual = flow.apply(img[0], consider_mask=consider_mask)
                    self.assertEqual(flow.device, warped_img_actual.device.type)
                    self.assertIsNone(np.testing.assert_equal(to_numpy(warped_img_actual),
                                                              to_numpy(warped_img_desired)))
                    warped_img_actual, _ = flow.apply(img[0], mask, True, consider_mask=consider_mask)
                    self.assertIsNone(np.testing.assert_equal(to_numpy(warped_img_actual),
                                                              to_numpy(warped_img_desired)))
                    for f_device in ['cpu', 'cuda']:
                        f = flow.to_device(f_device)
                        # Target is a flow object
                        warped_flow_desired = apply_flow(flow.vecs, f.vecs, ref, mask if consider_mask else None)
                        warped_flow_actual = flow.apply(f, consider_mask=consider_mask)
                        self.assertEqual(flow.device, warped_flow_actual.device)
                        self.assertIsNone(np.testing.assert_equal(to_numpy(warped_flow_actual.vecs),
                                                                  to_numpy(warped_flow_desired)))
    # Check using a smaller flow field on a larger target works the same as a full flow field on the same target
    img = img_pt
    ref = 't'
    flow = Flow.from_transforms([['rotation', 30, 50, 30]], img.shape[1:], ref)
    warped_img_desired = apply_flow(flow.vecs, img, ref)
    shape = [img.shape[1] - 90, img.shape[2] - 110]
    padding = [50, 40, 30, 80]
    # Same rotation, but expressed around the origin of the cut-out region
    cut_flow = Flow.from_transforms([['rotation', 0, 0, 30]], shape, ref)
    # # ... not cutting (target torch tensor)
    # warped_img_actual = cut_flow.apply(img, padding=padding, cut=False)
    # self.assertIsNone(np.testing.assert_equal(to_numpy(warped_img_actual[padding[0]:-padding[1],
    #                                                                      padding[2]:-padding[3]]),
    #                                           to_numpy(warped_img_desired[padding[0]:-padding[1],
    #                                                                       padding[2]:-padding[3]])))
    # ... cutting (target torch tensor)
    warped_img_actual = cut_flow.apply(img, padding=padding, cut=True)
    self.assertIsNone(np.testing.assert_allclose(to_numpy(warped_img_actual).astype('f'),
                                                 to_numpy(warped_img_desired[:, padding[0]:-padding[1],
                                                          padding[2]:-padding[3]]).astype('f'),
                                                 atol=1))  # result rounded (uint8), so errors can be 1
    # ... not cutting (target flow object)
    target_flow = Flow.from_transforms([['rotation', 30, 50, 30]], img.shape[1:], ref)
    warped_flow_desired = apply_flow(flow.vecs, target_flow.vecs, ref)
    warped_flow_actual = cut_flow.apply(target_flow, padding=padding, cut=False)
    self.assertIsNone(np.testing.assert_allclose(to_numpy(warped_flow_actual.vecs[:, padding[0]:-padding[1],
                                                          padding[2]:-padding[3]]),
                                                 to_numpy(warped_flow_desired[:, padding[0]:-padding[1],
                                                          padding[2]:-padding[3]]),
                                                 atol=1e-1))
    # ... cutting (target flow object)
    warped_flow_actual = cut_flow.apply(target_flow, padding=padding, cut=True)
    self.assertIsNone(np.testing.assert_allclose(to_numpy(warped_flow_actual.vecs),
                                                 to_numpy(warped_flow_desired[:, padding[0]:-padding[1],
                                                          padding[2]:-padding[3]]),
                                                 atol=1e-1))
    # Non-valid padding values
    for ref in ['t', 's']:
        flow = Flow.from_transforms([['rotation', 0, 0, 30]], shape, ref)
        with self.assertRaises(TypeError):
            flow.apply(target_flow, return_valid_area='test')
        with self.assertRaises(TypeError):
            flow.apply(target_flow, consider_mask='test')
        with self.assertRaises(TypeError):
            flow.apply(target_flow, padding=100, cut=True)
        with self.assertRaises(ValueError):
            flow.apply(target_flow, padding=[10, 20, 30, 40, 50], cut=True)
        with self.assertRaises(ValueError):
            flow.apply(target_flow, padding=[10., 20, 30, 40], cut=True)
        with self.assertRaises(ValueError):
            flow.apply(target_flow, padding=[-10, 10, 10, 10], cut=True)
        with self.assertRaises(TypeError):
            flow.apply(target_flow, padding=[10, 20, 30, 40, 50], cut=2)
        with self.assertRaises(TypeError):
            flow.apply(target_flow, padding=[10, 20, 30, 40, 50], cut='true')
def test_switch_ref(self):
    """switch_ref: mode 'invalid' only flips the reference label; mode 'valid'
    (the default) converts the vectors so they match the equivalently
    constructed flow of the other reference (inside the valid mask)."""
    shape = (200, 300)
    # Mode 'invalid'
    for refs in [['t', 's'], ['s', 't']]:
        flow = Flow.from_transforms([['rotation', 30, 50, 30]], shape, refs[0])
        flow = flow.switch_ref(mode='invalid')
        self.assertEqual(flow.ref, refs[1])
    # Mode 'valid'
    transforms = [['rotation', 256, 256, 30]]
    flow_s = Flow.from_transforms(transforms, shape, 's')
    flow_t = Flow.from_transforms(transforms, shape, 't')
    switched_s = flow_t.switch_ref()
    # Compare only where the switched flow is valid
    self.assertIsNone(np.testing.assert_allclose(switched_s.vecs_numpy[switched_s.mask_numpy],
                                                 flow_s.vecs_numpy[switched_s.mask_numpy],
                                                 rtol=1e-3, atol=1e-3))
    switched_t = flow_s.switch_ref()
    self.assertIsNone(np.testing.assert_allclose(switched_t.vecs_numpy[switched_t.mask_numpy],
                                                 flow_t.vecs_numpy[switched_t.mask_numpy],
                                                 rtol=1e-3, atol=1e-3))
    # Invalid mode passed
    flow = Flow.from_transforms([['rotation', 30, 50, 30]], shape, 't')
    with self.assertRaises(ValueError):
        flow.switch_ref('test')
    with self.assertRaises(ValueError):
        flow.switch_ref(1)
def test_invert(self):
    """invert: inverting a forwards rotation flow must match the explicitly
    constructed backwards flow (and vice versa), for every combination of
    source reference and requested output reference."""
    f_s = Flow.from_transforms([['rotation', 256, 256, 30]], (512, 512), 's')  # Forwards
    f_t = Flow.from_transforms([['rotation', 256, 256, 30]], (512, 512), 't')  # Forwards
    b_s = Flow.from_transforms([['rotation', 256, 256, -30]], (512, 512), 's')  # Backwards
    b_t = Flow.from_transforms([['rotation', 256, 256, -30]], (512, 512), 't')  # Backwards
    # Inverting s to s
    b_s_inv = f_s.invert()
    self.assertIsNone(np.testing.assert_allclose(b_s_inv.vecs_numpy[b_s_inv.mask_numpy],
                                                 b_s.vecs_numpy[b_s_inv.mask_numpy],
                                                 rtol=1e-3, atol=1e-3))
    f_s_inv = b_s.invert()
    self.assertIsNone(np.testing.assert_allclose(f_s_inv.vecs_numpy[f_s_inv.mask_numpy],
                                                 f_s.vecs_numpy[f_s_inv.mask_numpy],
                                                 rtol=1e-3, atol=1e-3))
    # Inverting s to t
    b_t_inv = f_s.invert('t')
    self.assertIsNone(np.testing.assert_allclose(b_t_inv.vecs_numpy[b_t_inv.mask_numpy],
                                                 b_t.vecs_numpy[b_t_inv.mask_numpy],
                                                 rtol=1e-3, atol=1e-3))
    f_t_inv = b_s.invert('t')
    self.assertIsNone(np.testing.assert_allclose(f_t_inv.vecs_numpy[f_t_inv.mask_numpy],
                                                 f_t.vecs_numpy[f_t_inv.mask_numpy],
                                                 rtol=1e-3, atol=1e-3))
    # Inverting t to t
    b_t_inv = f_t.invert()
    self.assertIsNone(np.testing.assert_allclose(b_t_inv.vecs_numpy[b_t_inv.mask_numpy],
                                                 b_t.vecs_numpy[b_t_inv.mask_numpy],
                                                 rtol=1e-3, atol=1e-3))
    f_t_inv = b_t.invert()
    self.assertIsNone(np.testing.assert_allclose(f_t_inv.vecs_numpy[f_t_inv.mask_numpy],
                                                 f_t.vecs_numpy[f_t_inv.mask_numpy],
                                                 rtol=1e-3, atol=1e-3))
    # Inverting t to s
    b_s_inv = f_t.invert('s')
    self.assertIsNone(np.testing.assert_allclose(b_s_inv.vecs_numpy[b_s_inv.mask_numpy],
                                                 b_s.vecs_numpy[b_s_inv.mask_numpy],
                                                 rtol=1e-3, atol=1e-3))
    f_s_inv = b_t.invert('s')
    self.assertIsNone(np.testing.assert_allclose(f_s_inv.vecs_numpy[f_s_inv.mask_numpy],
                                                 f_s.vecs_numpy[f_s_inv.mask_numpy],
                                                 rtol=1e-3, atol=1e-3))
def test_track(self):
    """track: the returned valid status must be False for points that leave
    the image area or are moved by flow vectors the mask marks invalid."""
    f_s = Flow.from_transforms([['rotation', 0, 0, 30]], (512, 512), 's')
    f_t = Flow.from_transforms([['rotation', 0, 0, 30]], (512, 512), 't')
    # Test valid status for 't' flow
    f_t.mask[:, 200:] = False  # right-hand side of the flow field is invalid
    pts = torch.tensor([
        [0, 50],  # Moved out of bounds by a valid flow vector
        [0, 500],  # Moved out of bounds by an invalid flow vector
        [8.3, 7.2],  # Moved normally by valid flow vector
        [120.4, 160.2],  # Moved normally by valid flow vector
        [300, 200]  # Moved normally by invalid flow vector
    ])
    desired_valid_status = [False, False, True, True, False]
    _, tracked = f_t.track(pts, get_valid_status=True)
    self.assertIsNone(np.testing.assert_equal(to_numpy(tracked), desired_valid_status))
    # Test valid status for 's' flow
    f_s.mask[:, 200:] = False
    pts = torch.tensor([
        [0, 50],  # Moved out of bounds by a valid flow vector
        [0, 500],  # Moved out of bounds by an invalid flow vector
        [8.3, 7.2],  # Moved normally by valid flow vector
        [120.4, 160.2],  # Moved normally by valid flow vector
        [300, 200]  # Moved normally by invalid flow vector
    ])
    desired_valid_status = [False, False, True, True, False]
    _, tracked = f_s.track(pts, get_valid_status=True)
    self.assertIsNone(np.testing.assert_equal(to_numpy(tracked), desired_valid_status))
    # Invalid inputs
    with self.assertRaises(TypeError):
        f_s.track(pts, True, get_valid_status='test')
def test_valid_target(self):
    """valid_target: pixel-exact expected validity maps for a 45-degree
    rotation about the origin, for both references, with and without a flow
    mask, and with the mask considered or ignored."""
    transforms = [['rotation', 0, 0, 45]]
    shape = (7, 7)
    mask = np.ones(shape, 'bool')
    mask[4:, :3] = False  # invalid block in the lower-left of the 's' flow
    f_s_masked = Flow.from_transforms(transforms, shape, 's', mask)
    mask = np.ones(shape, 'bool')
    mask[:3, 4:] = False  # invalid block in the upper-right of the 't' flow
    f_t_masked = Flow.from_transforms(transforms, shape, 't', mask)
    f_s = Flow.from_transforms(transforms, shape, 's')
    f_t = Flow.from_transforms(transforms, shape, 't')
    # Hand-derived expected validity maps (True where the target is reachable)
    desired_area_s = np.array([
        [1, 1, 1, 1, 1, 1, 1],
        [0, 1, 1, 1, 1, 1, 1],
        [0, 0, 1, 1, 1, 1, 1],
        [0, 0, 0, 1, 1, 1, 0],
        [0, 0, 0, 0, 1, 0, 0],
        [0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0]
    ]).astype('bool')
    desired_area_t = np.array([
        [1, 1, 1, 1, 1, 1, 1],
        [0, 1, 1, 1, 1, 1, 1],
        [0, 0, 1, 1, 1, 1, 1],
        [0, 0, 0, 1, 1, 1, 0],
        [0, 0, 0, 0, 1, 0, 0],
        [0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0]
    ]).astype('bool')
    desired_area_s_masked_consider_mask = np.array([
        [1, 1, 1, 1, 1, 1, 1],
        [0, 1, 1, 1, 1, 1, 1],
        [0, 0, 1, 1, 1, 1, 1],
        [0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0]
    ]).astype('bool')
    desired_area_s_masked = np.array([
        [1, 1, 1, 1, 1, 1, 1],
        [0, 1, 1, 1, 0, 0, 1],
        [0, 0, 1, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0]
    ]).astype('bool')
    desired_area_t_masked = np.array([
        [1, 1, 1, 1, 0, 0, 0],
        [0, 1, 1, 1, 0, 0, 0],
        [0, 0, 1, 1, 0, 0, 0],
        [0, 0, 0, 1, 1, 1, 0],
        [0, 0, 0, 0, 1, 0, 0],
        [0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0]
    ]).astype('bool')
    self.assertIsNone(np.testing.assert_equal(to_numpy(f_s.valid_target()), desired_area_s))
    self.assertIsNone(np.testing.assert_equal(to_numpy(f_t.valid_target()), desired_area_t))
    # Default considers the mask; passing False ignores it
    self.assertIsNone(np.testing.assert_equal(f_s_masked.valid_target(), desired_area_s_masked_consider_mask))
    self.assertIsNone(np.testing.assert_equal(to_numpy(f_s_masked.valid_target(False)), desired_area_s_masked))
    self.assertIsNone(np.testing.assert_equal(to_numpy(f_t_masked.valid_target()), desired_area_t_masked))
def test_valid_source(self):
    """valid_source: pixel-exact expected validity maps (mirroring
    test_valid_target) for a 45-degree rotation about the origin, for both
    references, masked and unmasked."""
    transforms = [['rotation', 0, 0, 45]]
    shape = (7, 7)
    mask = np.ones(shape, 'bool')
    mask[4:, :3] = False  # invalid block in the lower-left of the 's' flow
    f_s_masked = Flow.from_transforms(transforms, shape, 's', mask)
    mask = np.ones(shape, 'bool')
    mask[:3, 4:] = False  # invalid block in the upper-right of the 't' flow
    f_t_masked = Flow.from_transforms(transforms, shape, 't', mask)
    f_s = Flow.from_transforms(transforms, shape, 's')
    f_t = Flow.from_transforms(transforms, shape, 't')
    # Hand-derived expected validity maps (True where the source remains visible)
    desired_area_s = np.array([
        [1, 0, 0, 0, 0, 0, 0],
        [1, 1, 0, 0, 0, 0, 0],
        [1, 1, 1, 0, 0, 0, 0],
        [1, 1, 1, 1, 0, 0, 0],
        [1, 1, 1, 1, 1, 0, 0],
        [1, 1, 1, 1, 0, 0, 0],
        [1, 1, 1, 0, 0, 0, 0]
    ]).astype('bool')
    desired_area_t = np.array([
        [1, 0, 0, 0, 0, 0, 0],
        [1, 1, 0, 0, 0, 0, 0],
        [1, 1, 1, 0, 0, 0, 0],
        [1, 1, 1, 1, 0, 0, 0],
        [1, 1, 1, 1, 1, 0, 0],
        [1, 1, 1, 1, 0, 0, 0],
        [1, 1, 1, 0, 0, 0, 0]
    ]).astype('bool')
    desired_area_s_masked = np.array([
        [1, 0, 0, 0, 0, 0, 0],
        [1, 1, 0, 0, 0, 0, 0],
        [1, 1, 1, 0, 0, 0, 0],
        [1, 1, 1, 1, 0, 0, 0],
        [0, 0, 0, 1, 1, 0, 0],
        [0, 0, 0, 1, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0]
    ]).astype('bool')
    desired_area_t_masked_consider_mask = np.array([
        [1, 0, 0, 0, 0, 0, 0],
        [1, 1, 0, 0, 0, 0, 0],
        [1, 1, 1, 0, 0, 0, 0],
        [1, 1, 1, 0, 0, 0, 0],
        [1, 1, 1, 0, 0, 0, 0],
        [1, 1, 1, 0, 0, 0, 0],
        [1, 1, 1, 0, 0, 0, 0]
    ]).astype('bool')
    desired_area_t_masked = np.array([
        [1, 0, 0, 0, 0, 0, 0],
        [1, 1, 0, 0, 0, 0, 0],
        [1, 1, 1, 0, 0, 0, 0],
        [1, 1, 0, 0, 0, 0, 0],
        [1, 0, 0, 0, 0, 0, 0],
        [1, 0, 0, 0, 0, 0, 0],
        [1, 1, 0, 0, 0, 0, 0]
    ]).astype('bool')
    self.assertIsNone(np.testing.assert_equal(to_numpy(f_s.valid_source()), desired_area_s))
    self.assertIsNone(np.testing.assert_equal(to_numpy(f_t.valid_source()), desired_area_t))
    self.assertIsNone(np.testing.assert_equal(to_numpy(f_s_masked.valid_source()), desired_area_s_masked))
    # Default considers the mask; passing False ignores it
    self.assertIsNone(np.testing.assert_equal(f_t_masked.valid_source(), desired_area_t_masked_consider_mask))
    self.assertIsNone(np.testing.assert_equal(f_t_masked.valid_source(False), desired_area_t_masked))
def test_get_padding(self):
    """get_padding: expected [top, bottom, left, right] padding needed to
    contain the 45-degree rotation flow, for both references, masked and
    unmasked; near-zero flow must need no padding."""
    transforms = [['rotation', 0, 0, 45]]
    shape = (7, 7)
    mask = np.ones(shape, 'bool')
    mask[:, 4:] = False  # only the left columns of the 's' flow are valid
    f_s_masked = Flow.from_transforms(transforms, shape, 's', mask)
    mask = np.ones(shape, 'bool')
    mask[4:] = False  # only the top rows of the 't' flow are valid
    f_t_masked = Flow.from_transforms(transforms, shape, 't', mask)
    f_s = Flow.from_transforms(transforms, shape, 's')
    f_t = Flow.from_transforms(transforms, shape, 't')
    # Hand-derived expected paddings [top, bottom, left, right]
    f_s_desired = [5, 0, 0, 3]
    f_t_desired = [0, 3, 5, 0]
    f_s_masked_desired = [3, 0, 0, 1]
    f_t_masked_desired = [0, 1, 3, 0]
    self.assertIsNone(np.testing.assert_equal(f_s.get_padding(), f_s_desired))
    self.assertIsNone(np.testing.assert_equal(f_t.get_padding(), f_t_desired))
    self.assertIsNone(np.testing.assert_equal(f_s_masked.get_padding(), f_s_masked_desired))
    self.assertIsNone(np.testing.assert_equal(f_t_masked.get_padding(), f_t_masked_desired))
    f = Flow.zero(shape)
    # Inject tiny noise directly into the private vector storage: sub-pixel
    # flow magnitudes should not trigger any padding
    f._vecs[0] = torch.rand(*shape) * 1e-4
    self.assertIsNone(np.testing.assert_equal(f.get_padding(), [0, 0, 0, 0]))
def test_is_zero(self):
    """A flow that is non-zero only in masked-out pixels counts as zero when
    the mask is considered (the default), but not when it is ignored."""
    field_shape = (10, 10)
    validity = np.ones(field_shape, 'bool')
    validity[0, 0] = False  # invalidate the single non-zero pixel
    vec_array = np.zeros(field_shape + (2,))
    vec_array[0, 0] = 10
    flow = Flow(vec_array, mask=validity)
    self.assertEqual(flow.is_zero(), True)  # default considers the mask
    self.assertEqual(flow.is_zero(masked=True), True)
    self.assertEqual(flow.is_zero(masked=False), False)  # non-zero pixel now counts
    with self.assertRaises(TypeError):  # masked must be a boolean
        flow.is_zero(masked='test')
def test_visualise(self):
    """visualise: colour-wheel correctness for the 'bgr'/'rgb'/'hsv' modes,
    mask shading and mask-border drawing, tensor output, and argument
    validation."""
    # Correct values for the different modes
    # Horizontal flow towards the right is red
    flow = Flow.from_transforms([['translation', 1, 0]], [200, 300])
    desired_img = np.tile(np.array([0, 0, 255]).reshape((1, 1, 3)), (200, 300, 1))
    self.assertIsNone(np.testing.assert_equal(flow.visualise('bgr', return_tensor=False), desired_img))
    self.assertIsNone(np.testing.assert_equal(flow.visualise('rgb', return_tensor=False), desired_img[..., ::-1]))
    self.assertIsNone(np.testing.assert_equal(flow.visualise('hsv', return_tensor=False)[..., 0], 0))
    self.assertIsNone(np.testing.assert_equal(flow.visualise('hsv', return_tensor=False)[..., 1], 255))
    self.assertIsNone(np.testing.assert_equal(flow.visualise('hsv', return_tensor=False)[..., 2], 255))
    # Flow outwards at the angle of 240 degrees (counter-clockwise) is green
    flow = Flow.from_transforms([['translation', -1, math.sqrt(3)]], [200, 300])
    desired_img = np.tile(np.array([0, 255, 0]).reshape((1, 1, 3)), (200, 300, 1))
    self.assertIsNone(np.testing.assert_equal(flow.visualise('bgr', return_tensor=False), desired_img))
    # Pure green is identical in BGR and RGB, so no channel flip here
    self.assertIsNone(np.testing.assert_equal(flow.visualise('rgb', return_tensor=False), desired_img))
    self.assertIsNone(np.testing.assert_equal(flow.visualise('hsv', return_tensor=False)[..., 0], 60))
    self.assertIsNone(np.testing.assert_equal(flow.visualise('hsv', return_tensor=False)[..., 1], 255))
    self.assertIsNone(np.testing.assert_equal(flow.visualise('hsv', return_tensor=False)[..., 2], 255))
    # Flow outwards at the angle of 120 degrees (counter-clockwise) is blue
    flow = Flow.from_transforms([['translation', -1, -math.sqrt(3)]], [200, 300])
    desired_img = np.tile(np.array([255, 0, 0]).reshape((1, 1, 3)), (200, 300, 1))
    self.assertIsNone(np.testing.assert_equal(flow.visualise('bgr', return_tensor=False), desired_img))
    self.assertIsNone(np.testing.assert_equal(flow.visualise('rgb', return_tensor=False), desired_img[..., ::-1]))
    self.assertIsNone(np.testing.assert_equal(flow.visualise('hsv', return_tensor=False)[..., 0], 120))
    self.assertIsNone(np.testing.assert_equal(flow.visualise('hsv', return_tensor=False)[..., 1], 255))
    self.assertIsNone(np.testing.assert_equal(flow.visualise('hsv', return_tensor=False)[..., 2], 255))
    # Show the flow mask: invalid pixels are darkened (value 180 instead of 255)
    mask = np.zeros((200, 300))
    mask[30:-30, 40:-40] = 1
    flow = Flow.from_transforms([['translation', 1, 0]], (200, 300), 't', mask)
    self.assertIsNone(np.testing.assert_equal(flow.visualise('bgr', True, return_tensor=False)[10, 10],
                                              [0, 0, 180]))
    self.assertIsNone(np.testing.assert_equal(flow.visualise('rgb', True, return_tensor=False)[10, 10],
                                              [180, 0, 0]))
    self.assertIsNone(np.testing.assert_equal(flow.visualise('hsv', True, return_tensor=False)[..., 0], 0))
    self.assertIsNone(np.testing.assert_equal(flow.visualise('hsv', True, return_tensor=False)[..., 1], 255))
    self.assertIsNone(np.testing.assert_equal(flow.visualise('hsv', True, return_tensor=False)[10, 10, 2], 180))
    self.assertIsNone(np.testing.assert_equal(flow.visualise('hsv', True, return_tensor=False)[100, 100, 2], 255))
    # Show the flow mask border: border pixels are drawn black
    mask = np.zeros((200, 300))
    mask[30:-30, 40:-40] = 1
    flow = Flow.from_transforms([['translation', 1, 0]], (200, 300), 't', mask)
    self.assertIsNone(np.testing.assert_equal(flow.visualise('bgr', True, True, return_tensor=False)[30, 40],
                                              [0, 0, 0]))
    self.assertIsNone(np.testing.assert_equal(flow.visualise('rgb', True, True, return_tensor=False)[30, 40],
                                              [0, 0, 0]))
    self.assertIsNone(np.testing.assert_equal(flow.visualise('hsv', True, True, return_tensor=False)[..., 0], 0))
    self.assertIsNone(np.testing.assert_equal(flow.visualise('hsv', True, True, return_tensor=False)[30, 40, 1], 0))
    self.assertIsNone(np.testing.assert_equal(flow.visualise('hsv', True, True, return_tensor=False)[30, 40, 2], 0))
    # Output is tensor if required
    mask = np.zeros((200, 300))
    mask[30:-30, 40:-40] = 1
    flow = Flow.from_transforms([['translation', 1, 0]], (200, 300), 't', mask)
    self.assertIsInstance(flow.visualise('bgr', True, True), torch.Tensor)
    # Invalid arguments
    flow = Flow.zero([10, 10])
    with self.assertRaises(ValueError):
        flow.visualise(mode=3)
    with self.assertRaises(ValueError):
        flow.visualise(mode='test')
    with self.assertRaises(TypeError):
        flow.visualise('rgb', show_mask=2)
    with self.assertRaises(TypeError):
        flow.visualise('rgb', show_mask_borders=2)
    with self.assertRaises(TypeError):
        flow.visualise('rgb', return_tensor=2)
    with self.assertRaises(TypeError):
        flow.visualise('rgb', range_max='2')
    with self.assertRaises(ValueError):
        flow.visualise('rgb', range_max=-1)
def test_visualise_arrows(self):
    """visualise_arrows: smoke-tests all combinations of scaling / mask /
    border / output-type options, then checks argument validation.

    NOTE(review): nesting reconstructed from variable use (indentation was
    lost in this copy).
    """
    img = cv2.imread('smudge.png')
    mask = np.zeros(img.shape[:2])
    mask[50:-50, 20:-20] = 1
    flow = Flow.from_transforms([['rotation', 256, 256, 30]], img.shape[:2], 's', mask)
    for scaling in [0.1, 1, 2]:
        for show_mask in [True, False]:
            for show_mask_border in [True, False]:
                for return_tensor in [True, False]:
                    # NOTE: overwrites the loaded image; later validation
                    # calls below reuse this result as their `img` argument
                    img = flow.visualise_arrows(
                        grid_dist=10,
                        scaling=scaling,
                        show_mask=show_mask,
                        show_mask_borders=show_mask_border,
                        return_tensor=return_tensor
                    )
                    if return_tensor:
                        self.assertIsInstance(img, torch.Tensor)
                    else:
                        self.assertIsInstance(img, np.ndarray)
    # Invalid arguments
    with self.assertRaises(TypeError):
        flow.visualise_arrows(grid_dist='test')
    with self.assertRaises(ValueError):
        flow.visualise_arrows(grid_dist=-1)
    with self.assertRaises(TypeError):
        flow.visualise_arrows(10, img='test')
    with self.assertRaises(ValueError):
        flow.visualise_arrows(10, img=mask)
    with self.assertRaises(ValueError):
        flow.visualise_arrows(10, img=mask[10:])
    with self.assertRaises(ValueError):
        flow.visualise_arrows(10, img=img[..., :2])
    with self.assertRaises(TypeError):
        flow.visualise_arrows(10, img, scaling='test')
    with self.assertRaises(ValueError):
        flow.visualise_arrows(10, img, scaling=-1)
    with self.assertRaises(TypeError):
        flow.visualise_arrows(10, img, None, show_mask='test')
    with self.assertRaises(TypeError):
        flow.visualise_arrows(10, img, None, True, show_mask_borders='test')
    with self.assertRaises(TypeError):
        flow.visualise_arrows(10, img, None, True, True, colour='test')
    with self.assertRaises(ValueError):
        flow.visualise_arrows(10, img, None, True, True, colour=(0, 0))
    with self.assertRaises(TypeError):
        flow.visualise_arrows(10, img, None, True, True, colour=(0, 0, 0), return_tensor='test')
def test_show(self):
    """show() rejects a non-numeric wait argument and a negative one."""
    zero_flow = Flow.zero([200, 300])
    for expected_error, bad_wait in [(TypeError, 'test'), (ValueError, -1)]:
        with self.assertRaises(expected_error):
            zero_flow.show(bad_wait)
def test_show_arrows(self):
    """show_arrows() rejects a non-numeric wait argument and a negative one."""
    zero_flow = Flow.zero([200, 300])
    for expected_error, bad_wait in [(TypeError, 'test'), (ValueError, -1)]:
        with self.assertRaises(expected_error):
            zero_flow.show_arrows(bad_wait)
def test_matrix(self):
    """matrix: reconstructing the transformation matrix from a flow field via
    every dof/method combination, statistical success rate on random matrices,
    noise robustness, masked fitting, the lms->ransac fallback, and argument
    validation."""
    # Partial affine transform, test reconstruction with all methods
    transforms = [
        ['translation', 2, 1],
        ['rotation', 20, 20, 30],
        ['scaling', 10, 10, 1.1]
    ]
    matrix = matrix_from_transforms(transforms)
    flow_s = Flow.from_matrix(matrix, (100, 200), 's')
    flow_t = Flow.from_matrix(matrix, (100, 200), 't')
    # 't' reconstructions are tolerated to be less exact (rtol 1e-3 vs 1e-6)
    actual_matrix_s = flow_s.matrix(dof=4, method='ransac')
    actual_matrix_t = flow_t.matrix(dof=4, method='ransac')
    self.assertIsNone(np.testing.assert_allclose(to_numpy(actual_matrix_s), matrix, rtol=1e-6))
    self.assertIsNone(np.testing.assert_allclose(to_numpy(actual_matrix_t), matrix, rtol=1e-3))
    actual_matrix_s = flow_s.matrix(dof=4, method='lmeds')
    actual_matrix_t = flow_t.matrix(dof=4, method='lmeds')
    self.assertIsNone(np.testing.assert_allclose(to_numpy(actual_matrix_s), matrix, rtol=1e-6))
    self.assertIsNone(np.testing.assert_allclose(to_numpy(actual_matrix_t), matrix, rtol=1e-3))
    actual_matrix_s = flow_s.matrix(dof=6, method='ransac')
    actual_matrix_t = flow_t.matrix(dof=6, method='ransac')
    self.assertIsNone(np.testing.assert_allclose(to_numpy(actual_matrix_s), matrix, rtol=1e-6))
    self.assertIsNone(np.testing.assert_allclose(to_numpy(actual_matrix_t), matrix, rtol=1e-3))
    actual_matrix_s = flow_s.matrix(dof=6, method='lmeds')
    actual_matrix_t = flow_t.matrix(dof=6, method='lmeds')
    self.assertIsNone(np.testing.assert_allclose(to_numpy(actual_matrix_s), matrix, rtol=1e-6))
    self.assertIsNone(np.testing.assert_allclose(to_numpy(actual_matrix_t), matrix, rtol=1e-3))
    actual_matrix_s = flow_s.matrix(dof=8, method='lms')
    actual_matrix_t = flow_t.matrix(dof=8, method='lms')
    self.assertIsNone(np.testing.assert_allclose(to_numpy(actual_matrix_s), matrix, rtol=1e-6, atol=1e-4))
    self.assertIsNone(np.testing.assert_allclose(to_numpy(actual_matrix_t), matrix, rtol=1e-6, atol=1e-4))
    actual_matrix_s = flow_s.matrix(dof=8, method='ransac')
    actual_matrix_t = flow_t.matrix(dof=8, method='ransac')
    self.assertIsNone(np.testing.assert_allclose(to_numpy(actual_matrix_s), matrix, rtol=1e-6, atol=1e-4))
    self.assertIsNone(np.testing.assert_allclose(to_numpy(actual_matrix_t), matrix, rtol=1e-6, atol=1e-4))
    actual_matrix_s = flow_s.matrix(dof=8, method='lmeds')
    actual_matrix_t = flow_t.matrix(dof=8, method='lmeds')
    self.assertIsNone(np.testing.assert_allclose(to_numpy(actual_matrix_s), matrix, rtol=1e-6, atol=1e-4))
    self.assertIsNone(np.testing.assert_allclose(to_numpy(actual_matrix_t), matrix, rtol=1e-6, atol=1e-4))
    # Random matrix, check to see how often an approximate 'reconstruction' fails, target is <5% of cases
    failed = 0
    for i in range(1000):
        matrix = (np.random.rand(3, 3) - .5) * 20
        # Normalise so the matrix is a canonical homography (m[2, 2] == 1),
        # unless that entry is (near) zero
        if -1e-4 < matrix[2, 2] < 1e-4:
            matrix[2, 2] = 0
        else:
            matrix /= matrix[2, 2]
        flow_s = Flow.from_matrix(matrix, (50, 100), 's')
        try:
            np.testing.assert_allclose(flow_s.matrix(8, 'lms'), matrix, atol=1e-2, rtol=1e-2)
            np.testing.assert_allclose(flow_s.matrix(8, 'ransac'), matrix, atol=1e-2, rtol=1e-2)
            np.testing.assert_allclose(flow_s.matrix(8, 'lmeds'), matrix, atol=1e-2, rtol=1e-2)
        except AssertionError:
            failed += 1
    self.assertTrue(failed <= 50)
    # Partial affine transform reconstruction in the presence of noise, only check first 4 values
    matrix = matrix_from_transforms(transforms)
    flow_s = Flow.from_matrix(matrix, (100, 200), 's')
    flow_noise = (np.random.rand(100, 200, 2) - .5) * 5
    actual_matrix_4_ransac = (flow_s + flow_noise).matrix(4, 'ransac')
    actual_matrix_4_lmeds = (flow_s + flow_noise).matrix(4, 'lmeds')
    actual_matrix_6_ransac = (flow_s + flow_noise).matrix(6, 'ransac')
    actual_matrix_6_lmeds = (flow_s + flow_noise).matrix(6, 'lmeds')
    actual_matrix_8_lms = (flow_s + flow_noise).matrix(8, 'lms')
    actual_matrix_8_ransac = (flow_s + flow_noise).matrix(8, 'ransac')
    actual_matrix_8_lmeds = (flow_s + flow_noise).matrix(8, 'lmeds')
    for actual_matrix in [actual_matrix_4_ransac, actual_matrix_4_lmeds,
                          actual_matrix_6_ransac, actual_matrix_6_lmeds,
                          actual_matrix_8_lms, actual_matrix_8_ransac, actual_matrix_8_lmeds]:
        self.assertIsNone(np.testing.assert_allclose(actual_matrix[:2, :2], matrix[:2, :2], atol=1e-2, rtol=1e-1))
    # Masked vs non-masked matrix fitting
    matrix = matrix_from_transforms(transforms)
    mask = np.zeros((100, 200), 'bool')
    mask[:50, :50] = 1  # upper left corner will contain the real values
    flow = Flow.from_matrix(matrix, (100, 200), 's', mask)
    random_vecs = (np.random.rand(2, 100, 200) - 0.5) * 200
    random_vecs[:, :50, :50] = flow.vecs[:, :50, :50]
    flow.vecs = random_vecs
    # Make sure this fails with the 'lmeds' method:
    with self.assertRaises(AssertionError):
        np.testing.assert_allclose(flow.matrix(4, 'lmeds', False), matrix)
    # Test that it does NOT fail when the invalid flow elements are masked out
    self.assertIsNone(np.testing.assert_allclose(flow.matrix(4, 'lmeds', True), matrix))
    # Fallback of 'lms' to 'ransac' when dof == 4 or dof == 6
    matrix = matrix_from_transforms(transforms)
    flow_s = Flow.from_matrix(matrix, (100, 200), 's')
    actual_matrix_s_lms = flow_s.matrix(dof=4, method='lms')
    actual_matrix_s_ransac = flow_s.matrix(dof=4, method='ransac')
    self.assertIsNone(np.testing.assert_equal(to_numpy(actual_matrix_s_lms), to_numpy(actual_matrix_s_ransac)))
    # Invalid inputs
    matrix = matrix_from_transforms(transforms)
    flow_s = Flow.from_matrix(matrix, (100, 200), 's')
    with self.assertRaises(ValueError):
        flow_s.matrix(dof='test')
    with self.assertRaises(ValueError):
        flow_s.matrix(dof=5)
    with self.assertRaises(ValueError):
        flow_s.matrix(dof=4, method='test')
    with self.assertRaises(TypeError):
        flow_s.matrix(dof=4, method='lms', masked='test')
def test_combine_with(self):
    """combine_with: given f3 = f1 then f2, each of the three modes must
    recover the missing flow from the other two (compared inside the
    intersection of valid masks), for both references; plus input validation."""
    img = cv2.imread('smudge.png')
    shape = img.shape[:2]
    transforms = [
        ['rotation', 255.5, 255.5, -30],
        ['scaling', 100, 100, 0.8],
    ]
    for ref in ['s', 't']:
        f1 = Flow.from_transforms(transforms[0:1], shape, ref)
        f2 = Flow.from_transforms(transforms[1:2], shape, ref)
        f3 = Flow.from_transforms(transforms, shape, ref)  # f1 followed by f2
        # Mode 1: recover f1 from f2 and f3
        f1_actual = f2.combine_with(f3, 1)
        # f1.show(500, show_mask=True, show_mask_borders=True)
        # f1_actual.show(show_mask=True, show_mask_borders=True)
        self.assertIsInstance(f1_actual, Flow)
        self.assertEqual(f1_actual.ref, ref)
        comb_mask = f1_actual.mask_numpy & f1.mask_numpy
        self.assertIsNone(np.testing.assert_allclose(f1_actual.vecs_numpy[comb_mask], f1.vecs_numpy[comb_mask],
                                                     atol=5e-2))
        # Mode 2: recover f2 from f1 and f3
        f2_actual = f1.combine_with(f3, 2)
        # f2.show(500, show_mask=True, show_mask_borders=True)
        # f2_actual.show(show_mask=True, show_mask_borders=True)
        self.assertIsInstance(f2_actual, Flow)
        self.assertEqual(f2_actual.ref, ref)
        comb_mask = f2_actual.mask_numpy & f2.mask_numpy
        self.assertIsNone(np.testing.assert_allclose(f2_actual.vecs_numpy[comb_mask], f2.vecs_numpy[comb_mask],
                                                     atol=5e-2))
        # Mode 3: recover f3 from f1 and f2
        f3_actual = f1.combine_with(f2, 3)
        # f3.show(500, show_mask=True, show_mask_borders=True)
        # f3_actual.show(show_mask=True, show_mask_borders=True)
        self.assertIsInstance(f3_actual, Flow)
        self.assertEqual(f3_actual.ref, ref)
        comb_mask = f3_actual.mask_numpy & f3.mask_numpy
        self.assertIsNone(np.testing.assert_allclose(f3_actual.vecs_numpy[comb_mask], f3.vecs_numpy[comb_mask],
                                                     atol=5e-2))
    # Invalid inputs
    fs = Flow.from_transforms(transforms[0:1], [20, 20], 's')
    ft = Flow.from_transforms(transforms[1:2], [20, 20], 't')
    fs2 = Flow.from_transforms(transforms[0:1], [20, 30], 's')
    with self.assertRaises(TypeError):  # Flow not a Flow object
        fs.combine_with(fs.vecs, 1)
    with self.assertRaises(ValueError):  # Flow not the same shape
        fs.combine_with(fs2, 1)
    with self.assertRaises(ValueError):  # Flow not the same reference
        fs.combine_with(ft, 1)
    with self.assertRaises(ValueError):  # Mode not 1, 2 or 3
        fs.combine_with(fs, mode=0)
    with self.assertRaises(TypeError):  # Thresholded not boolean
        fs.combine_with(fs, 1, thresholded='test')
# Run the full test suite when this file is executed directly
if __name__ == '__main__':
    unittest.main()
| 52.808494
| 120
| 0.562885
| 9,076
| 68,387
| 4.083297
| 0.047378
| 0.01592
| 0.017242
| 0.017701
| 0.817053
| 0.777658
| 0.742229
| 0.695872
| 0.663384
| 0.602024
| 0
| 0.064284
| 0.302119
| 68,387
| 1,294
| 121
| 52.849304
| 0.712232
| 0.082516
| 0
| 0.505474
| 0
| 0
| 0.020433
| 0.000383
| 0
| 0
| 0
| 0
| 0.270073
| 1
| 0.029197
| false
| 0
| 0.007299
| 0
| 0.037409
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 4
|
46fab5fe057959b5d24e56de0f96ac30c03704cc
| 245
|
py
|
Python
|
B-CNA-410-LYN-4-1-groundhog/src/r.py
|
Neotoxic-off/Epitech2024
|
8b3dd04fa9ac2b7019c0b5b1651975a7252d929b
|
[
"Apache-2.0"
] | 2
|
2022-02-07T12:44:51.000Z
|
2022-02-08T12:04:08.000Z
|
B-CNA-410-LYN-4-1-groundhog/src/r.py
|
Neotoxic-off/Epitech2024
|
8b3dd04fa9ac2b7019c0b5b1651975a7252d929b
|
[
"Apache-2.0"
] | null | null | null |
B-CNA-410-LYN-4-1-groundhog/src/r.py
|
Neotoxic-off/Epitech2024
|
8b3dd04fa9ac2b7019c0b5b1651975a7252d929b
|
[
"Apache-2.0"
] | 1
|
2022-01-23T21:26:06.000Z
|
2022-01-23T21:26:06.000Z
|
#!/usr/bin/env python
from math import sqrt
def r(inputs, period):
    """Return the percentage change between the last value of *inputs* and
    the value *period* steps earlier, rounded to the nearest integer.

    Returns the string "nan" when there is not enough history, or when the
    reference value is not greater than 1.
    NOTE(review): the `reference - 1 > 0` guard also rejects reference values
    in (0, 1] and negatives, not just zero — confirm this is intended.
    """
    if len(inputs) <= period:
        return "nan"
    reference = inputs[-(1 + period)]
    if reference - 1 > 0:
        return round((inputs[-1] / reference - 1) * 100)
    return "nan"
| 27.222222
| 75
| 0.518367
| 33
| 245
| 3.848485
| 0.606061
| 0.165354
| 0.220472
| 0.220472
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.051429
| 0.285714
| 245
| 9
| 76
| 27.222222
| 0.674286
| 0.081633
| 0
| 0
| 0
| 0
| 0.013825
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.166667
| 0
| 0.666667
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 4
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.