| Column | Type |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
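Several of the columns above (avg_line_length, max_line_length, alphanum_fraction, and the simpler qsc_code_* counts) are plain functions of the content string. The sketch below shows plausible definitions only; the exact conventions behind this table are not given here, and the recorded avg_line_length in particular appears to be computed as file size divided by (number of lines + 1), so a naive splitlines-based mean will differ slightly.

```python
# Sketch: recompute basic per-file statistics from a record's `content` field.
# Assumed definitions -- the real pipeline may treat trailing newlines, blank
# lines, or word boundaries differently, so values can deviate from the
# recorded ones.

def basic_stats(content: str) -> dict:
    lines = content.splitlines() or [""]
    words = content.split()
    n_chars = len(content)
    return {
        "avg_line_length": sum(map(len, lines)) / len(lines),
        "max_line_length": max(map(len, lines)),
        "alphanum_fraction": sum(c.isalnum() for c in content) / max(n_chars, 1),
        "qsc_code_num_words_quality_signal": len(words),
        "qsc_code_frac_words_unique_quality_signal": (
            len(set(words)) / len(words) if words else 0.0
        ),
    }
```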
| Field | Value |
|---|---|
| hexsha | e69830daba3d8eb596cd662be830c8f4c91115b6 |
| size | 173 |
| ext | py |
| lang | Python |
| max_stars_repo_path | trebelge/trebelge/doctype/ubl_tr_application_response/test_ubl_tr_application_response.py |
| max_stars_repo_name | Framras/trebelge |
| max_stars_repo_head_hexsha | 362179925dc688ad8ea008f532de72e67e49941b |
| max_stars_repo_licenses | ["MIT"] |
| max_stars_count | 6 |
| max_stars_repo_stars_event_min_datetime | 2019-12-21T21:15:50.000Z |
| max_stars_repo_stars_event_max_datetime | 2021-12-30T21:59:53.000Z |
| max_issues_repo_path | trebelge/trebelge/doctype/ubl_tr_application_response/test_ubl_tr_application_response.py |
| max_issues_repo_name | Framras/trebelge |
| max_issues_repo_head_hexsha | 362179925dc688ad8ea008f532de72e67e49941b |
| max_issues_repo_licenses | ["MIT"] |
| max_issues_count | null |
| max_issues_repo_issues_event_min_datetime | null |
| max_issues_repo_issues_event_max_datetime | null |
| max_forks_repo_path | trebelge/trebelge/doctype/ubl_tr_application_response/test_ubl_tr_application_response.py |
| max_forks_repo_name | Framras/trebelge |
| max_forks_repo_head_hexsha | 362179925dc688ad8ea008f532de72e67e49941b |
| max_forks_repo_licenses | ["MIT"] |
| max_forks_count | 3 |
| max_forks_repo_forks_event_min_datetime | 2020-01-05T19:32:40.000Z |
| max_forks_repo_forks_event_max_datetime | 2021-11-03T14:11:21.000Z |
| content | (Python source, reproduced verbatim below) |
# Copyright (c) 2022, Framras AS-Izmir and Contributors
# See license.txt
# import frappe
import unittest
class TestUBLTRApplicationResponse(unittest.TestCase):
pass
| Field | Value |
|---|---|
| avg_line_length | 17.3 |
| max_line_length | 55 |
| alphanum_fraction | 0.780347 |
| qsc_code_num_words_quality_signal | 20 |
| qsc_code_num_chars_quality_signal | 173 |
| qsc_code_mean_word_length_quality_signal | 6.75 |
| qsc_code_frac_words_unique_quality_signal | 0.9 |
| qsc_code_frac_chars_top_2grams_quality_signal | 0 |
| qsc_code_frac_chars_top_3grams_quality_signal | 0 |
| qsc_code_frac_chars_top_4grams_quality_signal | 0 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | 0 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | 0 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | 0 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | 0 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | 0 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | 0 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | 0 |
| qsc_code_frac_chars_digital_quality_signal | 0.027211 |
| qsc_code_frac_chars_whitespace_quality_signal | 0.150289 |
| qsc_code_size_file_byte_quality_signal | 173 |
| qsc_code_num_lines_quality_signal | 9 |
| qsc_code_num_chars_line_max_quality_signal | 56 |
| qsc_code_num_chars_line_mean_quality_signal | 19.222222 |
| qsc_code_frac_chars_alphabet_quality_signal | 0.891156 |
| qsc_code_frac_chars_comments_quality_signal | 0.479769 |
| qsc_code_cate_xml_start_quality_signal | 0 |
| qsc_code_frac_lines_dupe_lines_quality_signal | 0 |
| qsc_code_cate_autogen_quality_signal | 0 |
| qsc_code_frac_lines_long_string_quality_signal | 0 |
| qsc_code_frac_chars_string_length_quality_signal | 0 |
| qsc_code_frac_chars_long_word_length_quality_signal | 0 |
| qsc_code_frac_lines_string_concat_quality_signal | 0 |
| qsc_code_cate_encoded_data_quality_signal | 0 |
| qsc_code_frac_chars_hex_words_quality_signal | 0 |
| qsc_code_frac_lines_prompt_comments_quality_signal | 0 |
| qsc_code_frac_lines_assert_quality_signal | 0 |
| qsc_codepython_cate_ast_quality_signal | 1 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | 0 |
| qsc_codepython_cate_var_zero_quality_signal | true |
| qsc_codepython_frac_lines_pass_quality_signal | 0.333333 |
| qsc_codepython_frac_lines_import_quality_signal | 0.333333 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | 0 |
| qsc_codepython_score_lines_no_logic_quality_signal | 0.666667 |
| qsc_codepython_frac_lines_print_quality_signal | 0 |

The unsuffixed qsc_* columns for this row are 0 throughout, except qsc_code_num_words, qsc_code_num_lines, qsc_codepython_cate_var_zero, qsc_codepython_frac_lines_pass, qsc_codepython_frac_lines_import, and qsc_codepython_score_lines_no_logic, which are 1; qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat are null. effective is 0 and hits is 6.
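This first record is a pure stub: apart from comments it contains only an import, a class statement, and pass, and the qsc_codepython_* line signals capture exactly that. The sketch below guesses definitions from the column names; the recorded 0.333333 for frac_lines_pass and frac_lines_import (and 0.666667 for score_lines_no_logic) suggest comment and blank lines are excluded from the denominator, leaving three effective lines, but that reading is an assumption.

```python
# Sketch of line-level Python signals suggested by the qsc_codepython_* names.
# Hypothetical definitions: blank lines and comment-only lines are dropped,
# then each fraction is taken over the remaining lines.

def line_signals(content: str) -> dict:
    lines = [ln.strip() for ln in content.splitlines()]
    code_lines = [ln for ln in lines if ln and not ln.startswith("#")]
    n = max(len(code_lines), 1)
    return {
        "frac_lines_pass": sum(ln == "pass" for ln in code_lines) / n,
        "frac_lines_import": sum(ln.startswith(("import ", "from ")) for ln in code_lines) / n,
        "frac_lines_print": sum(ln.startswith("print(") for ln in code_lines) / n,
    }

stub = (
    "# Copyright (c) 2022, Framras AS-Izmir and Contributors\n"
    "# See license.txt\n"
    "# import frappe\n"
    "import unittest\n"
    "class TestUBLTRApplicationResponse(unittest.TestCase):\n"
    "    pass\n"
)
print(line_signals(stub))
# {'frac_lines_pass': 0.333..., 'frac_lines_import': 0.333..., 'frac_lines_print': 0.0}
```

Under these assumed rules the stub reproduces the recorded 0.333333 values, which is why such a file scores as having little to no logic.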
| Field | Value |
|---|---|
| hexsha | e69dd1d9f29b027e6f74e3d7d50d279d7ce94503 |
| size | 33 |
| ext | py |
| lang | Python |
| max_stars_repo_path | __init__.py |
| max_stars_repo_name | Harish-developments/PyReader |
| max_stars_repo_head_hexsha | 8fbf874d2d643683f142d9cd03cbe59d90fcc3bd |
| max_stars_repo_licenses | ["MIT"] |
| max_stars_count | 5 |
| max_stars_repo_stars_event_min_datetime | 2021-11-01T23:23:36.000Z |
| max_stars_repo_stars_event_max_datetime | 2021-11-13T06:51:15.000Z |
| max_issues_repo_path | __init__.py |
| max_issues_repo_name | Harish-developments/PyReader |
| max_issues_repo_head_hexsha | 8fbf874d2d643683f142d9cd03cbe59d90fcc3bd |
| max_issues_repo_licenses | ["MIT"] |
| max_issues_count | 1 |
| max_issues_repo_issues_event_min_datetime | 2021-11-03T06:50:53.000Z |
| max_issues_repo_issues_event_max_datetime | 2021-11-03T06:50:53.000Z |
| max_forks_repo_path | __init__.py |
| max_forks_repo_name | Harish-developments/PyReader |
| max_forks_repo_head_hexsha | 8fbf874d2d643683f142d9cd03cbe59d90fcc3bd |
| max_forks_repo_licenses | ["MIT"] |
| max_forks_count | 1 |
| max_forks_repo_forks_event_min_datetime | 2021-11-15T12:49:18.000Z |
| max_forks_repo_forks_event_max_datetime | 2021-11-15T12:49:18.000Z |
| content | (Python source, reproduced verbatim below) |
from .PyReader import open,read
| Field | Value |
|---|---|
| avg_line_length | 16.5 |
| max_line_length | 32 |
| alphanum_fraction | 0.787879 |
| qsc_code_num_words_quality_signal | 5 |
| qsc_code_num_chars_quality_signal | 33 |
| qsc_code_mean_word_length_quality_signal | 5.2 |
| qsc_code_frac_words_unique_quality_signal | 1 |
| qsc_code_frac_chars_top_2grams_quality_signal | 0 |
| qsc_code_frac_chars_top_3grams_quality_signal | 0 |
| qsc_code_frac_chars_top_4grams_quality_signal | 0 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | 0 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | 0 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | 0 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | 0 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | 0 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | 0 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | 0 |
| qsc_code_frac_chars_digital_quality_signal | 0 |
| qsc_code_frac_chars_whitespace_quality_signal | 0.151515 |
| qsc_code_size_file_byte_quality_signal | 33 |
| qsc_code_num_lines_quality_signal | 1 |
| qsc_code_num_chars_line_max_quality_signal | 33 |
| qsc_code_num_chars_line_mean_quality_signal | 33 |
| qsc_code_frac_chars_alphabet_quality_signal | 0.928571 |
| qsc_code_frac_chars_comments_quality_signal | 0 |
| qsc_code_cate_xml_start_quality_signal | 0 |
| qsc_code_frac_lines_dupe_lines_quality_signal | 0 |
| qsc_code_cate_autogen_quality_signal | 0 |
| qsc_code_frac_lines_long_string_quality_signal | 0 |
| qsc_code_frac_chars_string_length_quality_signal | 0 |
| qsc_code_frac_chars_long_word_length_quality_signal | 0 |
| qsc_code_frac_lines_string_concat_quality_signal | 0 |
| qsc_code_cate_encoded_data_quality_signal | 0 |
| qsc_code_frac_chars_hex_words_quality_signal | 0 |
| qsc_code_frac_lines_prompt_comments_quality_signal | 0 |
| qsc_code_frac_lines_assert_quality_signal | 0 |
| qsc_codepython_cate_ast_quality_signal | 1 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | 0 |
| qsc_codepython_cate_var_zero_quality_signal | true |
| qsc_codepython_frac_lines_pass_quality_signal | 0 |
| qsc_codepython_frac_lines_import_quality_signal | 1 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | 0 |
| qsc_codepython_score_lines_no_logic_quality_signal | 1 |
| qsc_codepython_frac_lines_print_quality_signal | 0 |

The unsuffixed qsc_* columns for this row are 0 throughout, except qsc_code_num_words, qsc_code_num_chars, qsc_code_num_lines, qsc_codepython_cate_var_zero, qsc_codepython_frac_lines_import, and qsc_codepython_score_lines_no_logic, which are 1; qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat are null. effective is 0 and hits is 6.
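For this one-line __init__.py the character-level fractions can be checked by hand. The recorded size of 33 bytes is two more than the 31 visible characters, so two trailing newlines are assumed in the check below; under that assumption the recorded alphanum_fraction (26/33 = 0.787879), qsc_code_frac_chars_whitespace_quality_signal (5/33 = 0.151515), and qsc_code_frac_chars_digital_quality_signal (0) come out exactly.

```python
# Worked check of the character-level fractions for the PyReader record.
# The two trailing newlines are an assumption made to reach the recorded
# 33-character size; only the single visible line appears in the table.
content = "from .PyReader import open,read\n\n"

total = len(content)                        # 33
alnum = sum(c.isalnum() for c in content)   # 26 letters and digits
space = sum(c.isspace() for c in content)   # 3 spaces + 2 newlines = 5
digit = sum(c.isdigit() for c in content)   # 0

print(alnum / total)   # 0.7878...  ~ alphanum_fraction = 0.787879
print(space / total)   # 0.1515...  ~ qsc_code_frac_chars_whitespace_quality_signal = 0.151515
print(digit / total)   # 0.0        ~ qsc_code_frac_chars_digital_quality_signal = 0
```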
| Field | Value |
|---|---|
| hexsha | e6a30730468671e6d8e28cbc24503c950d6d2293 |
| size | 4,511 |
| ext | py |
| lang | Python |
| max_stars_repo_path | svca_limix/limix/test/gp/test_gplvm.py |
| max_stars_repo_name | DenisSch/svca |
| max_stars_repo_head_hexsha | bd029c120ca8310f43311253e4d7ce19bc08350c |
| max_stars_repo_licenses | ["Apache-2.0"] |
| max_stars_count | 65 |
| max_stars_repo_stars_event_min_datetime | 2015-01-20T20:46:26.000Z |
| max_stars_repo_stars_event_max_datetime | 2021-06-27T14:40:35.000Z |
| max_issues_repo_path | svca_limix/limix/test/gp/test_gplvm.py |
| max_issues_repo_name | DenisSch/svca |
| max_issues_repo_head_hexsha | bd029c120ca8310f43311253e4d7ce19bc08350c |
| max_issues_repo_licenses | ["Apache-2.0"] |
| max_issues_count | 29 |
| max_issues_repo_issues_event_min_datetime | 2015-02-01T22:35:17.000Z |
| max_issues_repo_issues_event_max_datetime | 2017-08-07T08:18:23.000Z |
| max_forks_repo_path | svca_limix/limix/test/gp/test_gplvm.py |
| max_forks_repo_name | DenisSch/svca |
| max_forks_repo_head_hexsha | bd029c120ca8310f43311253e4d7ce19bc08350c |
| max_forks_repo_licenses | ["Apache-2.0"] |
| max_forks_count | 35 |
| max_forks_repo_forks_event_min_datetime | 2015-02-01T17:26:50.000Z |
| max_forks_repo_forks_event_max_datetime | 2019-09-13T07:06:16.000Z |
| content | (Python source, reproduced verbatim below) |
"""GP testing code"""
import unittest
import scipy as SP
import numpy as np
import limix.deprecated as dlimix
import scipy.linalg as linalg
def PCA(Y, components):
"""run PCA, retrieving the first (components) principle components
return [s0, eig, w0]
s0: factors
w0: weights
"""
sv = linalg.svd(Y, full_matrices=0);
[s0, w0] = [sv[0][:, 0:components], SP.dot(SP.diag(sv[1]), sv[2]).T[:, 0:components]]
v = s0.std(axis=0)
s0 /= v;
w0 *= v;
return [s0, w0]
class CGPLVM_test(unittest.TestCase):
"""oGPLVM test class"""
def simulate(self):
"""simulate a dataset. Note this is seed-dependent"""
N = self.settings['N']
K = self.settings['K']
D = self.settings['D']
SP.random.seed(1)
S = SP.random.randn(N,K)
W = SP.random.randn(D,K)
Y = SP.dot(W,S.T).T
Y+= 0.1*SP.random.randn(N,D)
X0 = SP.random.randn(N,K)
X0 = PCA(Y,K)[0]
RV = {'X0': X0,'Y':Y,'S':S,'W':W}
return RV
def setUp(self):
SP.random.seed(1)
#1. simulate
self.settings = {'K':5,'N':100,'D':80}
self.simulation = self.simulate()
N = self.settings['N']
K = self.settings['K']
D = self.settings['D']
#2. setup GP
covar = dlimix.CCovLinearISO(K)
ll = dlimix.CLikNormalIso()
#create hyperparm
covar_params = SP.array([1.0])
lik_params = SP.array([1.0])
hyperparams = dlimix.CGPHyperParams()
hyperparams['covar'] = covar_params
hyperparams['lik'] = lik_params
hyperparams['X'] = self.simulation['X0']
#cretae GP
self.gp=dlimix.CGPbase(covar,ll)
#set data
self.gp.setY(self.simulation['Y'])
self.gp.setX(self.simulation['X0'])
self.gp.setParams(hyperparams)
pass
@unittest.skip("someone has to fix it")
def test_fit(self):
#create optimization object
self.gpopt = dlimix.CGPopt(self.gp)
#run
RV = self.gpopt.opt()
RV = self.gpopt.opt()
m = (SP.absolute(self.gp.LMLgrad()['X']).max() +
SP.absolute(self.gp.LMLgrad()['covar']).max() +
SP.absolute(self.gp.LMLgrad()['lik']).max())
np.testing.assert_almost_equal(m, 0., decimal=1)
class CGPLVM_test_constK(unittest.TestCase):
"""adapted version of GPLVM test, including a fixed CF covaraince"""
def simulate(self):
"""simulate a dataset. Note this is seed-dependent"""
N = self.settings['N']
K = self.settings['K']
D = self.settings['D']
SP.random.seed(1)
S = SP.random.randn(N,K)
W = SP.random.randn(D,K)
Y = SP.dot(W,S.T).T
Y+= 0.1*SP.random.randn(N,D)
X0 = SP.random.randn(N,K)
X0 = PCA(Y,K)[0]
RV = {'X0': X0,'Y':Y,'S':S,'W':W}
return RV
def setUp(self):
SP.random.seed(1)
#1. simulate
self.settings = {'K':5,'N':100,'D':80}
self.simulation = self.simulate()
N = self.settings['N']
K = self.settings['K']
D = self.settings['D']
#2. setup GP
K0 = SP.dot(self.simulation['S'],self.simulation['S'].T)
K0[:] = 0
covar1 = dlimix.CFixedCF(K0)
covar2 = dlimix.CCovLinearISO(K)
covar = dlimix.CSumCF()
covar.addCovariance(covar1)
covar.addCovariance(covar2)
ll = dlimix.CLikNormalIso()
#create hyperparm
covar_params = SP.array([0.0,1.0])
lik_params = SP.array([0.1])
hyperparams = dlimix.CGPHyperParams()
hyperparams['covar'] = covar_params
hyperparams['lik'] = lik_params
hyperparams['X'] = self.simulation['X0']
#cretae GP
self.gp=dlimix.CGPbase(covar,ll)
#set data
self.gp.setY(self.simulation['Y'])
self.gp.setX(self.simulation['X0'])
self.gp.setParams(hyperparams)
pass
@unittest.skip("someone has to fix it")
def test_fit(self):
#create optimization object
self.gpopt = dlimix.CGPopt(self.gp)
#run
RV = self.gpopt.opt()
RV = self.gpopt.opt()
m = (SP.absolute(self.gp.LMLgrad()['X']).max() +
SP.absolute(self.gp.LMLgrad()['covar']).max() +
SP.absolute(self.gp.LMLgrad()['lik']).max())
np.testing.assert_almost_equal(m, 0., decimal=1)
if __name__ == '__main__':
unittest.main()
| Field | Value |
|---|---|
| avg_line_length | 27.174699 |
| max_line_length | 89 |
| alphanum_fraction | 0.548215 |
| qsc_code_num_words_quality_signal | 607 |
| qsc_code_num_chars_quality_signal | 4,511 |
| qsc_code_mean_word_length_quality_signal | 4.031301 |
| qsc_code_frac_words_unique_quality_signal | 0.212521 |
| qsc_code_frac_chars_top_2grams_quality_signal | 0.039232 |
| qsc_code_frac_chars_top_3grams_quality_signal | 0.042501 |
| qsc_code_frac_chars_top_4grams_quality_signal | 0.034328 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | 0.73396 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | 0.733143 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | 0.718431 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | 0.718431 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | 0.718431 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | 0.674295 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | 0 |
| qsc_code_frac_chars_digital_quality_signal | 0.024261 |
| qsc_code_frac_chars_whitespace_quality_signal | 0.287298 |
| qsc_code_size_file_byte_quality_signal | 4,511 |
| qsc_code_num_lines_quality_signal | 165 |
| qsc_code_num_chars_line_max_quality_signal | 90 |
| qsc_code_num_chars_line_mean_quality_signal | 27.339394 |
| qsc_code_frac_chars_alphabet_quality_signal | 0.736858 |
| qsc_code_frac_chars_comments_quality_signal | 0.10419 |
| qsc_code_cate_xml_start_quality_signal | 0 |
| qsc_code_frac_lines_dupe_lines_quality_signal | 0.735849 |
| qsc_code_cate_autogen_quality_signal | 0 |
| qsc_code_frac_lines_long_string_quality_signal | 0 |
| qsc_code_frac_chars_string_length_quality_signal | 0.031658 |
| qsc_code_frac_chars_long_word_length_quality_signal | 0 |
| qsc_code_frac_lines_string_concat_quality_signal | 0 |
| qsc_code_cate_encoded_data_quality_signal | 0 |
| qsc_code_frac_chars_hex_words_quality_signal | 0 |
| qsc_code_frac_lines_prompt_comments_quality_signal | 0 |
| qsc_code_frac_lines_assert_quality_signal | 0.018868 |
| qsc_codepython_cate_ast_quality_signal | 1 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | 0.066038 |
| qsc_codepython_cate_var_zero_quality_signal | false |
| qsc_codepython_frac_lines_pass_quality_signal | 0.018868 |
| qsc_codepython_frac_lines_import_quality_signal | 0.04717 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | 0 |
| qsc_codepython_score_lines_no_logic_quality_signal | 0.160377 |
| qsc_codepython_frac_lines_print_quality_signal | 0 |

The unsuffixed qsc_* columns for this row are 0 throughout, except qsc_code_frac_chars_dupe_6grams through qsc_code_frac_chars_dupe_10grams and qsc_code_frac_lines_dupe_lines, which are 1; qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat are null. effective is 0 and hits is 6.
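The limix file above consists of two nearly identical test classes (CGPLVM_test and CGPLVM_test_constK differ mainly in how the covariance is set up), and the duplicated-n-gram signals record that: qsc_code_frac_chars_dupe_5grams_quality_signal through the 10-gram variant sit at roughly 0.67 to 0.73. The sketch below shows one way such a fraction could be defined; the tokenisation and counting rules of the actual pipeline are not shown here, so treat this as an illustration of the idea rather than a reimplementation.

```python
# Sketch: fraction of word characters covered by word n-grams that occur more
# than once in the file -- a hypothetical definition in the spirit of the
# qsc_code_frac_chars_dupe_*grams_quality_signal family.
from collections import Counter

def frac_chars_dupe_ngrams(content: str, n: int) -> float:
    words = content.split()
    if len(words) < n:
        return 0.0
    ngrams = [tuple(words[i:i + n]) for i in range(len(words) - n + 1)]
    counts = Counter(ngrams)
    covered = set()  # word positions that fall inside a repeated n-gram
    for i, gram in enumerate(ngrams):
        if counts[gram] > 1:
            covered.update(range(i, i + n))
    total_chars = sum(len(w) for w in words)
    dupe_chars = sum(len(words[i]) for i in covered)
    return dupe_chars / total_chars if total_chars else 0.0
```

A file made of two near-identical halves pushes any measure of this kind toward the recorded values of around 0.7, whereas the two small records above score 0 on every dupe signal.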
| Field | Value |
|---|---|
| hexsha | fc16361073a7eb5c27cd1b084b03df7af4e6f9f8 |
| size | 108,321 |
| ext | py |
| lang | Python |
| max_stars_repo_path | tests/unit/gapic/dialogflowcx_v3beta1/test_transition_route_groups.py |
| max_stars_repo_name | nicain/python-dialogflow-cx |
| max_stars_repo_head_hexsha | 2292ff540aea24c3c831a5ffe1604c2c022ccb82 |
| max_stars_repo_licenses | ["Apache-2.0"] |
| max_stars_count | null |
| max_stars_repo_stars_event_min_datetime | null |
| max_stars_repo_stars_event_max_datetime | null |
| max_issues_repo_path | tests/unit/gapic/dialogflowcx_v3beta1/test_transition_route_groups.py |
| max_issues_repo_name | nicain/python-dialogflow-cx |
| max_issues_repo_head_hexsha | 2292ff540aea24c3c831a5ffe1604c2c022ccb82 |
| max_issues_repo_licenses | ["Apache-2.0"] |
| max_issues_count | null |
| max_issues_repo_issues_event_min_datetime | null |
| max_issues_repo_issues_event_max_datetime | null |
| max_forks_repo_path | tests/unit/gapic/dialogflowcx_v3beta1/test_transition_route_groups.py |
| max_forks_repo_name | nicain/python-dialogflow-cx |
| max_forks_repo_head_hexsha | 2292ff540aea24c3c831a5ffe1604c2c022ccb82 |
| max_forks_repo_licenses | ["Apache-2.0"] |
| max_forks_count | null |
| max_forks_repo_forks_event_min_datetime | null |
| max_forks_repo_forks_event_max_datetime | null |
| content | (Python source, reproduced verbatim below) |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.dialogflowcx_v3beta1.services.transition_route_groups import (
TransitionRouteGroupsAsyncClient,
)
from google.cloud.dialogflowcx_v3beta1.services.transition_route_groups import (
TransitionRouteGroupsClient,
)
from google.cloud.dialogflowcx_v3beta1.services.transition_route_groups import pagers
from google.cloud.dialogflowcx_v3beta1.services.transition_route_groups import (
transports,
)
from google.cloud.dialogflowcx_v3beta1.types import fulfillment
from google.cloud.dialogflowcx_v3beta1.types import page
from google.cloud.dialogflowcx_v3beta1.types import response_message
from google.cloud.dialogflowcx_v3beta1.types import transition_route_group
from google.cloud.dialogflowcx_v3beta1.types import (
transition_route_group as gcdc_transition_route_group,
)
from google.oauth2 import service_account
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import struct_pb2 # type: ignore
import google.auth
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return (
"foo.googleapis.com"
if ("localhost" in client.DEFAULT_ENDPOINT)
else client.DEFAULT_ENDPOINT
)
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert TransitionRouteGroupsClient._get_default_mtls_endpoint(None) is None
assert (
TransitionRouteGroupsClient._get_default_mtls_endpoint(api_endpoint)
== api_mtls_endpoint
)
assert (
TransitionRouteGroupsClient._get_default_mtls_endpoint(api_mtls_endpoint)
== api_mtls_endpoint
)
assert (
TransitionRouteGroupsClient._get_default_mtls_endpoint(sandbox_endpoint)
== sandbox_mtls_endpoint
)
assert (
TransitionRouteGroupsClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
== sandbox_mtls_endpoint
)
assert (
TransitionRouteGroupsClient._get_default_mtls_endpoint(non_googleapi)
== non_googleapi
)
@pytest.mark.parametrize(
"client_class", [TransitionRouteGroupsClient, TransitionRouteGroupsAsyncClient,]
)
def test_transition_route_groups_client_from_service_account_info(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_info"
) as factory:
factory.return_value = creds
info = {"valid": True}
client = client_class.from_service_account_info(info)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "dialogflow.googleapis.com:443"
@pytest.mark.parametrize(
"transport_class,transport_name",
[
(transports.TransitionRouteGroupsGrpcTransport, "grpc"),
(transports.TransitionRouteGroupsGrpcAsyncIOTransport, "grpc_asyncio"),
],
)
def test_transition_route_groups_client_service_account_always_use_jwt(
transport_class, transport_name
):
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=True)
use_jwt.assert_called_once_with(True)
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=False)
use_jwt.assert_not_called()
@pytest.mark.parametrize(
"client_class", [TransitionRouteGroupsClient, TransitionRouteGroupsAsyncClient,]
)
def test_transition_route_groups_client_from_service_account_file(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_file"
) as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
client = client_class.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "dialogflow.googleapis.com:443"
def test_transition_route_groups_client_get_transport_class():
transport = TransitionRouteGroupsClient.get_transport_class()
available_transports = [
transports.TransitionRouteGroupsGrpcTransport,
]
assert transport in available_transports
transport = TransitionRouteGroupsClient.get_transport_class("grpc")
assert transport == transports.TransitionRouteGroupsGrpcTransport
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(
TransitionRouteGroupsClient,
transports.TransitionRouteGroupsGrpcTransport,
"grpc",
),
(
TransitionRouteGroupsAsyncClient,
transports.TransitionRouteGroupsGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
@mock.patch.object(
TransitionRouteGroupsClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(TransitionRouteGroupsClient),
)
@mock.patch.object(
TransitionRouteGroupsAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(TransitionRouteGroupsAsyncClient),
)
def test_transition_route_groups_client_client_options(
client_class, transport_class, transport_name
):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(TransitionRouteGroupsClient, "get_transport_class") as gtc:
transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(TransitionRouteGroupsClient, "get_transport_class") as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name, client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class(transport=transport_name)
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
with pytest.raises(ValueError):
client = client_class(transport=transport_name)
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,use_client_cert_env",
[
(
TransitionRouteGroupsClient,
transports.TransitionRouteGroupsGrpcTransport,
"grpc",
"true",
),
(
TransitionRouteGroupsAsyncClient,
transports.TransitionRouteGroupsGrpcAsyncIOTransport,
"grpc_asyncio",
"true",
),
(
TransitionRouteGroupsClient,
transports.TransitionRouteGroupsGrpcTransport,
"grpc",
"false",
),
(
TransitionRouteGroupsAsyncClient,
transports.TransitionRouteGroupsGrpcAsyncIOTransport,
"grpc_asyncio",
"false",
),
],
)
@mock.patch.object(
TransitionRouteGroupsClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(TransitionRouteGroupsClient),
)
@mock.patch.object(
TransitionRouteGroupsAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(TransitionRouteGroupsAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_transition_route_groups_client_mtls_env_auto(
client_class, transport_class, transport_name, use_client_cert_env
):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
options = client_options.ClientOptions(
client_cert_source=client_cert_source_callback
)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=client_cert_source_callback,
):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class", [TransitionRouteGroupsClient, TransitionRouteGroupsAsyncClient]
)
@mock.patch.object(
TransitionRouteGroupsClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(TransitionRouteGroupsClient),
)
@mock.patch.object(
TransitionRouteGroupsAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(TransitionRouteGroupsAsyncClient),
)
def test_transition_route_groups_client_get_mtls_endpoint_and_cert_source(client_class):
mock_client_cert_source = mock.Mock()
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source == mock_client_cert_source
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
mock_client_cert_source = mock.Mock()
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=mock_client_cert_source,
):
(
api_endpoint,
cert_source,
) = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source == mock_client_cert_source
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(
TransitionRouteGroupsClient,
transports.TransitionRouteGroupsGrpcTransport,
"grpc",
),
(
TransitionRouteGroupsAsyncClient,
transports.TransitionRouteGroupsGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
def test_transition_route_groups_client_client_options_scopes(
client_class, transport_class, transport_name
):
# Check the case scopes are provided.
options = client_options.ClientOptions(scopes=["1", "2"],)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,grpc_helpers",
[
(
TransitionRouteGroupsClient,
transports.TransitionRouteGroupsGrpcTransport,
"grpc",
grpc_helpers,
),
(
TransitionRouteGroupsAsyncClient,
transports.TransitionRouteGroupsGrpcAsyncIOTransport,
"grpc_asyncio",
grpc_helpers_async,
),
],
)
def test_transition_route_groups_client_client_options_credentials_file(
client_class, transport_class, transport_name, grpc_helpers
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
def test_transition_route_groups_client_client_options_from_dict():
with mock.patch(
"google.cloud.dialogflowcx_v3beta1.services.transition_route_groups.transports.TransitionRouteGroupsGrpcTransport.__init__"
) as grpc_transport:
grpc_transport.return_value = None
client = TransitionRouteGroupsClient(
client_options={"api_endpoint": "squid.clam.whelk"}
)
grpc_transport.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,grpc_helpers",
[
(
TransitionRouteGroupsClient,
transports.TransitionRouteGroupsGrpcTransport,
"grpc",
grpc_helpers,
),
(
TransitionRouteGroupsAsyncClient,
transports.TransitionRouteGroupsGrpcAsyncIOTransport,
"grpc_asyncio",
grpc_helpers_async,
),
],
)
def test_transition_route_groups_client_create_channel_credentials_file(
client_class, transport_class, transport_name, grpc_helpers
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# test that the credentials from file are saved and used as the credentials.
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel"
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
file_creds = ga_credentials.AnonymousCredentials()
load_creds.return_value = (file_creds, None)
adc.return_value = (creds, None)
client = client_class(client_options=options, transport=transport_name)
create_channel.assert_called_with(
"dialogflow.googleapis.com:443",
credentials=file_creds,
credentials_file=None,
quota_project_id=None,
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
scopes=None,
default_host="dialogflow.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize(
"request_type", [transition_route_group.ListTransitionRouteGroupsRequest, dict,]
)
def test_list_transition_route_groups(request_type, transport: str = "grpc"):
client = TransitionRouteGroupsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_transition_route_groups), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = transition_route_group.ListTransitionRouteGroupsResponse(
next_page_token="next_page_token_value",
)
response = client.list_transition_route_groups(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == transition_route_group.ListTransitionRouteGroupsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListTransitionRouteGroupsPager)
assert response.next_page_token == "next_page_token_value"
def test_list_transition_route_groups_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = TransitionRouteGroupsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_transition_route_groups), "__call__"
) as call:
client.list_transition_route_groups()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == transition_route_group.ListTransitionRouteGroupsRequest()
@pytest.mark.asyncio
async def test_list_transition_route_groups_async(
transport: str = "grpc_asyncio",
request_type=transition_route_group.ListTransitionRouteGroupsRequest,
):
client = TransitionRouteGroupsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_transition_route_groups), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
transition_route_group.ListTransitionRouteGroupsResponse(
next_page_token="next_page_token_value",
)
)
response = await client.list_transition_route_groups(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == transition_route_group.ListTransitionRouteGroupsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListTransitionRouteGroupsAsyncPager)
assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_transition_route_groups_async_from_dict():
await test_list_transition_route_groups_async(request_type=dict)
def test_list_transition_route_groups_field_headers():
client = TransitionRouteGroupsClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = transition_route_group.ListTransitionRouteGroupsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_transition_route_groups), "__call__"
) as call:
call.return_value = transition_route_group.ListTransitionRouteGroupsResponse()
client.list_transition_route_groups(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_transition_route_groups_field_headers_async():
client = TransitionRouteGroupsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = transition_route_group.ListTransitionRouteGroupsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_transition_route_groups), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
transition_route_group.ListTransitionRouteGroupsResponse()
)
await client.list_transition_route_groups(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_transition_route_groups_flattened():
client = TransitionRouteGroupsClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_transition_route_groups), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = transition_route_group.ListTransitionRouteGroupsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_transition_route_groups(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
def test_list_transition_route_groups_flattened_error():
client = TransitionRouteGroupsClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_transition_route_groups(
transition_route_group.ListTransitionRouteGroupsRequest(),
parent="parent_value",
)
@pytest.mark.asyncio
async def test_list_transition_route_groups_flattened_async():
client = TransitionRouteGroupsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_transition_route_groups), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = transition_route_group.ListTransitionRouteGroupsResponse()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
transition_route_group.ListTransitionRouteGroupsResponse()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_transition_route_groups(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_list_transition_route_groups_flattened_error_async():
client = TransitionRouteGroupsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_transition_route_groups(
transition_route_group.ListTransitionRouteGroupsRequest(),
parent="parent_value",
)
def test_list_transition_route_groups_pager(transport_name: str = "grpc"):
client = TransitionRouteGroupsClient(
credentials=ga_credentials.AnonymousCredentials, transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_transition_route_groups), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
transition_route_group.ListTransitionRouteGroupsResponse(
transition_route_groups=[
transition_route_group.TransitionRouteGroup(),
transition_route_group.TransitionRouteGroup(),
transition_route_group.TransitionRouteGroup(),
],
next_page_token="abc",
),
transition_route_group.ListTransitionRouteGroupsResponse(
transition_route_groups=[], next_page_token="def",
),
transition_route_group.ListTransitionRouteGroupsResponse(
transition_route_groups=[
transition_route_group.TransitionRouteGroup(),
],
next_page_token="ghi",
),
transition_route_group.ListTransitionRouteGroupsResponse(
transition_route_groups=[
transition_route_group.TransitionRouteGroup(),
transition_route_group.TransitionRouteGroup(),
],
),
RuntimeError,
)
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
)
pager = client.list_transition_route_groups(request={})
assert pager._metadata == metadata
results = [i for i in pager]
assert len(results) == 6
assert all(
isinstance(i, transition_route_group.TransitionRouteGroup) for i in results
)
def test_list_transition_route_groups_pages(transport_name: str = "grpc"):
client = TransitionRouteGroupsClient(
credentials=ga_credentials.AnonymousCredentials, transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_transition_route_groups), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
transition_route_group.ListTransitionRouteGroupsResponse(
transition_route_groups=[
transition_route_group.TransitionRouteGroup(),
transition_route_group.TransitionRouteGroup(),
transition_route_group.TransitionRouteGroup(),
],
next_page_token="abc",
),
transition_route_group.ListTransitionRouteGroupsResponse(
transition_route_groups=[], next_page_token="def",
),
transition_route_group.ListTransitionRouteGroupsResponse(
transition_route_groups=[
transition_route_group.TransitionRouteGroup(),
],
next_page_token="ghi",
),
transition_route_group.ListTransitionRouteGroupsResponse(
transition_route_groups=[
transition_route_group.TransitionRouteGroup(),
transition_route_group.TransitionRouteGroup(),
],
),
RuntimeError,
)
pages = list(client.list_transition_route_groups(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_transition_route_groups_async_pager():
client = TransitionRouteGroupsAsyncClient(
credentials=ga_credentials.AnonymousCredentials,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_transition_route_groups),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
transition_route_group.ListTransitionRouteGroupsResponse(
transition_route_groups=[
transition_route_group.TransitionRouteGroup(),
transition_route_group.TransitionRouteGroup(),
transition_route_group.TransitionRouteGroup(),
],
next_page_token="abc",
),
transition_route_group.ListTransitionRouteGroupsResponse(
transition_route_groups=[], next_page_token="def",
),
transition_route_group.ListTransitionRouteGroupsResponse(
transition_route_groups=[
transition_route_group.TransitionRouteGroup(),
],
next_page_token="ghi",
),
transition_route_group.ListTransitionRouteGroupsResponse(
transition_route_groups=[
transition_route_group.TransitionRouteGroup(),
transition_route_group.TransitionRouteGroup(),
],
),
RuntimeError,
)
async_pager = await client.list_transition_route_groups(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(
isinstance(i, transition_route_group.TransitionRouteGroup)
for i in responses
)
@pytest.mark.asyncio
async def test_list_transition_route_groups_async_pages():
client = TransitionRouteGroupsAsyncClient(
credentials=ga_credentials.AnonymousCredentials,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_transition_route_groups),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
transition_route_group.ListTransitionRouteGroupsResponse(
transition_route_groups=[
transition_route_group.TransitionRouteGroup(),
transition_route_group.TransitionRouteGroup(),
transition_route_group.TransitionRouteGroup(),
],
next_page_token="abc",
),
transition_route_group.ListTransitionRouteGroupsResponse(
transition_route_groups=[], next_page_token="def",
),
transition_route_group.ListTransitionRouteGroupsResponse(
transition_route_groups=[
transition_route_group.TransitionRouteGroup(),
],
next_page_token="ghi",
),
transition_route_group.ListTransitionRouteGroupsResponse(
transition_route_groups=[
transition_route_group.TransitionRouteGroup(),
transition_route_group.TransitionRouteGroup(),
],
),
RuntimeError,
)
pages = []
async for page_ in (
await client.list_transition_route_groups(request={})
).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize(
"request_type", [transition_route_group.GetTransitionRouteGroupRequest, dict,]
)
def test_get_transition_route_group(request_type, transport: str = "grpc"):
client = TransitionRouteGroupsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_transition_route_group), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = transition_route_group.TransitionRouteGroup(
name="name_value", display_name="display_name_value",
)
response = client.get_transition_route_group(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == transition_route_group.GetTransitionRouteGroupRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, transition_route_group.TransitionRouteGroup)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
def test_get_transition_route_group_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = TransitionRouteGroupsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_transition_route_group), "__call__"
) as call:
client.get_transition_route_group()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == transition_route_group.GetTransitionRouteGroupRequest()
@pytest.mark.asyncio
async def test_get_transition_route_group_async(
transport: str = "grpc_asyncio",
request_type=transition_route_group.GetTransitionRouteGroupRequest,
):
client = TransitionRouteGroupsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_transition_route_group), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
transition_route_group.TransitionRouteGroup(
name="name_value", display_name="display_name_value",
)
)
response = await client.get_transition_route_group(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == transition_route_group.GetTransitionRouteGroupRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, transition_route_group.TransitionRouteGroup)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
@pytest.mark.asyncio
async def test_get_transition_route_group_async_from_dict():
await test_get_transition_route_group_async(request_type=dict)
def test_get_transition_route_group_field_headers():
client = TransitionRouteGroupsClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = transition_route_group.GetTransitionRouteGroupRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_transition_route_group), "__call__"
) as call:
call.return_value = transition_route_group.TransitionRouteGroup()
client.get_transition_route_group(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_transition_route_group_field_headers_async():
client = TransitionRouteGroupsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = transition_route_group.GetTransitionRouteGroupRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_transition_route_group), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
transition_route_group.TransitionRouteGroup()
)
await client.get_transition_route_group(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_transition_route_group_flattened():
client = TransitionRouteGroupsClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_transition_route_group), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = transition_route_group.TransitionRouteGroup()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_transition_route_group(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_get_transition_route_group_flattened_error():
client = TransitionRouteGroupsClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_transition_route_group(
transition_route_group.GetTransitionRouteGroupRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_get_transition_route_group_flattened_async():
client = TransitionRouteGroupsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_transition_route_group), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = transition_route_group.TransitionRouteGroup()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
transition_route_group.TransitionRouteGroup()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_transition_route_group(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_get_transition_route_group_flattened_error_async():
client = TransitionRouteGroupsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_transition_route_group(
transition_route_group.GetTransitionRouteGroupRequest(), name="name_value",
)
@pytest.mark.parametrize(
"request_type",
[gcdc_transition_route_group.CreateTransitionRouteGroupRequest, dict,],
)
def test_create_transition_route_group(request_type, transport: str = "grpc"):
client = TransitionRouteGroupsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_transition_route_group), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = gcdc_transition_route_group.TransitionRouteGroup(
name="name_value", display_name="display_name_value",
)
response = client.create_transition_route_group(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert (
args[0] == gcdc_transition_route_group.CreateTransitionRouteGroupRequest()
)
# Establish that the response is the type that we expect.
assert isinstance(response, gcdc_transition_route_group.TransitionRouteGroup)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
def test_create_transition_route_group_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = TransitionRouteGroupsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_transition_route_group), "__call__"
) as call:
client.create_transition_route_group()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert (
args[0] == gcdc_transition_route_group.CreateTransitionRouteGroupRequest()
)
@pytest.mark.asyncio
async def test_create_transition_route_group_async(
transport: str = "grpc_asyncio",
request_type=gcdc_transition_route_group.CreateTransitionRouteGroupRequest,
):
client = TransitionRouteGroupsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_transition_route_group), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gcdc_transition_route_group.TransitionRouteGroup(
name="name_value", display_name="display_name_value",
)
)
response = await client.create_transition_route_group(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert (
args[0] == gcdc_transition_route_group.CreateTransitionRouteGroupRequest()
)
# Establish that the response is the type that we expect.
assert isinstance(response, gcdc_transition_route_group.TransitionRouteGroup)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
@pytest.mark.asyncio
async def test_create_transition_route_group_async_from_dict():
await test_create_transition_route_group_async(request_type=dict)
def test_create_transition_route_group_field_headers():
client = TransitionRouteGroupsClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = gcdc_transition_route_group.CreateTransitionRouteGroupRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_transition_route_group), "__call__"
) as call:
call.return_value = gcdc_transition_route_group.TransitionRouteGroup()
client.create_transition_route_group(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_transition_route_group_field_headers_async():
client = TransitionRouteGroupsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = gcdc_transition_route_group.CreateTransitionRouteGroupRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_transition_route_group), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gcdc_transition_route_group.TransitionRouteGroup()
)
await client.create_transition_route_group(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_create_transition_route_group_flattened():
client = TransitionRouteGroupsClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_transition_route_group), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = gcdc_transition_route_group.TransitionRouteGroup()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_transition_route_group(
parent="parent_value",
transition_route_group=gcdc_transition_route_group.TransitionRouteGroup(
name="name_value"
),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].transition_route_group
mock_val = gcdc_transition_route_group.TransitionRouteGroup(name="name_value")
assert arg == mock_val
def test_create_transition_route_group_flattened_error():
client = TransitionRouteGroupsClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.create_transition_route_group(
gcdc_transition_route_group.CreateTransitionRouteGroupRequest(),
parent="parent_value",
transition_route_group=gcdc_transition_route_group.TransitionRouteGroup(
name="name_value"
),
)
@pytest.mark.asyncio
async def test_create_transition_route_group_flattened_async():
client = TransitionRouteGroupsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_transition_route_group), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = gcdc_transition_route_group.TransitionRouteGroup()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gcdc_transition_route_group.TransitionRouteGroup()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.create_transition_route_group(
parent="parent_value",
transition_route_group=gcdc_transition_route_group.TransitionRouteGroup(
name="name_value"
),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].transition_route_group
mock_val = gcdc_transition_route_group.TransitionRouteGroup(name="name_value")
assert arg == mock_val
@pytest.mark.asyncio
async def test_create_transition_route_group_flattened_error_async():
client = TransitionRouteGroupsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.create_transition_route_group(
gcdc_transition_route_group.CreateTransitionRouteGroupRequest(),
parent="parent_value",
transition_route_group=gcdc_transition_route_group.TransitionRouteGroup(
name="name_value"
),
)
@pytest.mark.parametrize(
"request_type",
[gcdc_transition_route_group.UpdateTransitionRouteGroupRequest, dict,],
)
def test_update_transition_route_group(request_type, transport: str = "grpc"):
client = TransitionRouteGroupsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_transition_route_group), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = gcdc_transition_route_group.TransitionRouteGroup(
name="name_value", display_name="display_name_value",
)
response = client.update_transition_route_group(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert (
args[0] == gcdc_transition_route_group.UpdateTransitionRouteGroupRequest()
)
# Establish that the response is the type that we expect.
assert isinstance(response, gcdc_transition_route_group.TransitionRouteGroup)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
def test_update_transition_route_group_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = TransitionRouteGroupsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_transition_route_group), "__call__"
) as call:
client.update_transition_route_group()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert (
args[0] == gcdc_transition_route_group.UpdateTransitionRouteGroupRequest()
)
@pytest.mark.asyncio
async def test_update_transition_route_group_async(
transport: str = "grpc_asyncio",
request_type=gcdc_transition_route_group.UpdateTransitionRouteGroupRequest,
):
client = TransitionRouteGroupsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_transition_route_group), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gcdc_transition_route_group.TransitionRouteGroup(
name="name_value", display_name="display_name_value",
)
)
response = await client.update_transition_route_group(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert (
args[0] == gcdc_transition_route_group.UpdateTransitionRouteGroupRequest()
)
# Establish that the response is the type that we expect.
assert isinstance(response, gcdc_transition_route_group.TransitionRouteGroup)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
@pytest.mark.asyncio
async def test_update_transition_route_group_async_from_dict():
await test_update_transition_route_group_async(request_type=dict)
def test_update_transition_route_group_field_headers():
client = TransitionRouteGroupsClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = gcdc_transition_route_group.UpdateTransitionRouteGroupRequest()
request.transition_route_group.name = "transition_route_group.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_transition_route_group), "__call__"
) as call:
call.return_value = gcdc_transition_route_group.TransitionRouteGroup()
client.update_transition_route_group(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"transition_route_group.name=transition_route_group.name/value",
) in kw["metadata"]
@pytest.mark.asyncio
async def test_update_transition_route_group_field_headers_async():
client = TransitionRouteGroupsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = gcdc_transition_route_group.UpdateTransitionRouteGroupRequest()
request.transition_route_group.name = "transition_route_group.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_transition_route_group), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gcdc_transition_route_group.TransitionRouteGroup()
)
await client.update_transition_route_group(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"transition_route_group.name=transition_route_group.name/value",
) in kw["metadata"]
def test_update_transition_route_group_flattened():
client = TransitionRouteGroupsClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_transition_route_group), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = gcdc_transition_route_group.TransitionRouteGroup()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.update_transition_route_group(
transition_route_group=gcdc_transition_route_group.TransitionRouteGroup(
name="name_value"
),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].transition_route_group
mock_val = gcdc_transition_route_group.TransitionRouteGroup(name="name_value")
assert arg == mock_val
arg = args[0].update_mask
mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
assert arg == mock_val
def test_update_transition_route_group_flattened_error():
client = TransitionRouteGroupsClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.update_transition_route_group(
gcdc_transition_route_group.UpdateTransitionRouteGroupRequest(),
transition_route_group=gcdc_transition_route_group.TransitionRouteGroup(
name="name_value"
),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
@pytest.mark.asyncio
async def test_update_transition_route_group_flattened_async():
client = TransitionRouteGroupsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_transition_route_group), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = gcdc_transition_route_group.TransitionRouteGroup()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gcdc_transition_route_group.TransitionRouteGroup()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.update_transition_route_group(
transition_route_group=gcdc_transition_route_group.TransitionRouteGroup(
name="name_value"
),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].transition_route_group
mock_val = gcdc_transition_route_group.TransitionRouteGroup(name="name_value")
assert arg == mock_val
arg = args[0].update_mask
mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
assert arg == mock_val
@pytest.mark.asyncio
async def test_update_transition_route_group_flattened_error_async():
client = TransitionRouteGroupsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.update_transition_route_group(
gcdc_transition_route_group.UpdateTransitionRouteGroupRequest(),
transition_route_group=gcdc_transition_route_group.TransitionRouteGroup(
name="name_value"
),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
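# The update tests above combine a TransitionRouteGroup payload with a
# google.protobuf FieldMask so that only the listed fields are rewritten on the
# server. A brief illustrative construction (hypothetical helper, mirroring the
# exact values asserted in the tests above):
def _example_update_request():
    return gcdc_transition_route_group.UpdateTransitionRouteGroupRequest(
        transition_route_group=gcdc_transition_route_group.TransitionRouteGroup(
            name="name_value"
        ),
        update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
    )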
@pytest.mark.parametrize(
"request_type", [transition_route_group.DeleteTransitionRouteGroupRequest, dict,]
)
def test_delete_transition_route_group(request_type, transport: str = "grpc"):
client = TransitionRouteGroupsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_transition_route_group), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = None
response = client.delete_transition_route_group(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == transition_route_group.DeleteTransitionRouteGroupRequest()
# Establish that the response is the type that we expect.
assert response is None
def test_delete_transition_route_group_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = TransitionRouteGroupsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_transition_route_group), "__call__"
) as call:
client.delete_transition_route_group()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == transition_route_group.DeleteTransitionRouteGroupRequest()
@pytest.mark.asyncio
async def test_delete_transition_route_group_async(
transport: str = "grpc_asyncio",
request_type=transition_route_group.DeleteTransitionRouteGroupRequest,
):
client = TransitionRouteGroupsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_transition_route_group), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
response = await client.delete_transition_route_group(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == transition_route_group.DeleteTransitionRouteGroupRequest()
# Establish that the response is the type that we expect.
assert response is None
@pytest.mark.asyncio
async def test_delete_transition_route_group_async_from_dict():
await test_delete_transition_route_group_async(request_type=dict)
def test_delete_transition_route_group_field_headers():
client = TransitionRouteGroupsClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = transition_route_group.DeleteTransitionRouteGroupRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_transition_route_group), "__call__"
) as call:
call.return_value = None
client.delete_transition_route_group(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_transition_route_group_field_headers_async():
client = TransitionRouteGroupsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = transition_route_group.DeleteTransitionRouteGroupRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_transition_route_group), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
await client.delete_transition_route_group(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_delete_transition_route_group_flattened():
client = TransitionRouteGroupsClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_transition_route_group), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = None
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.delete_transition_route_group(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_delete_transition_route_group_flattened_error():
client = TransitionRouteGroupsClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_transition_route_group(
transition_route_group.DeleteTransitionRouteGroupRequest(),
name="name_value",
)
@pytest.mark.asyncio
async def test_delete_transition_route_group_flattened_async():
client = TransitionRouteGroupsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_transition_route_group), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = None
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.delete_transition_route_group(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_delete_transition_route_group_flattened_error_async():
client = TransitionRouteGroupsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.delete_transition_route_group(
transition_route_group.DeleteTransitionRouteGroupRequest(),
name="name_value",
)
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.TransitionRouteGroupsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = TransitionRouteGroupsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.TransitionRouteGroupsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = TransitionRouteGroupsClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide an api_key and a transport instance.
transport = transports.TransitionRouteGroupsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
options = client_options.ClientOptions()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = TransitionRouteGroupsClient(
client_options=options, transport=transport,
)
# It is an error to provide an api_key and a credential.
options = mock.Mock()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = TransitionRouteGroupsClient(
client_options=options, credentials=ga_credentials.AnonymousCredentials()
)
# It is an error to provide scopes and a transport instance.
transport = transports.TransitionRouteGroupsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = TransitionRouteGroupsClient(
client_options={"scopes": ["1", "2"]}, transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.TransitionRouteGroupsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = TransitionRouteGroupsClient(transport=transport)
assert client.transport is transport
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.TransitionRouteGroupsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
transport = transports.TransitionRouteGroupsGrpcAsyncIOTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
@pytest.mark.parametrize(
"transport_class",
[
transports.TransitionRouteGroupsGrpcTransport,
transports.TransitionRouteGroupsGrpcAsyncIOTransport,
],
)
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = TransitionRouteGroupsClient(
credentials=ga_credentials.AnonymousCredentials(),
)
assert isinstance(client.transport, transports.TransitionRouteGroupsGrpcTransport,)
def test_transition_route_groups_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.TransitionRouteGroupsTransport(
credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json",
)
def test_transition_route_groups_base_transport():
# Instantiate the base transport.
with mock.patch(
"google.cloud.dialogflowcx_v3beta1.services.transition_route_groups.transports.TransitionRouteGroupsTransport.__init__"
) as Transport:
Transport.return_value = None
transport = transports.TransitionRouteGroupsTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
"list_transition_route_groups",
"get_transition_route_group",
"create_transition_route_group",
"update_transition_route_group",
"delete_transition_route_group",
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
with pytest.raises(NotImplementedError):
transport.close()
def test_transition_route_groups_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.dialogflowcx_v3beta1.services.transition_route_groups.transports.TransitionRouteGroupsTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.TransitionRouteGroupsTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=None,
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
quota_project_id="octopus",
)
def test_transition_route_groups_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
"google.cloud.dialogflowcx_v3beta1.services.transition_route_groups.transports.TransitionRouteGroupsTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.TransitionRouteGroupsTransport()
adc.assert_called_once()
def test_transition_route_groups_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
TransitionRouteGroupsClient()
adc.assert_called_once_with(
scopes=None,
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
quota_project_id=None,
)
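# The ADC tests above assert that, when no credentials are supplied, the client
# and transports fall back to google.auth.default() with the Dialogflow scopes.
# A minimal sketch of that lookup, mirroring the call asserted above (running it
# for real would consult the local Application Default Credentials):
def _example_resolve_adc_credentials():
    credentials, project_id = google.auth.default(
        scopes=None,
        default_scopes=(
            "https://www.googleapis.com/auth/cloud-platform",
            "https://www.googleapis.com/auth/dialogflow",
        ),
        quota_project_id=None,
    )
    return credentials, project_id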
@pytest.mark.parametrize(
"transport_class",
[
transports.TransitionRouteGroupsGrpcTransport,
transports.TransitionRouteGroupsGrpcAsyncIOTransport,
],
)
def test_transition_route_groups_transport_auth_adc(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
adc.assert_called_once_with(
scopes=["1", "2"],
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class,grpc_helpers",
[
(transports.TransitionRouteGroupsGrpcTransport, grpc_helpers),
(transports.TransitionRouteGroupsGrpcAsyncIOTransport, grpc_helpers_async),
],
)
def test_transition_route_groups_transport_create_channel(
transport_class, grpc_helpers
):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel", autospec=True
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
adc.return_value = (creds, None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
create_channel.assert_called_with(
"dialogflow.googleapis.com:443",
credentials=creds,
credentials_file=None,
quota_project_id="octopus",
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
scopes=["1", "2"],
default_host="dialogflow.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize(
"transport_class",
[
transports.TransitionRouteGroupsGrpcTransport,
transports.TransitionRouteGroupsGrpcAsyncIOTransport,
],
)
def test_transition_route_groups_grpc_transport_client_cert_source_for_mtls(
transport_class,
):
cred = ga_credentials.AnonymousCredentials()
# Check ssl_channel_credentials is used if provided.
with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
mock_ssl_channel_creds = mock.Mock()
transport_class(
host="squid.clam.whelk",
credentials=cred,
ssl_channel_credentials=mock_ssl_channel_creds,
)
mock_create_channel.assert_called_once_with(
"squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_channel_creds,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
# is used.
with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
transport_class(
credentials=cred,
client_cert_source_for_mtls=client_cert_source_callback,
)
expected_cert, expected_key = client_cert_source_callback()
mock_ssl_cred.assert_called_once_with(
certificate_chain=expected_cert, private_key=expected_key
)
def test_transition_route_groups_host_no_port():
client = TransitionRouteGroupsClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="dialogflow.googleapis.com"
),
)
assert client.transport._host == "dialogflow.googleapis.com:443"
def test_transition_route_groups_host_with_port():
client = TransitionRouteGroupsClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="dialogflow.googleapis.com:8000"
),
)
assert client.transport._host == "dialogflow.googleapis.com:8000"
def test_transition_route_groups_grpc_transport_channel():
channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.TransitionRouteGroupsGrpcTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
def test_transition_route_groups_grpc_asyncio_transport_channel():
channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.TransitionRouteGroupsGrpcAsyncIOTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[
transports.TransitionRouteGroupsGrpcTransport,
transports.TransitionRouteGroupsGrpcAsyncIOTransport,
],
)
def test_transition_route_groups_transport_channel_mtls_with_client_cert_source(
transport_class,
):
with mock.patch(
"grpc.ssl_channel_credentials", autospec=True
) as grpc_ssl_channel_cred:
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = ga_credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[
transports.TransitionRouteGroupsGrpcTransport,
transports.TransitionRouteGroupsGrpcAsyncIOTransport,
],
)
def test_transition_route_groups_transport_channel_mtls_with_adc(transport_class):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
def test_flow_path():
project = "squid"
location = "clam"
agent = "whelk"
flow = "octopus"
expected = "projects/{project}/locations/{location}/agents/{agent}/flows/{flow}".format(
project=project, location=location, agent=agent, flow=flow,
)
actual = TransitionRouteGroupsClient.flow_path(project, location, agent, flow)
assert expected == actual
def test_parse_flow_path():
expected = {
"project": "oyster",
"location": "nudibranch",
"agent": "cuttlefish",
"flow": "mussel",
}
path = TransitionRouteGroupsClient.flow_path(**expected)
# Check that the path construction is reversible.
actual = TransitionRouteGroupsClient.parse_flow_path(path)
assert expected == actual
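# The path-helper tests above and below check that resource-name templates are
# formatted and parsed symmetrically. A short round-trip sketch using the same
# flow template exercised by these tests:
def _example_flow_path_round_trip():
    path = TransitionRouteGroupsClient.flow_path("squid", "clam", "whelk", "octopus")
    assert path == "projects/squid/locations/clam/agents/whelk/flows/octopus"
    parsed = TransitionRouteGroupsClient.parse_flow_path(path)
    assert parsed == {
        "project": "squid",
        "location": "clam",
        "agent": "whelk",
        "flow": "octopus",
    }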
def test_intent_path():
project = "winkle"
location = "nautilus"
agent = "scallop"
intent = "abalone"
expected = "projects/{project}/locations/{location}/agents/{agent}/intents/{intent}".format(
project=project, location=location, agent=agent, intent=intent,
)
actual = TransitionRouteGroupsClient.intent_path(project, location, agent, intent)
assert expected == actual
def test_parse_intent_path():
expected = {
"project": "squid",
"location": "clam",
"agent": "whelk",
"intent": "octopus",
}
path = TransitionRouteGroupsClient.intent_path(**expected)
# Check that the path construction is reversible.
actual = TransitionRouteGroupsClient.parse_intent_path(path)
assert expected == actual
def test_page_path():
project = "oyster"
location = "nudibranch"
agent = "cuttlefish"
flow = "mussel"
page = "winkle"
expected = "projects/{project}/locations/{location}/agents/{agent}/flows/{flow}/pages/{page}".format(
project=project, location=location, agent=agent, flow=flow, page=page,
)
actual = TransitionRouteGroupsClient.page_path(project, location, agent, flow, page)
assert expected == actual
def test_parse_page_path():
expected = {
"project": "nautilus",
"location": "scallop",
"agent": "abalone",
"flow": "squid",
"page": "clam",
}
path = TransitionRouteGroupsClient.page_path(**expected)
# Check that the path construction is reversible.
actual = TransitionRouteGroupsClient.parse_page_path(path)
assert expected == actual
def test_transition_route_group_path():
project = "whelk"
location = "octopus"
agent = "oyster"
flow = "nudibranch"
transition_route_group = "cuttlefish"
expected = "projects/{project}/locations/{location}/agents/{agent}/flows/{flow}/transitionRouteGroups/{transition_route_group}".format(
project=project,
location=location,
agent=agent,
flow=flow,
transition_route_group=transition_route_group,
)
actual = TransitionRouteGroupsClient.transition_route_group_path(
project, location, agent, flow, transition_route_group
)
assert expected == actual
def test_parse_transition_route_group_path():
expected = {
"project": "mussel",
"location": "winkle",
"agent": "nautilus",
"flow": "scallop",
"transition_route_group": "abalone",
}
path = TransitionRouteGroupsClient.transition_route_group_path(**expected)
# Check that the path construction is reversible.
actual = TransitionRouteGroupsClient.parse_transition_route_group_path(path)
assert expected == actual
def test_webhook_path():
project = "squid"
location = "clam"
agent = "whelk"
webhook = "octopus"
expected = "projects/{project}/locations/{location}/agents/{agent}/webhooks/{webhook}".format(
project=project, location=location, agent=agent, webhook=webhook,
)
actual = TransitionRouteGroupsClient.webhook_path(project, location, agent, webhook)
assert expected == actual
def test_parse_webhook_path():
expected = {
"project": "oyster",
"location": "nudibranch",
"agent": "cuttlefish",
"webhook": "mussel",
}
path = TransitionRouteGroupsClient.webhook_path(**expected)
# Check that the path construction is reversible.
actual = TransitionRouteGroupsClient.parse_webhook_path(path)
assert expected == actual
def test_common_billing_account_path():
billing_account = "winkle"
expected = "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
actual = TransitionRouteGroupsClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "nautilus",
}
path = TransitionRouteGroupsClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = TransitionRouteGroupsClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "scallop"
expected = "folders/{folder}".format(folder=folder,)
actual = TransitionRouteGroupsClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "abalone",
}
path = TransitionRouteGroupsClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = TransitionRouteGroupsClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "squid"
expected = "organizations/{organization}".format(organization=organization,)
actual = TransitionRouteGroupsClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "clam",
}
path = TransitionRouteGroupsClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = TransitionRouteGroupsClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "whelk"
expected = "projects/{project}".format(project=project,)
actual = TransitionRouteGroupsClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "octopus",
}
path = TransitionRouteGroupsClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = TransitionRouteGroupsClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "oyster"
location = "nudibranch"
expected = "projects/{project}/locations/{location}".format(
project=project, location=location,
)
actual = TransitionRouteGroupsClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "cuttlefish",
"location": "mussel",
}
path = TransitionRouteGroupsClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = TransitionRouteGroupsClient.parse_common_location_path(path)
assert expected == actual
def test_client_with_default_client_info():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(
transports.TransitionRouteGroupsTransport, "_prep_wrapped_messages"
) as prep:
client = TransitionRouteGroupsClient(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(
transports.TransitionRouteGroupsTransport, "_prep_wrapped_messages"
) as prep:
transport_class = TransitionRouteGroupsClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
@pytest.mark.asyncio
async def test_transport_close_async():
client = TransitionRouteGroupsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
)
with mock.patch.object(
type(getattr(client.transport, "grpc_channel")), "close"
) as close:
async with client:
close.assert_not_called()
close.assert_called_once()
def test_transport_close():
transports = {
"grpc": "_grpc_channel",
}
for transport, close_name in transports.items():
client = TransitionRouteGroupsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
with mock.patch.object(
type(getattr(client.transport, close_name)), "close"
) as close:
with client:
close.assert_not_called()
close.assert_called_once()
def test_client_ctx():
transports = [
"grpc",
]
for transport in transports:
client = TransitionRouteGroupsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
# Test client calls underlying transport.
with mock.patch.object(type(client.transport), "close") as close:
close.assert_not_called()
with client:
pass
close.assert_called()
@pytest.mark.parametrize(
"client_class,transport_class",
[
(TransitionRouteGroupsClient, transports.TransitionRouteGroupsGrpcTransport),
(
TransitionRouteGroupsAsyncClient,
transports.TransitionRouteGroupsGrpcAsyncIOTransport,
),
],
)
def test_api_key_credentials(client_class, transport_class):
with mock.patch.object(
google.auth._default, "get_api_key_credentials", create=True
) as get_api_key_credentials:
mock_cred = mock.Mock()
get_api_key_credentials.return_value = mock_cred
options = client_options.ClientOptions()
options.api_key = "api_key"
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=mock_cred,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
avg_line_length: 38.168076 | max_line_length: 141 | alphanum_fraction: 0.690411
hexsha: fc54a44f67fa1e175dc87e66bcfefc6a5b5c2440 | size: 4,764 | ext: py | lang: Python
max_stars_repo_path: torch/nn/qat/modules/conv.py | max_stars_repo_name: MagiaSN/pytorch | max_stars_repo_head_hexsha: 7513455c743d3d644b45a804902c1a0d14b69f45 | max_stars_repo_licenses: ["Intel"] | max_stars_count: 1 | stars events: 2021-06-17T13:02:45.000Z to 2021-06-17T13:02:45.000Z
max_issues_repo_path: torch/nn/qat/modules/conv.py | max_issues_repo_name: MagiaSN/pytorch | max_issues_repo_head_hexsha: 7513455c743d3d644b45a804902c1a0d14b69f45 | max_issues_repo_licenses: ["Intel"] | max_issues_count: 1 | issues events: 2022-01-18T12:17:29.000Z to 2022-01-18T12:17:29.000Z
max_forks_repo_path: torch/nn/qat/modules/conv.py | max_forks_repo_name: MagiaSN/pytorch | max_forks_repo_head_hexsha: 7513455c743d3d644b45a804902c1a0d14b69f45 | max_forks_repo_licenses: ["Intel"] | max_forks_count: 2 | forks events: 2021-07-02T10:18:21.000Z to 2021-08-18T10:10:28.000Z
content:
import torch.nn as nn
from torch.nn.intrinsic import ConvReLU2d, ConvReLU3d
class Conv2d(nn.Conv2d):
r"""
A Conv2d module attached with FakeQuantize modules for weight,
used for quantization aware training.
We adopt the same interface as `torch.nn.Conv2d`, please see
https://pytorch.org/docs/stable/nn.html?highlight=conv2d#torch.nn.Conv2d
for documentation.
Similar to `torch.nn.Conv2d`, with FakeQuantize modules initialized to
default.
Attributes:
weight_fake_quant: fake quant module for weight
"""
_FLOAT_MODULE = nn.Conv2d
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1,
bias=True, padding_mode='zeros', qconfig=None):
super().__init__(in_channels, out_channels, kernel_size,
stride=stride, padding=padding, dilation=dilation,
groups=groups, bias=bias, padding_mode=padding_mode)
assert qconfig, 'qconfig must be provided for QAT module'
self.qconfig = qconfig
self.weight_fake_quant = qconfig.weight()
def forward(self, input):
return self._conv_forward(input, self.weight_fake_quant(self.weight), self.bias)
@classmethod
def from_float(cls, mod):
r"""Create a qat module from a float module or qparams_dict
Args: `mod` a float module, either produced by torch.quantization utilities
or directly from user
"""
assert type(mod) == cls._FLOAT_MODULE, 'qat.' + cls.__name__ + '.from_float only works for ' + \
cls._FLOAT_MODULE.__name__
assert hasattr(mod, 'qconfig'), 'Input float module must have qconfig defined'
assert mod.qconfig, 'Input float module must have a valid qconfig'
if type(mod) == ConvReLU2d:
mod = mod[0]
qconfig = mod.qconfig
qat_conv = cls(mod.in_channels, mod.out_channels, mod.kernel_size,
stride=mod.stride, padding=mod.padding, dilation=mod.dilation,
groups=mod.groups, bias=mod.bias is not None,
padding_mode=mod.padding_mode, qconfig=qconfig)
qat_conv.weight = mod.weight
qat_conv.bias = mod.bias
return qat_conv
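# Hedged usage sketch (not part of this module): converting a float nn.Conv2d
# into this QAT Conv2d via from_float(). The qconfig choice below is an
# assumption for illustration; from_float() only requires that some qconfig be
# set on the float module.
def _example_qat_conv2d_from_float():
    import torch
    float_conv = nn.Conv2d(3, 8, kernel_size=3, padding=1)
    # from_float() asserts that the float module carries a qconfig.
    float_conv.qconfig = torch.quantization.get_default_qat_qconfig("fbgemm")
    qat_conv = Conv2d.from_float(float_conv)
    # The forward pass fake-quantizes the weight before the convolution.
    return qat_conv(torch.randn(1, 3, 16, 16))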
class Conv3d(nn.Conv3d):
r"""
A Conv3d module attached with FakeQuantize modules for weight,
used for quantization aware training.
We adopt the same interface as `torch.nn.Conv3d`, please see
https://pytorch.org/docs/stable/nn.html?highlight=conv3d#torch.nn.Conv3d
for documentation.
Similar to `torch.nn.Conv3d`, with FakeQuantize modules initialized to
default.
Attributes:
weight_fake_quant: fake quant module for weight
"""
_FLOAT_MODULE = nn.Conv3d
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
bias=True,
padding_mode="zeros",
qconfig=None,
):
super().__init__(
in_channels,
out_channels,
kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
bias=bias,
padding_mode=padding_mode,
)
assert qconfig, "qconfig must be provided for QAT module"
self.qconfig = qconfig
self.weight_fake_quant = qconfig.weight()
def forward(self, input):
return self._conv_forward(input, self.weight_fake_quant(self.weight), self.bias)
@classmethod
def from_float(cls, mod):
r"""Create a qat module from a float module or qparams_dict
Args: `mod` a float module, either produced by torch.quantization utilities
or directly from user
"""
assert type(mod) == cls._FLOAT_MODULE, (
"qat."
+ cls.__name__
+ ".from_float only works for "
+ cls._FLOAT_MODULE.__name__
)
assert hasattr(mod, "qconfig"), "Input float module must have qconfig defined"
assert mod.qconfig, "Input float module must have a valid qconfig"
if type(mod) == ConvReLU3d:
mod = mod[0]
qconfig = mod.qconfig
qat_conv = cls(
mod.in_channels,
mod.out_channels,
mod.kernel_size,
stride=mod.stride,
padding=mod.padding,
dilation=mod.dilation,
groups=mod.groups,
bias=mod.bias is not None,
padding_mode=mod.padding_mode,
qconfig=qconfig,
)
qat_conv.weight = mod.weight
qat_conv.bias = mod.bias
return qat_conv
avg_line_length: 34.273381 | max_line_length: 104 | alphanum_fraction: 0.616079
hexsha: fc76355b33841412e9a45a652bf3f5a3bd0e1e82 | size: 15,591 | ext: py | lang: Python
max_stars_repo_path: plotly/tests/test_core/test_graph_objs/test_property_assignment.py | max_stars_repo_name: piyush1301/plotly.py | max_stars_repo_head_hexsha: 50cd5c4cd4732042422751c7760acbab8dd8a50d | max_stars_repo_licenses: ["MIT"] | max_stars_count: 6 | stars events: 2019-05-03T02:12:04.000Z to 2020-03-01T06:33:21.000Z
max_issues_repo_path: plotly/tests/test_core/test_graph_objs/test_property_assignment.py | max_issues_repo_name: Vesauza/plotly.py | max_issues_repo_head_hexsha: e53e626d59495d440341751f60aeff73ff365c28 | max_issues_repo_licenses: ["MIT"] | max_issues_count: 27 | issues events: 2020-04-28T21:23:12.000Z to 2021-06-25T15:36:38.000Z
max_forks_repo_path: plotly/tests/test_core/test_graph_objs/test_property_assignment.py | max_forks_repo_name: Vesauza/plotly.py | max_forks_repo_head_hexsha: e53e626d59495d440341751f60aeff73ff365c28 | max_forks_repo_licenses: ["MIT"] | max_forks_count: 5 | forks events: 2019-05-18T16:50:11.000Z to 2021-07-06T21:14:36.000Z
content:
from unittest import TestCase
import plotly.graph_objs as go
from plotly.tests.utils import strip_dict_params
class TestAssignmentPrimitive(TestCase):
def setUp(self):
# Construct initial scatter object
self.scatter = go.Scatter(name='scatter A')
# Assert initial state
d1, d2 = strip_dict_params(
self.scatter,
{'type': 'scatter',
'name': 'scatter A'}
)
assert d1 == d2
# Construct expected results
self.expected_toplevel = {
'type': 'scatter',
'name': 'scatter A',
'fillcolor': 'green'}
self.expected_nested = {
'type': 'scatter',
'name': 'scatter A',
'marker': {'colorbar': {
'title': {'font': {'family': 'courier'}}}}}
def test_toplevel_attr(self):
assert self.scatter.fillcolor is None
self.scatter.fillcolor = 'green'
assert self.scatter.fillcolor == 'green'
d1, d2 = strip_dict_params(self.scatter, self.expected_toplevel)
assert d1 == d2
def test_toplevel_item(self):
assert self.scatter['fillcolor'] is None
self.scatter['fillcolor'] = 'green'
assert self.scatter['fillcolor'] == 'green'
d1, d2 = strip_dict_params(self.scatter, self.expected_toplevel)
assert d1 == d2
def test_nested_attr(self):
assert self.scatter.marker.colorbar.titlefont.family is None
self.scatter.marker.colorbar.titlefont.family = 'courier'
assert self.scatter.marker.colorbar.titlefont.family == 'courier'
d1, d2 = strip_dict_params(self.scatter, self.expected_nested)
assert d1 == d2
def test_nested_item(self):
assert (self.scatter['marker']['colorbar']['title']['font']['family']
is None)
self.scatter['marker']['colorbar']['title']['font']['family'] = \
'courier'
assert (self.scatter['marker']['colorbar']['title']['font']['family']
== 'courier')
d1, d2 = strip_dict_params(self.scatter, self.expected_nested)
assert d1 == d2
def test_nested_item_dots(self):
assert self.scatter['marker.colorbar.title.font.family'] is None
self.scatter['marker.colorbar.title.font.family'] = 'courier'
assert self.scatter['marker.colorbar.title.font.family'] == 'courier'
d1, d2 = strip_dict_params(self.scatter, self.expected_nested)
assert d1 == d2
def test_nested_item_tuple(self):
assert self.scatter['marker.colorbar.title.font.family'] is None
self.scatter[('marker', 'colorbar', 'title.font', 'family')] = 'courier'
assert (self.scatter[('marker', 'colorbar', 'title.font', 'family')]
== 'courier')
d1, d2 = strip_dict_params(self.scatter, self.expected_nested)
assert d1 == d2
def test_nested_update(self):
self.scatter.update(
marker={'colorbar': {'title': {'font': {'family': 'courier'}}}})
assert (self.scatter[('marker', 'colorbar', 'title', 'font', 'family')]
== 'courier')
d1, d2 = strip_dict_params(self.scatter, self.expected_nested)
assert d1 == d2
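# The tests above show equivalent ways of addressing the same nested property:
# attribute chains, dotted string keys, key tuples, and update(). A brief
# illustrative helper (hypothetical, for exposition only) built on the same
# plotly.graph_objs API used throughout this class:
def _example_equivalent_nested_assignment():
    scatter = go.Scatter(name='scatter A')
    # Dotted string keys and key tuples resolve to the same nested field.
    scatter['marker.colorbar.title.font.family'] = 'courier'
    assert scatter[('marker', 'colorbar', 'title', 'font', 'family')] == 'courier'
    return scatter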
class TestAssignmentCompound(TestCase):
def setUp(self):
# Construct initial scatter object
self.scatter = go.Scatter(name='scatter A')
# Assert initial state
d1, d2 = strip_dict_params(
self.scatter,
{'type': 'scatter',
'name': 'scatter A'}
)
assert d1 == d2
# Construct expected results
self.expected_toplevel = {
'type': 'scatter',
'name': 'scatter A',
'marker': {'color': 'yellow',
'size': 10}}
self.expected_nested = {
'type': 'scatter',
'name': 'scatter A',
'marker': {'colorbar': {
'bgcolor': 'yellow',
'thickness': 5}}}
def test_toplevel_obj(self):
d1, d2 = strip_dict_params(self.scatter.marker, {})
assert d1 == d2
self.scatter.marker = go.scatter.Marker(color='yellow', size=10)
assert isinstance(self.scatter.marker, go.scatter.Marker)
d1, d2 = strip_dict_params(self.scatter.marker,
self.expected_toplevel['marker'])
assert d1 == d2
d1, d2 = strip_dict_params(self.scatter, self.expected_toplevel)
assert d1 == d2
def test_toplevel_dict(self):
d1, d2 = strip_dict_params(self.scatter['marker'], {})
assert d1 == d2
self.scatter['marker'] = dict(color='yellow', size=10)
assert isinstance(self.scatter['marker'], go.scatter.Marker)
d1, d2 = strip_dict_params(self.scatter.marker,
self.expected_toplevel['marker'])
assert d1 == d2
d1, d2 = strip_dict_params(self.scatter, self.expected_toplevel)
assert d1 == d2
def test_nested_obj(self):
d1, d2 = strip_dict_params(self.scatter.marker.colorbar, {})
assert d1 == d2
self.scatter.marker.colorbar = go.scatter.marker.ColorBar(
bgcolor='yellow', thickness=5)
assert isinstance(self.scatter.marker.colorbar,
go.scatter.marker.ColorBar)
d1, d2 = strip_dict_params(self.scatter.marker.colorbar,
self.expected_nested['marker']['colorbar'])
assert d1 == d2
d1, d2 = strip_dict_params(self.scatter, self.expected_nested)
assert d1 == d2
def test_nested_dict(self):
d1, d2 = strip_dict_params(self.scatter['marker']['colorbar'], {})
assert d1 == d2
self.scatter['marker']['colorbar'] = dict(
bgcolor='yellow', thickness=5)
assert isinstance(self.scatter['marker']['colorbar'],
go.scatter.marker.ColorBar)
d1, d2 = strip_dict_params(self.scatter['marker']['colorbar'],
self.expected_nested['marker']['colorbar'])
assert d1 == d2
d1, d2 = strip_dict_params(self.scatter, self.expected_nested)
assert d1 == d2
def test_nested_dict_dot(self):
d1, d2 = strip_dict_params(self.scatter.marker.colorbar, {})
assert d1 == d2
self.scatter['marker.colorbar'] = dict(
bgcolor='yellow', thickness=5)
assert isinstance(self.scatter['marker.colorbar'],
go.scatter.marker.ColorBar)
d1, d2 = strip_dict_params(self.scatter['marker.colorbar'],
self.expected_nested['marker']['colorbar'])
assert d1 == d2
d1, d2 = strip_dict_params(self.scatter, self.expected_nested)
assert d1 == d2
def test_nested_dict_tuple(self):
d1, d2 = strip_dict_params(self.scatter[('marker', 'colorbar')], {})
assert d1 == d2
self.scatter[('marker', 'colorbar')] = dict(
bgcolor='yellow', thickness=5)
assert isinstance(self.scatter[('marker', 'colorbar')],
go.scatter.marker.ColorBar)
d1, d2 = strip_dict_params(self.scatter[('marker', 'colorbar')],
self.expected_nested['marker']['colorbar'])
assert d1 == d2
d1, d2 = strip_dict_params(self.scatter, self.expected_nested)
assert d1 == d2
def test_nested_update_obj(self):
self.scatter.update(
marker={'colorbar':
go.scatter.marker.ColorBar(bgcolor='yellow',
thickness=5)})
assert isinstance(self.scatter['marker']['colorbar'],
go.scatter.marker.ColorBar)
d1, d2 = strip_dict_params(self.scatter['marker']['colorbar'],
self.expected_nested['marker']['colorbar'])
assert d1 == d2
d1, d2 = strip_dict_params(self.scatter, self.expected_nested)
assert d1 == d2
def test_nested_update_dict(self):
self.scatter.update(
marker={'colorbar': dict(bgcolor='yellow', thickness=5)})
assert isinstance(self.scatter['marker']['colorbar'],
go.scatter.marker.ColorBar)
d1, d2 = strip_dict_params(self.scatter['marker']['colorbar'],
self.expected_nested['marker']['colorbar'])
assert d1 == d2
d1, d2 = strip_dict_params(self.scatter, self.expected_nested)
assert d1 == d2
class TestAssignmentNone(TestCase):
def test_toplevel(self):
# Initialize scatter
scatter = go.Scatter(name='scatter A',
y=[3, 2, 4],
marker={
'colorbar': {
'title': {'font': {
'family': 'courier'}}}})
expected = {
'type': 'scatter',
'name': 'scatter A',
'y': [3, 2, 4],
'marker': {'colorbar': {
'title': {'font': {'family': 'courier'}}}}}
d1, d2 = strip_dict_params(scatter, expected)
assert d1 == d2
        # Set properties that are not defined to None
scatter.x = None
d1, d2 = strip_dict_params(scatter, expected)
assert d1 == d2
scatter['line.width'] = None
d1, d2 = strip_dict_params(scatter, expected)
assert d1 == d2
# Set defined property to None
scatter.y = None
expected.pop('y')
d1, d2 = strip_dict_params(scatter, expected)
assert d1 == d2
# Set compound properties to None
scatter[('marker', 'colorbar', 'title', 'font')] = None
expected['marker']['colorbar']['title'].pop('font')
d1, d2 = strip_dict_params(scatter, expected)
assert d1 == d2
scatter.marker = None
expected.pop('marker')
d1, d2 = strip_dict_params(scatter, expected)
assert d1 == d2
class TestAssignCompoundArray(TestCase):
def setUp(self):
        # Construct initial parcoords object
self.parcoords = go.Parcoords(name='parcoords A')
# Assert initial state
d1, d2 = strip_dict_params(
self.parcoords,
{'type': 'parcoords',
'name': 'parcoords A'}
)
assert d1 == d2
# Construct expected results
self.expected_toplevel = {
'type': 'parcoords',
'name': 'parcoords A',
'dimensions': [
{'values': [2, 3, 1], 'visible': True},
{'values': [1, 2, 3], 'label': 'dim1'}]}
self.layout = go.Layout()
self.expected_layout1 = {
'updatemenus': [{},
{'font': {'family': 'courier'}}]
}
self.expected_layout2 = {
'updatemenus': [{},
{'buttons': [
{}, {}, {'method': 'restyle'}]}]
}
def test_assign_toplevel_array(self):
self.assertEqual(self.parcoords.dimensions, ())
self.parcoords['dimensions'] = [
go.parcoords.Dimension(values=[2, 3, 1], visible=True),
dict(values=[1, 2, 3], label='dim1')]
self.assertEqual(self.parcoords.to_plotly_json(),
self.expected_toplevel)
def test_assign_nested_attr(self):
self.assertEqual(self.layout.updatemenus, ())
# Initialize empty updatemenus
self.layout.updatemenus = [{}, {}]
self.assertEqual(self.layout['updatemenus'],
(go.layout.Updatemenu(), go.layout.Updatemenu()))
self.layout.updatemenus[1].font.family = 'courier'
d1, d2 = strip_dict_params(self.layout, self.expected_layout1)
assert d1 == d2
def test_assign_double_nested_attr(self):
self.assertEqual(self.layout.updatemenus, ())
# Initialize empty updatemenus
self.layout.updatemenus = [{}, {}]
# Initialize empty buttons in updatemenu[1]
self.layout.updatemenus[1].buttons = [{}, {}, {}]
# Assign
self.layout.updatemenus[1].buttons[2].method = 'restyle'
# Check
self.assertEqual(
self.layout.updatemenus[1].buttons[2].method,
'restyle')
d1, d2 = strip_dict_params(self.layout, self.expected_layout2)
assert d1 == d2
def test_assign_double_nested_item(self):
self.assertEqual(self.layout.updatemenus, ())
# Initialize empty updatemenus
self.layout.updatemenus = [{}, {}]
# Initialize empty buttons in updatemenu[1]
self.layout['updatemenus'][1]['buttons'] = [{}, {}, {}]
# Assign
self.layout['updatemenus'][1]['buttons'][2]['method'] = 'restyle'
# Check
self.assertEqual(
self.layout['updatemenus'][1]['buttons'][2]['method'],
'restyle'
)
d1, d2 = strip_dict_params(self.layout, self.expected_layout2)
assert d1 == d2
def test_assign_double_nested_tuple(self):
self.assertEqual(self.layout.updatemenus, ())
# Initialize empty updatemenus
self.layout.updatemenus = [{}, {}]
# Initialize empty buttons in updatemenu[1]
self.layout[('updatemenus', 1, 'buttons')] = [{}, {}, {}]
# Assign
self.layout[('updatemenus', 1, 'buttons', 2, 'method')] = 'restyle'
# Check
self.assertEqual(
self.layout[('updatemenus', 1, 'buttons', 2, 'method')],
'restyle')
d1, d2 = strip_dict_params(self.layout, self.expected_layout2)
assert d1 == d2
def test_assign_double_nested_dot(self):
self.assertEqual(self.layout.updatemenus, ())
# Initialize empty updatemenus
self.layout['updatemenus'] = [{}, {}]
# Initialize empty buttons in updatemenu[1]
self.layout['updatemenus.1.buttons'] = [{}, {}, {}]
# Assign
self.layout['updatemenus[1].buttons[2].method'] = 'restyle'
# Check
self.assertEqual(
self.layout['updatemenus[1].buttons[2].method'],
'restyle')
d1, d2 = strip_dict_params(self.layout, self.expected_layout2)
assert d1 == d2
def test_assign_double_nested_update_dict(self):
# Initialize empty updatemenus
self.layout.updatemenus = [{}, {}]
# Initialize empty buttons in updatemenu[1]
self.layout.updatemenus[1].buttons = [{}, {}, {}]
# Update
self.layout.update(
updatemenus={1: {'buttons': {2: {'method': 'restyle'}}}})
# Check
self.assertEqual(
self.layout.updatemenus[1].buttons[2].method,
'restyle')
d1, d2 = strip_dict_params(self.layout, self.expected_layout2)
assert d1 == d2
def test_assign_double_nested_update_array(self):
# Initialize empty updatemenus
self.layout.updatemenus = [{}, {}]
# Initialize empty buttons in updatemenu[1]
self.layout.updatemenus[1].buttons = [{}, {}, {}]
# Update
self.layout.update(
updatemenus=[{}, {'buttons': [{}, {}, {'method': 'restyle'}]}])
# Check
self.assertEqual(
self.layout.updatemenus[1].buttons[2].method,
'restyle')
d1, d2 = strip_dict_params(self.layout, self.expected_layout2)
assert d1 == d2
| 34.115974
| 80
| 0.558014
| 1,609
| 15,591
| 5.281541
| 0.065258
| 0.042363
| 0.081196
| 0.06884
| 0.89421
| 0.874206
| 0.852083
| 0.830548
| 0.827136
| 0.814074
| 0
| 0.023752
| 0.30601
| 15,591
| 456
| 81
| 34.190789
| 0.761645
| 0.056956
| 0
| 0.5623
| 0
| 0
| 0.123107
| 0.0148
| 0
| 0
| 0
| 0
| 0.255591
| 1
| 0.086262
| false
| 0
| 0.009585
| 0
| 0.108626
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
fc98d9e25f14102063da7a8ecd8e87c56ea7b94e
| 1,422
|
py
|
Python
|
tests/templatetags/test_url_replace.py
|
LogicalAddress/wagtailstreamforms
|
d2e9519de643f18e2d879b2cd648f532c9673580
|
[
"MIT"
] | null | null | null |
tests/templatetags/test_url_replace.py
|
LogicalAddress/wagtailstreamforms
|
d2e9519de643f18e2d879b2cd648f532c9673580
|
[
"MIT"
] | null | null | null |
tests/templatetags/test_url_replace.py
|
LogicalAddress/wagtailstreamforms
|
d2e9519de643f18e2d879b2cd648f532c9673580
|
[
"MIT"
] | 1
|
2020-05-13T16:26:38.000Z
|
2020-05-13T16:26:38.000Z
|
import urllib.parse as urlparse
from ..test_case import AppTestCase
class TemplateTagTests(AppTestCase):
def test_kwarg_added(self):
fake_request = self.rf.get("/")
rendered = self.render_template(
"{% load streamforms_tags %}?{% url_replace page=1 %}",
{"request": fake_request},
)
        # parse the url since the query params can be reordered unpredictably
parsed = urlparse.parse_qs(urlparse.urlparse(rendered).query)
self.assertDictEqual(parsed, {"page": ["1"]})
def test_kwarg_appended(self):
fake_request = self.rf.get("/?foo=bar")
rendered = self.render_template(
"{% load streamforms_tags %}?{% url_replace page=1 %}",
{"request": fake_request},
)
        # parse the url since the query params can be reordered unpredictably
parsed = urlparse.parse_qs(urlparse.urlparse(rendered).query)
self.assertDictEqual(parsed, {"foo": ["bar"], "page": ["1"]})
def test_kwarg_replaced(self):
fake_request = self.rf.get("/?foo=bar&page=1")
rendered = self.render_template(
"{% load streamforms_tags %}?{% url_replace page=5 %}",
{"request": fake_request},
)
        # parse the url since the query params can be reordered unpredictably
parsed = urlparse.parse_qs(urlparse.urlparse(rendered).query)
self.assertDictEqual(parsed, {"foo": ["bar"], "page": ["5"]})
| 39.5
| 69
| 0.616737
| 161
| 1,422
| 5.291925
| 0.273292
| 0.077465
| 0.042254
| 0.066901
| 0.849765
| 0.814554
| 0.786385
| 0.786385
| 0.715962
| 0.715962
| 0
| 0.006524
| 0.245429
| 1,422
| 35
| 70
| 40.628571
| 0.787512
| 0.111111
| 0
| 0.407407
| 0
| 0
| 0.18254
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 1
| 0.111111
| false
| 0
| 0.074074
| 0
| 0.222222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
5da58fed144f15825c125537b6d2c8b2086fb45a
| 29
|
py
|
Python
|
bricklayer/util/__init__.py
|
loganwang007/bricklayer
|
531dd4acaf20574a9d2f7f0adf68789888288157
|
[
"Apache-2.0"
] | null | null | null |
bricklayer/util/__init__.py
|
loganwang007/bricklayer
|
531dd4acaf20574a9d2f7f0adf68789888288157
|
[
"Apache-2.0"
] | null | null | null |
bricklayer/util/__init__.py
|
loganwang007/bricklayer
|
531dd4acaf20574a9d2f7f0adf68789888288157
|
[
"Apache-2.0"
] | null | null | null |
from . import parallel_fetch
| 14.5
| 28
| 0.827586
| 4
| 29
| 5.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137931
| 29
| 1
| 29
| 29
| 0.92
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5dc252dca3d1ebf17d14a9042d5215bab07c8921
| 24
|
py
|
Python
|
rcnn/modeling/opld/heads/__init__.py
|
yf19970118/OPLD-Pytorch
|
4939bf62587da4533276fda20db36bb019575511
|
[
"MIT"
] | 25
|
2020-08-28T07:28:16.000Z
|
2022-03-06T06:18:56.000Z
|
rcnn/modeling/opld/heads/__init__.py
|
yf19970118/OPLD-Pytorch
|
4939bf62587da4533276fda20db36bb019575511
|
[
"MIT"
] | 5
|
2020-12-22T07:42:50.000Z
|
2021-07-12T01:49:57.000Z
|
rcnn/modeling/opld/heads/__init__.py
|
yf19970118/OPLD-Pytorch
|
4939bf62587da4533276fda20db36bb019575511
|
[
"MIT"
] | 4
|
2020-12-19T03:14:26.000Z
|
2021-12-17T12:38:37.000Z
|
from .opld_head import *
| 24
| 24
| 0.791667
| 4
| 24
| 4.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 24
| 1
| 24
| 24
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f8e0cf4abc6f244e2b8e67c1aaae39449c3bdf84
| 36,187
|
py
|
Python
|
spark_fhir_schemas/stu3/complex_types/measure.py
|
icanbwell/SparkFhirSchemas
|
8c828313c39850b65f8676e67f526ee92b7d624e
|
[
"Apache-2.0"
] | 2
|
2020-10-31T23:25:01.000Z
|
2021-06-09T14:12:42.000Z
|
spark_fhir_schemas/stu3/complex_types/measure.py
|
icanbwell/SparkFhirSchemas
|
8c828313c39850b65f8676e67f526ee92b7d624e
|
[
"Apache-2.0"
] | null | null | null |
spark_fhir_schemas/stu3/complex_types/measure.py
|
icanbwell/SparkFhirSchemas
|
8c828313c39850b65f8676e67f526ee92b7d624e
|
[
"Apache-2.0"
] | null | null | null |
from typing import Union, List, Optional
from pyspark.sql.types import (
StructType,
StructField,
StringType,
ArrayType,
DateType,
BooleanType,
DataType,
)
# This file is auto-generated by generate_schema so do not edit manually
# noinspection PyPep8Naming
class MeasureSchema:
"""
The Measure resource provides the definition of a quality measure.
"""
# noinspection PyDefaultArgument
@staticmethod
def get_schema(
max_nesting_depth: Optional[int] = 6,
nesting_depth: int = 0,
nesting_list: List[str] = [],
max_recursion_limit: Optional[int] = 2,
include_extension: Optional[bool] = False,
extension_fields: Optional[List[str]] = [
"valueBoolean",
"valueCode",
"valueDate",
"valueDateTime",
"valueDecimal",
"valueId",
"valueInteger",
"valuePositiveInt",
"valueString",
"valueTime",
"valueUnsignedInt",
"valueUri",
"valueQuantity",
],
extension_depth: int = 0,
max_extension_depth: Optional[int] = 2,
) -> Union[StructType, DataType]:
"""
The Measure resource provides the definition of a quality measure.
id: The logical id of the resource, as used in the URL for the resource. Once
assigned, this value never changes.
extension: May be used to represent additional information that is not part of the basic
definition of the resource. In order to make the use of extensions safe and
manageable, there is a strict set of governance applied to the definition and
use of extensions. Though any implementer is allowed to define an extension,
there is a set of requirements that SHALL be met as part of the definition of
the extension.
meta: The metadata about the resource. This is content that is maintained by the
infrastructure. Changes to the content may not always be associated with
version changes to the resource.
implicitRules: A reference to a set of rules that were followed when the resource was
constructed, and which must be understood when processing the content.
language: The base language in which the resource is written.
text: A human-readable narrative that contains a summary of the resource, and may be
used to represent the content of the resource to a human. The narrative need
not encode all the structured data, but is required to contain sufficient
detail to make it "clinically safe" for a human to just read the narrative.
Resource definitions may define what content should be represented in the
narrative to ensure clinical safety.
contained: These resources do not have an independent existence apart from the resource
that contains them - they cannot be identified independently, and nor can they
have their own independent transaction scope.
resourceType: This is a Measure resource
url: An absolute URI that is used to identify this measure when it is referenced in
a specification, model, design or an instance. This SHALL be a URL, SHOULD be
globally unique, and SHOULD be an address at which this measure is (or will
be) published. The URL SHOULD include the major version of the measure. For
more information see [Technical and Business
Versions](resource.html#versions).
identifier: A formal identifier that is used to identify this measure when it is
represented in other formats, or referenced in a specification, model, design
or an instance.
version: The identifier that is used to identify this version of the measure when it is
referenced in a specification, model, design or instance. This is an arbitrary
value managed by the measure author and is not expected to be globally unique.
For example, it might be a timestamp (e.g. yyyymmdd) if a managed version is
not available. There is also no expectation that versions can be placed in a
lexicographical sequence. To provide a version consistent with the Decision
Support Service specification, use the format Major.Minor.Revision (e.g.
1.0.0). For more information on versioning knowledge assets, refer to the
Decision Support Service specification. Note that a version is required for
non-experimental active artifacts.
name: A natural language name identifying the measure. This name should be usable as
an identifier for the module by machine processing applications such as code
generation.
title: A short, descriptive, user-friendly title for the measure.
status: The status of this measure. Enables tracking the life-cycle of the content.
experimental: A boolean value to indicate that this measure is authored for testing purposes
(or education/evaluation/marketing), and is not intended to be used for
genuine usage.
date: The date (and optionally time) when the measure was published. The date must
change if and when the business version changes and it must change if the
status code changes. In addition, it should change when the substantive
content of the measure changes.
publisher: The name of the individual or organization that published the measure.
description: A free text natural language description of the measure from a consumer's
perspective.
        purpose: Explanation of why this measure is needed and why it has been designed as it
has.
usage: A detailed description of how the measure is used from a clinical perspective.
approvalDate: The date on which the resource content was approved by the publisher. Approval
happens once when the content is officially approved for usage.
lastReviewDate: The date on which the resource content was last reviewed. Review happens
periodically after approval, but doesn't change the original approval date.
effectivePeriod: The period during which the measure content was or is planned to be in active
use.
useContext: The content was developed with a focus and intent of supporting the contexts
that are listed. These terms may be used to assist with indexing and searching
for appropriate measure instances.
jurisdiction: A legal or geographic region in which the measure is intended to be used.
topic: Descriptive topics related to the content of the measure. Topics provide a
high-level categorization of the type of the measure that can be useful for
filtering and searching.
contributor: A contributor to the content of the measure, including authors, editors,
reviewers, and endorsers.
contact: Contact details to assist a user in finding and communicating with the
publisher.
copyright: A copyright statement relating to the measure and/or its contents. Copyright
statements are generally legal restrictions on the use and publishing of the
measure.
relatedArtifact: Related artifacts such as additional documentation, justification, or
bibliographic references.
library: A reference to a Library resource containing the formal logic used by the
measure.
disclaimer: Notices and disclaimers regarding the use of the measure, or related to
intellectual property (such as code systems) referenced by the measure.
scoring: Indicates how the calculation is performed for the measure, including
proportion, ratio, continuous variable, and cohort. The value set is
extensible, allowing additional measure scoring types to be represented.
compositeScoring: If this is a composite measure, the scoring method used to combine the
component measures to determine the composite score.
type: Indicates whether the measure is used to examine a process, an outcome over
time, a patient-reported outcome, or a structure measure such as utilization.
riskAdjustment: A description of the risk adjustment factors that may impact the resulting
score for the measure and how they may be accounted for when computing and
reporting measure results.
rateAggregation: Describes how to combine the information calculated, based on logic in each of
several populations, into one summarized result.
        rationale: Provides a succinct statement of the need for the measure. Usually includes
statements pertaining to importance criterion: impact, gap in care, and
evidence.
clinicalRecommendationStatement: Provides a summary of relevant clinical guidelines or other clinical
recommendations supporting the measure.
improvementNotation: Information on whether an increase or decrease in score is the preferred
result (e.g., a higher score indicates better quality OR a lower score
        indicates better quality OR quality is within a range).
definition: Provides a description of an individual term used within the measure.
guidance: Additional guidance for the measure including how it can be used in a clinical
context, and the intent of the measure.
set: The measure set, e.g. Preventive Care and Screening.
group: A group of population criteria for the measure.
supplementalData: The supplemental data criteria for the measure report, specified as either the
name of a valid CQL expression within a referenced library, or a valid FHIR
Resource Path.
"""
from spark_fhir_schemas.stu3.complex_types.extension import ExtensionSchema
from spark_fhir_schemas.stu3.complex_types.meta import MetaSchema
from spark_fhir_schemas.stu3.complex_types.narrative import NarrativeSchema
from spark_fhir_schemas.stu3.simple_types.resourcelist import ResourceListSchema
from spark_fhir_schemas.stu3.complex_types.identifier import IdentifierSchema
from spark_fhir_schemas.stu3.complex_types.period import PeriodSchema
from spark_fhir_schemas.stu3.complex_types.usagecontext import (
UsageContextSchema,
)
from spark_fhir_schemas.stu3.complex_types.codeableconcept import (
CodeableConceptSchema,
)
from spark_fhir_schemas.stu3.complex_types.contributor import ContributorSchema
from spark_fhir_schemas.stu3.complex_types.contactdetail import (
ContactDetailSchema,
)
from spark_fhir_schemas.stu3.complex_types.relatedartifact import (
RelatedArtifactSchema,
)
from spark_fhir_schemas.stu3.complex_types.reference import ReferenceSchema
from spark_fhir_schemas.stu3.complex_types.measure_group import (
Measure_GroupSchema,
)
from spark_fhir_schemas.stu3.complex_types.measure_supplementaldata import (
Measure_SupplementalDataSchema,
)
if (
max_recursion_limit and nesting_list.count("Measure") >= max_recursion_limit
) or (max_nesting_depth and nesting_depth >= max_nesting_depth):
return StructType([StructField("id", StringType(), True)])
# add my name to recursion list for later
my_nesting_list: List[str] = nesting_list + ["Measure"]
schema = StructType(
[
# The logical id of the resource, as used in the URL for the resource. Once
# assigned, this value never changes.
StructField("id", StringType(), True),
# May be used to represent additional information that is not part of the basic
# definition of the resource. In order to make the use of extensions safe and
# manageable, there is a strict set of governance applied to the definition and
# use of extensions. Though any implementer is allowed to define an extension,
# there is a set of requirements that SHALL be met as part of the definition of
# the extension.
StructField(
"extension",
ArrayType(
ExtensionSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# The metadata about the resource. This is content that is maintained by the
# infrastructure. Changes to the content may not always be associated with
# version changes to the resource.
StructField(
"meta",
MetaSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
),
True,
),
# A reference to a set of rules that were followed when the resource was
# constructed, and which must be understood when processing the content.
StructField("implicitRules", StringType(), True),
# The base language in which the resource is written.
StructField("language", StringType(), True),
# A human-readable narrative that contains a summary of the resource, and may be
# used to represent the content of the resource to a human. The narrative need
# not encode all the structured data, but is required to contain sufficient
# detail to make it "clinically safe" for a human to just read the narrative.
# Resource definitions may define what content should be represented in the
# narrative to ensure clinical safety.
StructField(
"text",
NarrativeSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
),
True,
),
# These resources do not have an independent existence apart from the resource
# that contains them - they cannot be identified independently, and nor can they
# have their own independent transaction scope.
StructField(
"contained",
ArrayType(
ResourceListSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# This is a Measure resource
StructField("resourceType", StringType(), True),
# An absolute URI that is used to identify this measure when it is referenced in
# a specification, model, design or an instance. This SHALL be a URL, SHOULD be
# globally unique, and SHOULD be an address at which this measure is (or will
# be) published. The URL SHOULD include the major version of the measure. For
# more information see [Technical and Business
# Versions](resource.html#versions).
StructField("url", StringType(), True),
# A formal identifier that is used to identify this measure when it is
# represented in other formats, or referenced in a specification, model, design
# or an instance.
StructField(
"identifier",
ArrayType(
IdentifierSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# The identifier that is used to identify this version of the measure when it is
# referenced in a specification, model, design or instance. This is an arbitrary
# value managed by the measure author and is not expected to be globally unique.
# For example, it might be a timestamp (e.g. yyyymmdd) if a managed version is
# not available. There is also no expectation that versions can be placed in a
# lexicographical sequence. To provide a version consistent with the Decision
# Support Service specification, use the format Major.Minor.Revision (e.g.
# 1.0.0). For more information on versioning knowledge assets, refer to the
# Decision Support Service specification. Note that a version is required for
# non-experimental active artifacts.
StructField("version", StringType(), True),
# A natural language name identifying the measure. This name should be usable as
# an identifier for the module by machine processing applications such as code
# generation.
StructField("name", StringType(), True),
# A short, descriptive, user-friendly title for the measure.
StructField("title", StringType(), True),
# The status of this measure. Enables tracking the life-cycle of the content.
StructField("status", StringType(), True),
# A boolean value to indicate that this measure is authored for testing purposes
# (or education/evaluation/marketing), and is not intended to be used for
# genuine usage.
StructField("experimental", BooleanType(), True),
# The date (and optionally time) when the measure was published. The date must
# change if and when the business version changes and it must change if the
# status code changes. In addition, it should change when the substantive
# content of the measure changes.
StructField("date", StringType(), True),
# The name of the individual or organization that published the measure.
StructField("publisher", StringType(), True),
# A free text natural language description of the measure from a consumer's
# perspective.
StructField("description", StringType(), True),
                # Explanation of why this measure is needed and why it has been designed as it
# has.
StructField("purpose", StringType(), True),
# A detailed description of how the measure is used from a clinical perspective.
StructField("usage", StringType(), True),
# The date on which the resource content was approved by the publisher. Approval
# happens once when the content is officially approved for usage.
StructField("approvalDate", DateType(), True),
# The date on which the resource content was last reviewed. Review happens
# periodically after approval, but doesn't change the original approval date.
StructField("lastReviewDate", DateType(), True),
# The period during which the measure content was or is planned to be in active
# use.
StructField(
"effectivePeriod",
PeriodSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
),
True,
),
# The content was developed with a focus and intent of supporting the contexts
# that are listed. These terms may be used to assist with indexing and searching
# for appropriate measure instances.
StructField(
"useContext",
ArrayType(
UsageContextSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# A legal or geographic region in which the measure is intended to be used.
StructField(
"jurisdiction",
ArrayType(
CodeableConceptSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# Descriptive topics related to the content of the measure. Topics provide a
# high-level categorization of the type of the measure that can be useful for
# filtering and searching.
StructField(
"topic",
ArrayType(
CodeableConceptSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# A contributor to the content of the measure, including authors, editors,
# reviewers, and endorsers.
StructField(
"contributor",
ArrayType(
ContributorSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# Contact details to assist a user in finding and communicating with the
# publisher.
StructField(
"contact",
ArrayType(
ContactDetailSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# A copyright statement relating to the measure and/or its contents. Copyright
# statements are generally legal restrictions on the use and publishing of the
# measure.
StructField("copyright", StringType(), True),
# Related artifacts such as additional documentation, justification, or
# bibliographic references.
StructField(
"relatedArtifact",
ArrayType(
RelatedArtifactSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# A reference to a Library resource containing the formal logic used by the
# measure.
StructField(
"library",
ArrayType(
ReferenceSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# Notices and disclaimers regarding the use of the measure, or related to
# intellectual property (such as code systems) referenced by the measure.
StructField("disclaimer", StringType(), True),
# Indicates how the calculation is performed for the measure, including
# proportion, ratio, continuous variable, and cohort. The value set is
# extensible, allowing additional measure scoring types to be represented.
StructField(
"scoring",
CodeableConceptSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
),
True,
),
# If this is a composite measure, the scoring method used to combine the
# component measures to determine the composite score.
StructField(
"compositeScoring",
CodeableConceptSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
),
True,
),
# Indicates whether the measure is used to examine a process, an outcome over
# time, a patient-reported outcome, or a structure measure such as utilization.
StructField(
"type",
ArrayType(
CodeableConceptSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# A description of the risk adjustment factors that may impact the resulting
# score for the measure and how they may be accounted for when computing and
# reporting measure results.
StructField("riskAdjustment", StringType(), True),
# Describes how to combine the information calculated, based on logic in each of
# several populations, into one summarized result.
StructField("rateAggregation", StringType(), True),
                # Provides a succinct statement of the need for the measure. Usually includes
# statements pertaining to importance criterion: impact, gap in care, and
# evidence.
StructField("rationale", StringType(), True),
# Provides a summary of relevant clinical guidelines or other clinical
# recommendations supporting the measure.
StructField("clinicalRecommendationStatement", StringType(), True),
# Information on whether an increase or decrease in score is the preferred
# result (e.g., a higher score indicates better quality OR a lower score
                # indicates better quality OR quality is within a range).
StructField("improvementNotation", StringType(), True),
# Provides a description of an individual term used within the measure.
StructField("definition", ArrayType(StringType()), True),
# Additional guidance for the measure including how it can be used in a clinical
# context, and the intent of the measure.
StructField("guidance", StringType(), True),
# The measure set, e.g. Preventive Care and Screening.
StructField("set", StringType(), True),
# A group of population criteria for the measure.
StructField(
"group",
ArrayType(
Measure_GroupSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# The supplemental data criteria for the measure report, specified as either the
# name of a valid CQL expression within a referenced library, or a valid FHIR
# Resource Path.
StructField(
"supplementalData",
ArrayType(
Measure_SupplementalDataSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
]
)
if not include_extension:
schema.fields = [
c
if c.name != "extension"
else StructField("extension", StringType(), True)
for c in schema.fields
]
return schema
| 53.373156
| 109
| 0.575593
| 3,624
| 36,187
| 5.607616
| 0.139073
| 0.045468
| 0.028787
| 0.042516
| 0.828954
| 0.823098
| 0.823098
| 0.803612
| 0.795443
| 0.782256
| 0
| 0.002193
| 0.382458
| 36,187
| 677
| 110
| 53.451994
| 0.907191
| 0.421837
| 0
| 0.609375
| 0
| 0
| 0.030691
| 0.001562
| 0
| 0
| 0
| 0
| 0
| 1
| 0.002604
| false
| 0
| 0.041667
| 0
| 0.052083
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
5d29473e449eeab1bab8b249c22fab288ed9d6c8
| 5,552
|
py
|
Python
|
find_a_supplier/tests/factories.py
|
thibaudcolas/directory-cms
|
d958360fe5491a92977d754cfd0d7f8a4695639e
|
[
"MIT"
] | null | null | null |
find_a_supplier/tests/factories.py
|
thibaudcolas/directory-cms
|
d958360fe5491a92977d754cfd0d7f8a4695639e
|
[
"MIT"
] | null | null | null |
find_a_supplier/tests/factories.py
|
thibaudcolas/directory-cms
|
d958360fe5491a92977d754cfd0d7f8a4695639e
|
[
"MIT"
] | null | null | null |
from datetime import datetime
import factory
import factory.fuzzy
import wagtail_factories
from directory_constants.constants import choices
from find_a_supplier import models
class IndustryPageFactory(wagtail_factories.PageFactory):
class Meta:
model = models.IndustryPage
hero_text_en_gb = factory.fuzzy.FuzzyText(length=255)
introduction_text_en_gb = factory.fuzzy.FuzzyText(length=255)
introduction_call_to_action_button_text_en_gb = factory.fuzzy.FuzzyText(
length=50
)
introduction_title_en_gb = factory.fuzzy.FuzzyText(length=400)
introduction_column_one_text_en_gb = factory.fuzzy.FuzzyText(length=255)
introduction_column_two_text_en_gb = factory.fuzzy.FuzzyText(length=255)
introduction_column_three_text_en_gb = factory.fuzzy.FuzzyText(length=255)
company_list_text_en_gb = factory.fuzzy.FuzzyText(length=255)
company_list_call_to_action_text_en_gb = factory.fuzzy.FuzzyText(
length=255
)
company_list_search_input_placeholder_text_en_gb = factory.fuzzy.FuzzyText(
length=50
)
breadcrumbs_label_en_gb = factory.fuzzy.FuzzyText(length=50)
search_filter_sector = factory.fuzzy.FuzzyChoice(
[[i[0]] for i in choices.INDUSTRIES]
)
search_description_en_gb = factory.fuzzy.FuzzyText(length=255)
title_en_gb = factory.fuzzy.FuzzyText(length=255)
introduction_column_two_icon = factory.SubFactory(
wagtail_factories.ImageFactory
)
introduction_column_three_icon = factory.SubFactory(
wagtail_factories.ImageFactory
)
introduction_column_one_icon = factory.SubFactory(
wagtail_factories.ImageFactory
)
slug = factory.Sequence(lambda n: '123-555-{0}'.format(n))
parent = None
class LandingPageFactory(wagtail_factories.PageFactory):
class Meta:
model = models.LandingPage
hero_text_en_gb = factory.fuzzy.FuzzyText(length=255)
breadcrumbs_label_en_gb = factory.fuzzy.FuzzyText(length=50)
search_field_placeholder_en_gb = factory.fuzzy.FuzzyText(length=255)
search_button_text_en_gb = factory.fuzzy.FuzzyText(length=255)
proposition_text_en_gb = factory.fuzzy.FuzzyText(length=255)
call_to_action_text_en_gb = factory.fuzzy.FuzzyText(length=255)
industries_list_text_en_gb = factory.fuzzy.FuzzyText(length=255)
industries_list_call_to_action_text_en_gb = factory.fuzzy.FuzzyText(
length=255
)
services_list_text_en_gb = factory.fuzzy.FuzzyText(length=255)
services_column_one_en_gb = factory.fuzzy.FuzzyText(length=255)
services_column_two_en_gb = factory.fuzzy.FuzzyText(length=255)
services_column_three_en_gb = factory.fuzzy.FuzzyText(length=255)
services_column_four_en_gb = factory.fuzzy.FuzzyText(length=255)
services_column_one_icon_en_gb = factory.SubFactory(
wagtail_factories.ImageFactory
)
services_column_two_icon_en_gb = factory.SubFactory(
wagtail_factories.ImageFactory
)
services_column_three_icon_en_gb = factory.SubFactory(
wagtail_factories.ImageFactory
)
services_column_four_icon_en_gb = factory.SubFactory(
wagtail_factories.ImageFactory
)
search_description_en_gb = factory.fuzzy.FuzzyText(length=255)
slug = factory.Sequence(lambda n: '123-555-{0}'.format(n))
title_en_gb = factory.Sequence(lambda n: '123-555-{0}'.format(n))
parent = None
class IndustryLandingPageFactory(wagtail_factories.PageFactory):
class Meta:
model = models.IndustryLandingPage
hero_title_en_gb = factory.fuzzy.FuzzyText(length=255)
hero_image = factory.SubFactory(
wagtail_factories.ImageFactory
)
proposition_text_en_gb = factory.fuzzy.FuzzyText(length=255)
call_to_action_text_en_gb = factory.fuzzy.FuzzyText(length=255)
breadcrumbs_label_en_gb = factory.fuzzy.FuzzyText(length=50)
search_description_en_gb = factory.fuzzy.FuzzyText(length=255)
slug = factory.Sequence(lambda n: '123-555-{0}'.format(n))
title_en_gb = factory.Sequence(lambda n: '123-555-{0}'.format(n))
more_industries_title_en_gb = factory.fuzzy.FuzzyText(length=100)
parent = None
class IndustryContactPageFactory(wagtail_factories.PageFactory):
class Meta:
model = models.IndustryContactPage
breadcrumbs_label_en_gb = factory.fuzzy.FuzzyText(length=50)
introduction_text_en_gb = factory.fuzzy.FuzzyText(length=255)
submit_button_text_en_gb = factory.fuzzy.FuzzyText(length=100)
success_message_text_en_gb = factory.fuzzy.FuzzyText(length=255)
success_back_link_text_en_gb = factory.fuzzy.FuzzyText(length=100)
slug = factory.Sequence(lambda n: '123-555-{0}'.format(n))
parent = None
class IndustryArticlePageFactory(wagtail_factories.PageFactory):
class Meta:
model = models.IndustryArticlePage
breadcrumbs_label_en_gb = factory.fuzzy.FuzzyText(length=50)
introduction_title_en_gb = factory.fuzzy.FuzzyText(length=255)
body_en_gb = factory.fuzzy.FuzzyText(length=100)
author_name_en_gb = factory.fuzzy.FuzzyText(length=100)
job_title_en_gb = factory.fuzzy.FuzzyText(length=100)
proposition_text_en_gb = factory.fuzzy.FuzzyText(length=100)
call_to_action_text_en_gb = factory.fuzzy.FuzzyText(length=100)
back_to_home_link_text_en_gb = factory.fuzzy.FuzzyText(length=100)
social_share_title_en_gb = factory.fuzzy.FuzzyText(length=100)
date_en_gb = factory.LazyFunction(datetime.now)
slug = factory.Sequence(lambda n: 'IndustryArticlePage-{0}'.format(n))
parent = None
| 39.657143
| 79
| 0.76639
| 722
| 5,552
| 5.560942
| 0.134349
| 0.053798
| 0.147945
| 0.187298
| 0.826401
| 0.804234
| 0.79203
| 0.708095
| 0.564633
| 0.454795
| 0
| 0.037608
| 0.147514
| 5,552
| 139
| 80
| 39.942446
| 0.810691
| 0
| 0
| 0.37931
| 0
| 0
| 0.01603
| 0.004143
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.051724
| 0
| 0.732759
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
5d6151c31a80ce9a29b8b121b166d4d3eb910101
| 1,301
|
py
|
Python
|
release/scripts/addons/add_curve_sapling/presets/black_tupelo.py
|
wycivil08/blendocv
|
f6cce83e1f149fef39afa8043aade9c64378f33e
|
[
"Unlicense"
] | 30
|
2015-01-29T14:06:05.000Z
|
2022-01-10T07:47:29.000Z
|
release/scripts/addons/add_curve_sapling/presets/black_tupelo.py
|
ttagu99/blendocv
|
f6cce83e1f149fef39afa8043aade9c64378f33e
|
[
"Unlicense"
] | 1
|
2017-02-20T20:57:48.000Z
|
2018-12-19T23:44:38.000Z
|
release/scripts/addons/add_curve_sapling/presets/black_tupelo.py
|
ttagu99/blendocv
|
f6cce83e1f149fef39afa8043aade9c64378f33e
|
[
"Unlicense"
] | 15
|
2015-04-23T02:38:36.000Z
|
2021-03-01T20:09:39.000Z
|
{'pruneWidthPeak': 0.6000000238418579, 'downAngleV': (0.0, -40.0, 10.0, 10.0), 'frameRate': 1.0, 'lengthV': (0.0, 0.05000000074505806, 0.10000000149011612, 0.0), 'shape': '4', 'seed': 0, 'bend': 0.0, 'armAnim': False, 'useArm': False, 'splitAngle': (0.0, 0.0, 0.0, 0.0), 'baseSize': 0.20000000298023224, 'baseSplits': 0, 'scaleV': 5.0, 'scale': 23.0, 'ratio': 0.014999999664723873, 'curveV': (40.0, 90.0, 150.0, 0.0), 'prunePowerHigh': 0.5, 'splitAngleV': (0.0, 0.0, 0.0, 0.0), 'resU': 4, 'segSplits': (0.0, 0.0, 0.0, 0.0), 'ratioPower': 1.2999999523162842, 'handleType': '1', 'length': (1.0, 0.30000001192092896, 0.6000000238418579, 0.4000000059604645), 'rotateV': (0.0, 0.0, 0.0, 0.0), 'attractUp': 0.5, 'scale0': 1.0, 'bevel': False, 'leafDist': '4', 'chooseSet': '0', 'levels': 4, 'downAngle': (90.0, 60.0, 30.0, 45.0), 'showLeaves': False, 'prunePowerLow': 0.0010000000474974513, 'scaleV0': 0.0, 'leafScaleX': 0.5, 'curveRes': (10, 10, 10, 1), 'rotate': (140.0, 140.0, 140.0, 140.0), 'branches': (0, 50, 25, 12), 'prune': False, 'bevelRes': 0, 'taper': (1.0, 1.0, 1.0, 1.0), 'pruneRatio': 1.0, 'leaves': 6, 'curve': (0.0, 0.0, -10.0, 0.0), 'leafScale': 0.30000001192092896, 'windSpeed': 2.0, 'pruneWidth': 0.4000000059604645, 'windGust': 0.0, 'startCurv': 0.0, 'curveBack': (0.0, 0.0, 0.0, 0.0)}
| 650.5
| 1,300
| 0.613374
| 217
| 1,301
| 3.677419
| 0.359447
| 0.12782
| 0.131579
| 0.130326
| 0.080201
| 0.080201
| 0.050125
| 0.050125
| 0
| 0
| 0
| 0.329585
| 0.111453
| 1,301
| 1
| 1,301
| 1,301
| 0.360727
| 0
| 0
| 0
| 0
| 0
| 0.305919
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
5379ef4e8a83352ef31ee0dfd25e6bffa4c7dfe2
| 516
|
py
|
Python
|
sleekxmpp/features/feature_mechanisms/__init__.py
|
calendar42/SleekXMPP--XEP-0080-
|
d7bd5fd29f26a5d7de872a49ff63a353b8043e49
|
[
"BSD-3-Clause"
] | 1
|
2019-04-12T12:20:12.000Z
|
2019-04-12T12:20:12.000Z
|
sleekxmpp/features/feature_mechanisms/__init__.py
|
vijayp/SleekXMPP
|
b2e7f57334d27f140f079213c2016615b7168742
|
[
"BSD-3-Clause"
] | null | null | null |
sleekxmpp/features/feature_mechanisms/__init__.py
|
vijayp/SleekXMPP
|
b2e7f57334d27f140f079213c2016615b7168742
|
[
"BSD-3-Clause"
] | 1
|
2020-05-06T18:46:53.000Z
|
2020-05-06T18:46:53.000Z
|
"""
SleekXMPP: The Sleek XMPP Library
Copyright (C) 2011 Nathanael C. Fritz
This file is part of SleekXMPP.
See the file LICENSE for copying permission.
"""
from sleekxmpp.features.feature_mechanisms.mechanisms import feature_mechanisms
from sleekxmpp.features.feature_mechanisms.stanza import Mechanisms
from sleekxmpp.features.feature_mechanisms.stanza import Auth
from sleekxmpp.features.feature_mechanisms.stanza import Success
from sleekxmpp.features.feature_mechanisms.stanza import Failure
| 36.857143
| 79
| 0.821705
| 65
| 516
| 6.430769
| 0.446154
| 0.244019
| 0.251196
| 0.334928
| 0.617225
| 0.526316
| 0.526316
| 0.287081
| 0
| 0
| 0
| 0.008869
| 0.125969
| 516
| 13
| 80
| 39.692308
| 0.91796
| 0.290698
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
53a7c0ef3add2fa14bcac9677413436390d55766
| 23
|
py
|
Python
|
imvr.py
|
StahlFerro/Erlenmeyer
|
a337813eb18f1a688dfe7da8b194c7887aa89bdb
|
[
"MIT"
] | 5
|
2019-10-19T11:57:14.000Z
|
2021-06-18T23:07:49.000Z
|
imvr.py
|
StahlFerro/IMVR
|
a337813eb18f1a688dfe7da8b194c7887aa89bdb
|
[
"MIT"
] | null | null | null |
imvr.py
|
StahlFerro/IMVR
|
a337813eb18f1a688dfe7da8b194c7887aa89bdb
|
[
"MIT"
] | null | null | null |
from server import app
| 11.5
| 22
| 0.826087
| 4
| 23
| 4.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.173913
| 23
| 1
| 23
| 23
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
99090a21d06745d522b7dbbbcd914d502ae7c5ed
| 21
|
py
|
Python
|
test/run.py
|
samiatunivr/CellMojo
|
cd7661667c88268146f8e0e8f13d4a571701d159
|
[
"Apache-2.0"
] | 2
|
2018-05-24T10:15:05.000Z
|
2021-10-08T09:18:39.000Z
|
test.py
|
Nexlson/Malaysia_License_Plate_Generator
|
46d8499208c217d3892769d4a4beaaccaf600bcc
|
[
"MIT"
] | null | null | null |
test.py
|
Nexlson/Malaysia_License_Plate_Generator
|
46d8499208c217d3892769d4a4beaaccaf600bcc
|
[
"MIT"
] | 1
|
2017-06-27T11:31:30.000Z
|
2017-06-27T11:31:30.000Z
|
import cv2
import os
| 7
| 10
| 0.809524
| 4
| 21
| 4.25
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.058824
| 0.190476
| 21
| 2
| 11
| 10.5
| 0.941176
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
54d1d748fb13c276e094e74dee16082183783a4b
| 13,852
|
py
|
Python
|
tests/test_recursion.py
|
stac-utils/stac-validator
|
c53a8aa73c69db3820502606b51f108249fe728a
|
[
"Apache-2.0"
] | 2
|
2022-03-11T19:47:52.000Z
|
2022-03-15T13:35:07.000Z
|
tests/test_recursion.py
|
stac-utils/stac-validator
|
c53a8aa73c69db3820502606b51f108249fe728a
|
[
"Apache-2.0"
] | 8
|
2022-03-10T21:21:21.000Z
|
2022-03-24T19:21:37.000Z
|
tests/test_recursion.py
|
stac-utils/stac-validator
|
c53a8aa73c69db3820502606b51f108249fe728a
|
[
"Apache-2.0"
] | null | null | null |
"""
Description: Test validation for recursion
"""
__authors__ = "James Banting", "Jonathan Healy"
from stac_validator import stac_validator
def test_recursive_lvl_3_v070():
stac_file = "https://radarstac.s3.amazonaws.com/stac/catalog.json"
stac = stac_validator.StacValidate(stac_file, recursive=True, max_depth=4)
stac.run()
assert stac.message == [
{
"version": "0.7.0",
"path": "https://radarstac.s3.amazonaws.com/stac/catalog.json",
"schema": ["https://cdn.staclint.com/v0.7.0/catalog.json"],
"asset_type": "CATALOG",
"validation_method": "recursive",
"valid_stac": True,
},
{
"version": "0.7.0",
"path": "https://radarstac.s3.amazonaws.com/stac/radarsat-1/collection.json",
"schema": ["https://cdn.staclint.com/v0.7.0/collection.json"],
"asset_type": "COLLECTION",
"validation_method": "recursive",
"valid_stac": True,
},
{
"version": "0.7.0",
"path": "https://radarstac.s3.amazonaws.com/stac/radarsat-1/slc/catalog.json",
"schema": ["https://cdn.staclint.com/v0.7.0/catalog.json"],
"asset_type": "CATALOG",
"validation_method": "recursive",
"valid_stac": True,
},
{
"version": "0.7.0",
"path": "https://radarstac.s3.amazonaws.com/stac/radarsat-1/slc/2012-05-13/RS1_M0630938_F2N_20120513_225708_HH_SLC.json",
"schema": ["https://cdn.staclint.com/v0.7.0/item.json"],
"asset_type": "ITEM",
"validation_method": "recursive",
"valid_stac": True,
},
{
"version": "0.7.0",
"path": "https://radarstac.s3.amazonaws.com/stac/radarsat-1/slc/2012-06-14/RS1_M0634796_F3F_20120614_110317_HH_SLC.json",
"schema": ["https://cdn.staclint.com/v0.7.0/item.json"],
"asset_type": "ITEM",
"validation_method": "recursive",
"valid_stac": True,
},
{
"version": "0.7.0",
"path": "https://radarstac.s3.amazonaws.com/stac/radarsat-1/slc/2012-06-14/RS1_M0634795_F3F_20120614_110311_HH_SLC.json",
"schema": ["https://cdn.staclint.com/v0.7.0/item.json"],
"asset_type": "ITEM",
"validation_method": "recursive",
"valid_stac": True,
},
{
"version": "0.7.0",
"path": "https://radarstac.s3.amazonaws.com/stac/radarsat-1/slc/2012-10-12/RS1_M0634798_F3F_20121012_110325_HH_SLC.json",
"schema": ["https://cdn.staclint.com/v0.7.0/item.json"],
"asset_type": "ITEM",
"validation_method": "recursive",
"valid_stac": True,
},
{
"version": "0.7.0",
"path": "https://radarstac.s3.amazonaws.com/stac/radarsat-1/slc/2012-10-12/RS1_M0634799_F3F_20121012_110331_HH_SLC.json",
"schema": ["https://cdn.staclint.com/v0.7.0/item.json"],
"asset_type": "ITEM",
"validation_method": "recursive",
"valid_stac": True,
},
{
"version": "0.7.0",
"path": "https://radarstac.s3.amazonaws.com/stac/radarsat-1/raw/catalog.json",
"schema": ["https://cdn.staclint.com/v0.7.0/catalog.json"],
"asset_type": "CATALOG",
"validation_method": "recursive",
"valid_stac": True,
},
{
"version": "0.7.0",
"path": "https://radarstac.s3.amazonaws.com/stac/radarsat-1/raw/2012-05-13/RS1_M0000676_F2N_20120513_225701_HH_RAW.json",
"schema": ["https://cdn.staclint.com/v0.7.0/item.json"],
"asset_type": "ITEM",
"validation_method": "recursive",
"valid_stac": True,
},
]
def test_recursive_local_v090():
stac_file = "tests/test_data/v090/catalog.json"
stac = stac_validator.StacValidate(stac_file, recursive=True, max_depth=1)
stac.run()
assert stac.message == [
{
"version": "0.9.0",
"path": "tests/test_data/v090/catalog.json",
"schema": ["https://cdn.staclint.com/v0.9.0/catalog.json"],
"asset_type": "CATALOG",
"validation_method": "recursive",
"valid_stac": True,
},
{
"version": "0.9.0",
"path": "tests/test_data/v090/items/sample.json",
"schema": ["https://cdn.staclint.com/v0.9.0/item.json"],
"asset_type": "ITEM",
"validation_method": "recursive",
"valid_stac": True,
},
{
"version": "0.9.0",
"path": "tests/test_data/v090/items/good_item_v090.json",
"schema": [
"https://cdn.staclint.com/v0.9.0/extension/eo.json",
"https://cdn.staclint.com/v0.9.0/extension/view.json",
"https://cdn.staclint.com/v0.9.0/item.json",
],
"asset_type": "ITEM",
"validation_method": "recursive",
"valid_stac": True,
},
]
def test_recursive_v1beta1():
stac_file = "tests/test_data/1beta1/sentinel2.json"
stac = stac_validator.StacValidate(stac_file, recursive=True, max_depth=0)
stac.run()
assert stac.message == [
{
"version": "1.0.0-beta.1",
"path": "tests/test_data/1beta1/sentinel2.json",
"schema": ["https://cdn.staclint.com/v1.0.0-beta.1/collection.json"],
"asset_type": "COLLECTION",
"validation_method": "recursive",
"valid_stac": True,
}
]
def test_recursive_v1beta2():
stac_file = "https://raw.githubusercontent.com/stac-utils/pystac/main/tests/data-files/examples/1.0.0-beta.2/collection-spec/examples/sentinel2.json"
stac = stac_validator.StacValidate(stac_file, recursive=True, max_depth=0)
stac.run()
assert stac.message == [
{
"version": "1.0.0-beta.2",
"path": "https://raw.githubusercontent.com/stac-utils/pystac/main/tests/data-files/examples/1.0.0-beta.2/collection-spec/examples/sentinel2.json",
"schema": [
"https://schemas.stacspec.org/v1.0.0-beta.2/collection-spec/json-schema/collection.json"
],
"asset_type": "COLLECTION",
"validation_method": "recursive",
"valid_stac": True,
}
]
def test_recursion_collection_local_v1rc1():
stac_file = "tests/test_data/1rc1/collection.json"
stac = stac_validator.StacValidate(stac_file, recursive=True, max_depth=1)
stac.run()
assert stac.message == [
{
"version": "1.0.0-rc.1",
"path": "tests/test_data/1rc1/collection.json",
"schema": [
"https://schemas.stacspec.org/v1.0.0-rc.1/collection-spec/json-schema/collection.json"
],
"asset_type": "COLLECTION",
"validation_method": "recursive",
"valid_stac": True,
},
{
"version": "1.0.0-rc.1",
"path": "tests/test_data/1rc1/./simple-item.json",
"schema": [
"https://schemas.stacspec.org/v1.0.0-rc.1/item-spec/json-schema/item.json"
],
"asset_type": "ITEM",
"validation_method": "recursive",
"valid_stac": True,
},
{
"version": "1.0.0-rc.1",
"path": "tests/test_data/1rc1/./core-item.json",
"schema": [
"https://schemas.stacspec.org/v1.0.0-rc.1/item-spec/json-schema/item.json"
],
"asset_type": "ITEM",
"validation_method": "recursive",
"valid_stac": True,
},
{
"version": "1.0.0-rc.1",
"path": "tests/test_data/1rc1/./extended-item.json",
"schema": [
"https://cdn.staclint.com/v1.0.0-rc.1/extension/eo.json",
"https://cdn.staclint.com/v1.0.0-rc.1/extension/projection.json",
"https://cdn.staclint.com/v1.0.0-rc.1/extension/scientific.json",
"https://cdn.staclint.com/v1.0.0-rc.1/extension/view.json",
"https://schemas.stacspec.org/v1.0.0-rc.1/item-spec/json-schema/item.json",
],
"asset_type": "ITEM",
"validation_method": "recursive",
"valid_stac": True,
},
]
def test_recursion_collection_local_v1rc2():
stac_file = "tests/test_data/1rc2/collection.json"
stac = stac_validator.StacValidate(stac_file, recursive=True, max_depth=1)
stac.run()
assert stac.message == [
{
"version": "1.0.0-rc.2",
"path": "tests/test_data/1rc2/collection.json",
"schema": [
"https://schemas.stacspec.org/v1.0.0-rc.2/collection-spec/json-schema/collection.json"
],
"asset_type": "COLLECTION",
"validation_method": "recursive",
"valid_stac": True,
},
{
"version": "1.0.0-rc.2",
"path": "tests/test_data/1rc2/./simple-item.json",
"schema": [
"https://schemas.stacspec.org/v1.0.0-rc.2/item-spec/json-schema/item.json"
],
"asset_type": "ITEM",
"validation_method": "recursive",
"valid_stac": True,
},
{
"version": "1.0.0-rc.2",
"path": "tests/test_data/1rc2/./core-item.json",
"schema": [
"https://schemas.stacspec.org/v1.0.0-rc.2/item-spec/json-schema/item.json"
],
"asset_type": "ITEM",
"validation_method": "recursive",
"valid_stac": True,
},
{
"version": "1.0.0-rc.2",
"path": "tests/test_data/1rc2/./extended-item.json",
"schema": [
"https://stac-extensions.github.io/eo/v1.0.0/schema.json",
"https://stac-extensions.github.io/projection/v1.0.0/schema.json",
"https://stac-extensions.github.io/scientific/v1.0.0/schema.json",
"https://stac-extensions.github.io/view/v1.0.0/schema.json",
"https://stac-extensions.github.io/remote-data/v1.0.0/schema.json",
"https://schemas.stacspec.org/v1.0.0-rc.2/item-spec/json-schema/item.json",
],
"asset_type": "ITEM",
"validation_method": "recursive",
"valid_stac": True,
},
]
def test_recursion_collection_local_2_v1rc2():
stac_file = "tests/test_data/1rc2/extensions-collection/collection.json"
stac = stac_validator.StacValidate(stac_file, recursive=True, max_depth=1)
stac.run()
assert stac.message == [
{
"version": "1.0.0-rc.2",
"path": "tests/test_data/1rc2/extensions-collection/collection.json",
"schema": [
"https://schemas.stacspec.org/v1.0.0-rc.2/collection-spec/json-schema/collection.json"
],
"asset_type": "COLLECTION",
"validation_method": "recursive",
"valid_stac": True,
},
{
"version": "1.0.0-rc.2",
"path": "tests/test_data/1rc2/extensions-collection/./proj-example/proj-example.json",
"schema": [
"https://stac-extensions.github.io/eo/v1.0.0/schema.json",
"https://schemas.stacspec.org/v1.0.0-rc.2/item-spec/json-schema/item.json",
],
"asset_type": "ITEM",
"validation_method": "recursive",
"valid_stac": True,
},
]
def test_recursion_without_max_depth():
stac_file = "tests/test_data/v100/catalog.json"
stac = stac_validator.StacValidate(stac_file, recursive=True)
stac.run()
assert len(stac.message) == 6
def test_recursion_with_bad_item():
stac_file = "tests/test_data/v100/catalog-with-bad-item.json"
stac = stac_validator.StacValidate(stac_file, recursive=True)
stac.run()
assert len(stac.message) == 2
assert stac.message == [
{
"version": "1.0.0",
"path": "tests/test_data/v100/catalog-with-bad-item.json",
"schema": [
"https://schemas.stacspec.org/v1.0.0/catalog-spec/json-schema/catalog.json"
],
"valid_stac": True,
"asset_type": "CATALOG",
"validation_method": "recursive",
},
{
"version": "1.0.0",
"path": "tests/test_data/v100/./bad-item.json",
"schema": [
"https://schemas.stacspec.org/v1.0.0/item-spec/json-schema/item.json"
],
"valid_stac": False,
"error_type": "JSONSchemaValidationError",
"error_message": "'id' is a required property of the root of the STAC object",
},
]
def test_recursion_with_missing_collection_link():
stac_file = "tests/test_data/v100/item-without-collection-link.json"
stac = stac_validator.StacValidate(stac_file, recursive=True)
assert not stac.run()
assert not stac.valid
assert len(stac.message) == 1
assert stac.message == [
{
"asset_type": "ITEM",
"version": "1.0.0",
"path": "tests/test_data/v100/item-without-collection-link.json",
"schema": [
"https://schemas.stacspec.org/v1.0.0/item-spec/json-schema/item.json"
],
"valid_stac": False,
"validation_method": "recursive",
"error_type": "JSONSchemaValidationError",
"error_message": "'simple-collection' should not be valid under {}. Error is in collection",
},
]
54e538ef874ff6235bd3adb281f8d273929ca01d | 3,636 | py | Python | pset_conditionals/rps/tests/test_p3.py | mottaquikarim/pydev-psets | 9749e0d216ee0a5c586d0d3013ef481cc21dee27 | ["MIT"] | 5 | 2019-04-08T20:05:37.000Z | 2019-12-04T20:48:45.000Z | 8 | 2019-04-15T15:16:05.000Z | 2022-02-12T10:33:32.000Z | 2 | 2019-04-10T00:14:42.000Z | 2020-02-26T20:35:21.000Z
import io
import pytest
import sys
from unittest import TestCase
from unittest.mock import patch
@pytest.mark.describe('Play RPS w/Input')
class TestPrint(TestCase):
vals = ["1", "3"]
def set_pvals(self):
vals = self.vals
def ret(*args, **kwargs):
nonlocal vals
r = vals[0]
vals = vals[1:]
return r
return ret
@pytest.mark.it('if p1 and p2 are equal then print 0')
@patch('sys.stdout', new_callable=io.StringIO)
@patch('builtins.input')
def test_output_tie(self, mock_input, mock_stdout):
mock_input.return_value = '1'
if sys.modules.get('p3'):
del sys.modules['p3']
import p3
stdout_sanitized = mock_stdout.getvalue().replace('\n', '')
assert "0" in stdout_sanitized
@pytest.mark.it('if p1 is r and p2 is s, print 1')
@patch('sys.stdout', new_callable=io.StringIO)
@patch('builtins.input')
def test_output_rs(self, mock_input, mock_stdout):
self.vals = ["r", "s"]
mock_input.side_effect = self.set_pvals()
if sys.modules.get('p3'):
del sys.modules['p3']
import p3
stdout_sanitized = mock_stdout.getvalue().replace('\n', '')
assert "1" in stdout_sanitized
@pytest.mark.it('if p1 is r and p2 is p, print 2')
@patch('sys.stdout', new_callable=io.StringIO)
@patch('builtins.input')
def test_output_rp(self, mock_input, mock_stdout):
self.vals = ["r", "p"]
mock_input.side_effect = self.set_pvals()
if sys.modules.get('p3'):
del sys.modules['p3']
import p3
stdout_sanitized = mock_stdout.getvalue().replace('\n', '')
assert "2" in stdout_sanitized
@pytest.mark.it('if p1 is s and p2 is p, print 1')
@patch('sys.stdout', new_callable=io.StringIO)
@patch('builtins.input')
def test_output_sp(self, mock_input, mock_stdout):
self.vals = ["s", "p"]
mock_input.side_effect = self.set_pvals()
if sys.modules.get('p3'):
del sys.modules['p3']
import p3
stdout_sanitized = mock_stdout.getvalue().replace('\n', '')
assert "1" in stdout_sanitized
@pytest.mark.it('if p1 is s and p2 is r, print 2')
@patch('sys.stdout', new_callable=io.StringIO)
@patch('builtins.input')
def test_output_sr(self, mock_input, mock_stdout):
self.vals = ["s", "r"]
mock_input.side_effect = self.set_pvals()
if sys.modules.get('p3'):
del sys.modules['p3']
import p3
stdout_sanitized = mock_stdout.getvalue().replace('\n', '')
assert "2" in stdout_sanitized
@pytest.mark.it('if p1 is p and p2 is r, print 1')
@patch('sys.stdout', new_callable=io.StringIO)
@patch('builtins.input')
def test_output_pr(self, mock_input, mock_stdout):
self.vals = ["p", "r"]
mock_input.side_effect = self.set_pvals()
if sys.modules.get('p3'):
del sys.modules['p3']
import p3
stdout_sanitized = mock_stdout.getvalue().replace('\n', '')
assert "1" in stdout_sanitized
@pytest.mark.it('if p1 is p and p2 is s, print 2')
@patch('sys.stdout', new_callable=io.StringIO)
@patch('builtins.input')
def test_output_ps(self, mock_input, mock_stdout):
self.vals = ["p", "s"]
mock_input.side_effect = self.set_pvals()
if sys.modules.get('p3'):
del sys.modules['p3']
import p3
stdout_sanitized = mock_stdout.getvalue().replace('\n', '')
assert "2" in stdout_sanitized
54fef41e30838bf329ca8f50c5ce5e2a5fa44c17 | 268 | py | Python | 7-assets/past-student-repos/LambdaSchool-master/m6/61b1/src/item.py | eengineergz/Lambda | 1fe511f7ef550aed998b75c18a432abf6ab41c5f | ["MIT"] | null | null | null
# items implementation
class Item:
def __init__(self, item_name, item_description):
self.item_name = item_name
self.item_description = item_description
def __str__(self):
return '%s, %s' % (self.item_name, self.item_description)
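A minimal usage sketch for the Item class above; the object and strings here are illustrative, not part of the original file:

sword = Item("sword", "a rusty short sword")
print(sword)  # -> sword, a rusty short sword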
07383dd63815185948f0efbf9321be9f31f6a881 | 192 | py | Python | kultur.py | mtfalch/kulTUR | 67d296537b55be59f52a557ee54a6b22a40b7b2d | ["Apache-2.0"] | null | null | null
from app import app, db
from app.models import User, Trips, Tracks
@app.shell_context_processor
def make_shell_context():
return {'db': db, 'User': User, 'Trips': Trips, 'Tracks': Tracks}
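A small sanity check for the shell context above; calling the function directly like this is an assumed test, not part of the original file:

ctx = make_shell_context()
assert set(ctx) == {'db', 'User', 'Trips', 'Tracks'}  # names pre-imported by `flask shell`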
07415719ff9b5330d255aef89974907147b9689b | 128 | py | Python | league/views.py | MostafaMotahari/Sila-Website | 0004ac5a502c1b9febfd8a9087eb7a932dd039f1 | ["MIT"] | null | null | null
from django.shortcuts import render
# Create your views here.
def home(request):
return render(request, 'league/home.html')
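A hypothetical URLconf wiring for the view above; the urls.py module shown is an assumption, not part of the original repo:

from django.urls import path
from league import views

urlpatterns = [
    path('', views.home, name='home'),
]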
076366ada99e2823e53abfc2b768a2e4bca8fd60 | 214 | py | Python | src/course/tasks.py | cbsBiram/xarala__ssr | 863e1362c786daa752b942b796f7a015211d2f1b | ["FSFAP"] | null | null | null
from celery import task
from send_mail.views import enroll_course_mail
@task
def enroll_course(student_email, course, order):
mail_sent = enroll_course_mail(student_email, course, order)
return mail_sent
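An assumed call-site sketch for the task above; the argument values are placeholders, and .delay() is the standard Celery way to queue the task on a worker instead of running it inline:

enroll_course.delay('student@example.com', 'Intro to Python', 'ORDER-123')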
ab416cfaf4471180352369681d41bed14c5e9f32 | 45,616 | py | Python | tests/keras/legacy/interface_test.py | PJmouraocs/keras | 7a39b6c62d43c25472b2c2476bd2a8983ae4f682 | ["MIT"] | 259 | 2016-02-09T09:06:29.000Z | 2021-07-29T05:27:40.000Z | 50 | 2016-02-24T14:46:57.000Z | 2020-01-20T07:34:19.000Z | 94 | 2016-02-17T20:59:27.000Z | 2021-04-19T08:18:16.000Z
import pytest
import json
import keras
import keras.backend as K
import numpy as np
import os
def test_dense_legacy_interface():
old_layer = keras.layers.Dense(input_dim=3, output_dim=2, name='d')
new_layer = keras.layers.Dense(2, input_shape=(3,), name='d')
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
old_layer = keras.layers.Dense(2, bias=False, init='normal',
W_regularizer='l1',
W_constraint='maxnorm', name='d')
new_layer = keras.layers.Dense(2, use_bias=False,
kernel_initializer='normal',
kernel_regularizer='l1',
kernel_constraint='max_norm', name='d')
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
old_layer = keras.layers.Dense(2, bias=True,
b_regularizer='l1',
b_constraint='maxnorm', name='d')
new_layer = keras.layers.Dense(2, use_bias=True,
bias_regularizer='l1',
bias_constraint='max_norm', name='d')
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
def test_dropout_legacy_interface():
old_layer = keras.layers.Dropout(p=3, name='drop')
new_layer1 = keras.layers.Dropout(rate=3, name='drop')
new_layer2 = keras.layers.Dropout(3, name='drop')
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer1.get_config())
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer2.get_config())
def test_embedding_legacy_interface():
old_layer = keras.layers.Embedding(4, 2, name='d')
new_layer = keras.layers.Embedding(output_dim=2, input_dim=4, name='d')
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
old_layer = keras.layers.Embedding(input_dim=4, output_dim=2, name='d',
init='normal',
W_regularizer='l1',
W_constraint='maxnorm')
new_layer = keras.layers.Embedding(input_dim=4, output_dim=2, name='d',
embeddings_initializer='normal',
embeddings_regularizer='l1',
embeddings_constraint='max_norm')
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
old_layer = keras.layers.Embedding(1, 1, dropout=0.0, name='d')
new_layer = keras.layers.Embedding(1, 1, name='d')
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
def test_maxpooling1d_legacy_interface():
old_layer = keras.layers.MaxPool1D(pool_length=2,
border_mode='valid',
name='maxpool1d')
new_layer = keras.layers.MaxPool1D(pool_size=2,
padding='valid',
name='maxpool1d')
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
old_layer = keras.layers.MaxPool1D(2, padding='valid', name='maxpool1d')
new_layer = keras.layers.MaxPool1D(pool_size=2,
padding='valid',
name='maxpool1d')
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
def test_avgpooling1d_legacy_interface():
old_layer = keras.layers.AvgPool1D(pool_length=2,
border_mode='valid',
name='d')
new_layer = keras.layers.AvgPool1D(pool_size=2, padding='valid', name='d')
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
old_layer = keras.layers.AvgPool1D(2, padding='valid', name='d')
new_layer = keras.layers.AvgPool1D(pool_size=2, padding='valid', name='d')
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
def test_prelu_legacy_interface():
old_layer = keras.layers.PReLU(init='zero', name='p')
new_layer = keras.layers.PReLU('zero', name='p')
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
def test_gaussiannoise_legacy_interface():
old_layer = keras.layers.GaussianNoise(sigma=0.5, name='gn')
new_layer = keras.layers.GaussianNoise(stddev=0.5, name='gn')
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
def test_lstm_legacy_interface():
old_layer = keras.layers.LSTM(input_shape=[3, 5], output_dim=2, name='d')
new_layer = keras.layers.LSTM(2, input_shape=[3, 5], name='d')
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
old_layer = keras.layers.LSTM(input_shape=[3, 5], output_dim=2, name='d',
consume_less='mem')
new_layer = keras.layers.LSTM(2, input_shape=[3, 5], name='d', implementation=1)
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
old_layer = keras.layers.LSTM(input_dim=5, input_length=3,
output_dim=2, name='d', consume_less='mem')
new_layer = keras.layers.LSTM(2, input_shape=[3, 5], name='d', implementation=1)
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
old_layer = keras.layers.LSTM(input_dim=5,
output_dim=2, name='d', consume_less='mem')
new_layer = keras.layers.LSTM(2, input_shape=[None, 5], name='d',
implementation=1)
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
old_layer = keras.layers.LSTM(input_shape=[3, 5], output_dim=2, name='d',
consume_less='gpu')
new_layer = keras.layers.LSTM(2, input_shape=[3, 5], name='d', implementation=2)
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
old_layer = keras.layers.LSTM(2, init='normal',
inner_init='glorot_uniform',
forget_bias_init='one',
inner_activation='hard_sigmoid',
W_regularizer='l1',
U_regularizer='l1',
b_regularizer='l1',
dropout_W=0.1,
dropout_U=0.1,
name='LSTM')
new_layer = keras.layers.LSTM(2, kernel_initializer='normal',
recurrent_initializer='glorot_uniform',
unit_forget_bias=True,
recurrent_activation='hard_sigmoid',
kernel_regularizer='l1',
recurrent_regularizer='l1',
bias_regularizer='l1',
dropout=0.1,
recurrent_dropout=0.1,
name='LSTM')
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
old_layer = keras.layers.LSTM(2, init='normal',
inner_init='glorot_uniform',
forget_bias_init='zero',
inner_activation='hard_sigmoid',
W_regularizer='l1',
U_regularizer='l1',
b_regularizer='l1',
dropout_W=0.1,
dropout_U=0.1,
name='LSTM')
new_layer = keras.layers.LSTM(2, kernel_initializer='normal',
recurrent_initializer='glorot_uniform',
unit_forget_bias=True,
recurrent_activation='hard_sigmoid',
kernel_regularizer='l1',
recurrent_regularizer='l1',
bias_regularizer='l1',
dropout=0.1,
recurrent_dropout=0.1,
name='LSTM')
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
def test_simplernn_legacy_interface():
old_layer = keras.layers.SimpleRNN(input_shape=[3, 5], output_dim=2, name='d')
new_layer = keras.layers.SimpleRNN(2, input_shape=[3, 5], name='d')
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
old_layer = keras.layers.SimpleRNN(2, init='normal',
inner_init='glorot_uniform',
W_regularizer='l1',
U_regularizer='l1',
b_regularizer='l1',
dropout_W=0.1,
dropout_U=0.1,
name='SimpleRNN')
new_layer = keras.layers.SimpleRNN(2, kernel_initializer='normal',
recurrent_initializer='glorot_uniform',
kernel_regularizer='l1',
recurrent_regularizer='l1',
bias_regularizer='l1',
dropout=0.1,
recurrent_dropout=0.1,
name='SimpleRNN')
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
def test_gru_legacy_interface():
old_layer = keras.layers.GRU(input_shape=[3, 5], output_dim=2, name='d')
new_layer = keras.layers.GRU(2, input_shape=[3, 5], name='d')
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
old_layer = keras.layers.GRU(2, init='normal',
inner_init='glorot_uniform',
inner_activation='hard_sigmoid',
W_regularizer='l1',
U_regularizer='l1',
b_regularizer='l1',
dropout_W=0.1,
dropout_U=0.1,
name='GRU')
new_layer = keras.layers.GRU(2, kernel_initializer='normal',
recurrent_initializer='glorot_uniform',
recurrent_activation='hard_sigmoid',
kernel_regularizer='l1',
recurrent_regularizer='l1',
bias_regularizer='l1',
dropout=0.1,
recurrent_dropout=0.1,
name='GRU')
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
def test_gaussiandropout_legacy_interface():
old_layer = keras.layers.GaussianDropout(p=0.6, name='drop')
new_layer1 = keras.layers.GaussianDropout(rate=0.6, name='drop')
new_layer2 = keras.layers.GaussianDropout(0.6, name='drop')
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer1.get_config())
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer2.get_config())
def test_maxpooling2d_legacy_interface():
old_layer = keras.layers.MaxPooling2D(
pool_size=(2, 2), border_mode='valid', name='maxpool2d')
new_layer = keras.layers.MaxPool2D(
pool_size=2, padding='valid', name='maxpool2d')
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
old_layer = keras.layers.MaxPooling2D((2, 2), 2, 'valid', name='maxpool2d')
new_layer = keras.layers.MaxPool2D(
pool_size=2, strides=2, padding='valid', name='maxpool2d')
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
old_layer = keras.layers.MaxPooling2D(
(2, 2), padding='valid', dim_ordering='tf', name='maxpool2d')
new_layer = keras.layers.MaxPool2D(
pool_size=2, padding='valid', data_format='channels_last', name='maxpool2d')
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
old_layer = keras.layers.MaxPooling2D(
(2, 2), padding='valid', dim_ordering='th', name='maxpool2d')
new_layer = keras.layers.MaxPool2D(
pool_size=2, padding='valid', data_format='channels_first',
name='maxpool2d')
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
old_layer = keras.layers.MaxPooling2D(
(2, 2), padding='valid', dim_ordering='default', name='maxpool2d')
new_layer = keras.layers.MaxPool2D(
pool_size=2, padding='valid', name='maxpool2d')
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
def test_avgpooling2d_legacy_interface():
old_layer = keras.layers.AveragePooling2D(
pool_size=(2, 2), border_mode='valid', name='avgpooling2d')
new_layer = keras.layers.AvgPool2D(
pool_size=(2, 2), padding='valid', name='avgpooling2d')
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
old_layer = keras.layers.AveragePooling2D(
(2, 2), (2, 2), 'valid', name='avgpooling2d')
new_layer = keras.layers.AvgPool2D(
pool_size=(2, 2), strides=(2, 2), padding='valid', name='avgpooling2d')
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
old_layer = keras.layers.AveragePooling2D(
(2, 2), padding='valid', dim_ordering='tf', name='avgpooling2d')
new_layer = keras.layers.AvgPool2D(
pool_size=2, padding='valid', data_format='channels_last',
name='avgpooling2d')
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
old_layer = keras.layers.AveragePooling2D(
(2, 2), padding='valid', dim_ordering='th', name='avgpooling2d')
new_layer = keras.layers.AvgPool2D(
pool_size=2, padding='valid', data_format='channels_first',
name='avgpooling2d')
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
old_layer = keras.layers.AveragePooling2D(
(2, 2), padding='valid', dim_ordering='default', name='avgpooling2d')
new_layer = keras.layers.AvgPool2D(
pool_size=2, padding='valid', name='avgpooling2d')
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
def test_maxpooling3d_legacy_interface():
old_layer = keras.layers.MaxPooling3D(
pool_size=(2, 2, 2), border_mode='valid', name='maxpool3d')
new_layer = keras.layers.MaxPool3D(
pool_size=(2, 2, 2), padding='valid', name='maxpool3d')
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
old_layer = keras.layers.MaxPooling3D(
(2, 2, 2), (2, 2, 2), 'valid', name='maxpool3d')
new_layer = keras.layers.MaxPool3D(
pool_size=(2, 2, 2), strides=(2, 2, 2), padding='valid', name='maxpool3d')
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
old_layer = keras.layers.MaxPooling3D(
(2, 2, 2), padding='valid', dim_ordering='tf', name='maxpool3d')
new_layer = keras.layers.MaxPool3D(
pool_size=(2, 2, 2), padding='valid', data_format='channels_last',
name='maxpool3d')
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
old_layer = keras.layers.MaxPooling3D(
(2, 2, 2), padding='valid', dim_ordering='th', name='maxpool3d')
new_layer = keras.layers.MaxPool3D(
pool_size=(2, 2, 2), padding='valid', data_format='channels_first',
name='maxpool3d')
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
old_layer = keras.layers.MaxPooling3D(
(2, 2, 2), padding='valid', dim_ordering='default', name='maxpool3d')
new_layer = keras.layers.MaxPool3D(
pool_size=(2, 2, 2), padding='valid', name='maxpool3d')
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
def test_avgpooling3d_legacy_interface():
old_layer = keras.layers.AveragePooling3D(
pool_size=(2, 2, 2), border_mode='valid', name='avgpooling3d')
new_layer = keras.layers.AvgPool3D(
pool_size=(2, 2, 2), padding='valid', name='avgpooling3d')
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
old_layer = keras.layers.AveragePooling3D(
(2, 2, 2), (2, 2, 2), 'valid', name='avgpooling3d')
new_layer = keras.layers.AvgPool3D(
pool_size=(2, 2, 2), strides=(2, 2, 2), padding='valid',
name='avgpooling3d')
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
old_layer = keras.layers.AveragePooling3D(
(2, 2, 2), padding='valid', dim_ordering='tf', name='avgpooling3d')
new_layer = keras.layers.AvgPool3D(
pool_size=(2, 2, 2), padding='valid', data_format='channels_last',
name='avgpooling3d')
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
old_layer = keras.layers.AveragePooling3D(
(2, 2, 2), padding='valid', dim_ordering='th', name='avgpooling3d')
new_layer = keras.layers.AvgPool3D(
pool_size=(2, 2, 2), padding='valid', data_format='channels_first',
name='avgpooling3d')
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
old_layer = keras.layers.AveragePooling3D(
(2, 2, 2), padding='valid', dim_ordering='default', name='avgpooling3d')
new_layer = keras.layers.AvgPool3D(
pool_size=(2, 2, 2), padding='valid', name='avgpooling3d')
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
def test_global_maxpooling2d_legacy_interface():
old_layer = keras.layers.GlobalMaxPooling2D(dim_ordering='tf',
name='global_maxpool2d')
new_layer = keras.layers.GlobalMaxPool2D(data_format='channels_last',
name='global_maxpool2d')
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
old_layer = keras.layers.GlobalMaxPooling2D(dim_ordering='th',
name='global_maxpool2d')
new_layer = keras.layers.GlobalMaxPool2D(data_format='channels_first',
name='global_maxpool2d')
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
old_layer = keras.layers.GlobalMaxPooling2D(dim_ordering='default',
name='global_maxpool2d')
new_layer = keras.layers.GlobalMaxPool2D(name='global_maxpool2d')
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
def test_global_avgpooling2d_legacy_interface():
old_layer = keras.layers.GlobalAveragePooling2D(dim_ordering='tf',
name='global_avgpool2d')
new_layer = keras.layers.GlobalAvgPool2D(data_format='channels_last',
name='global_avgpool2d')
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
old_layer = keras.layers.GlobalAveragePooling2D(dim_ordering='th',
name='global_avgpool2d')
new_layer = keras.layers.GlobalAvgPool2D(data_format='channels_first',
name='global_avgpool2d')
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
old_layer = keras.layers.GlobalAveragePooling2D(dim_ordering='default',
name='global_avgpool2d')
new_layer = keras.layers.GlobalAvgPool2D(name='global_avgpool2d')
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
def test_global_maxpooling3d_legacy_interface():
old_layer = keras.layers.GlobalMaxPooling3D(dim_ordering='tf',
name='global_maxpool3d')
new_layer = keras.layers.GlobalMaxPool3D(data_format='channels_last',
name='global_maxpool3d')
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
old_layer = keras.layers.GlobalMaxPooling3D(dim_ordering='th',
name='global_maxpool3d')
new_layer = keras.layers.GlobalMaxPool3D(data_format='channels_first',
name='global_maxpool3d')
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
old_layer = keras.layers.GlobalMaxPooling3D(dim_ordering='default',
name='global_maxpool3d')
new_layer = keras.layers.GlobalMaxPool3D(name='global_maxpool3d')
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
def test_global_avgpooling3d_legacy_interface():
old_layer = keras.layers.GlobalAveragePooling3D(dim_ordering='tf',
name='global_avgpool3d')
new_layer = keras.layers.GlobalAvgPool3D(data_format='channels_last',
name='global_avgpool3d')
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
old_layer = keras.layers.GlobalAveragePooling3D(dim_ordering='th',
name='global_avgpool3d')
new_layer = keras.layers.GlobalAvgPool3D(data_format='channels_first',
name='global_avgpool3d')
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
old_layer = keras.layers.GlobalAveragePooling3D(dim_ordering='default',
name='global_avgpool3d')
new_layer = keras.layers.GlobalAvgPool3D(name='global_avgpool3d')
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
def test_upsampling1d_legacy_interface():
old_layer = keras.layers.UpSampling1D(length=3, name='us1d')
new_layer_1 = keras.layers.UpSampling1D(size=3, name='us1d')
new_layer_2 = keras.layers.UpSampling1D(3, name='us1d')
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer_1.get_config())
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer_2.get_config())
def test_upsampling2d_legacy_interface():
old_layer = keras.layers.UpSampling2D((2, 2), dim_ordering='tf', name='us2d')
new_layer = keras.layers.UpSampling2D((2, 2), data_format='channels_last',
name='us2d')
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
def test_upsampling3d_legacy_interface():
old_layer = keras.layers.UpSampling3D((2, 2, 2),
dim_ordering='tf',
name='us3d')
new_layer = keras.layers.UpSampling3D((2, 2, 2),
data_format='channels_last',
name='us3d')
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
def test_conv2d_legacy_interface():
old_layer = keras.layers.Convolution2D(5, 3, 3, name='conv')
new_layer = keras.layers.Conv2D(5, (3, 3), name='conv')
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
old_layer = keras.layers.Convolution2D(5, 3, nb_col=3, name='conv')
new_layer = keras.layers.Conv2D(5, (3, 3), name='conv')
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
old_layer = keras.layers.Convolution2D(5, nb_row=3, nb_col=3, name='conv')
new_layer = keras.layers.Conv2D(5, (3, 3), name='conv')
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
old_layer = keras.layers.Convolution2D(5, 3, 3,
init='normal',
subsample=(2, 2),
border_mode='valid',
dim_ordering='th',
W_regularizer='l1',
b_regularizer='l2',
W_constraint='maxnorm',
b_constraint='unitnorm',
name='conv')
new_layer = keras.layers.Conv2D(5, (3, 3),
kernel_initializer='normal',
strides=(2, 2),
padding='valid',
kernel_regularizer='l1',
bias_regularizer='l2',
kernel_constraint='max_norm',
bias_constraint='unit_norm',
data_format='channels_first',
name='conv')
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
def test_deconv2d_legacy_interface():
old_layer = keras.layers.Deconvolution2D(5, 3, 3, (6, 7, 5), name='deconv')
new_layer = keras.layers.Conv2DTranspose(5, (3, 3), name='deconv')
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
old_layer = keras.layers.Deconvolution2D(5, 3, 3, output_shape=(6, 7, 5),
name='deconv')
new_layer = keras.layers.Conv2DTranspose(5, (3, 3), name='deconv')
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
old_layer = keras.layers.Deconvolution2D(5, 3, nb_col=3, output_shape=(6, 7, 5),
name='deconv')
new_layer = keras.layers.Conv2DTranspose(5, (3, 3), name='deconv')
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
old_layer = keras.layers.Deconvolution2D(5, nb_row=3, nb_col=3,
output_shape=(6, 7, 5), name='deconv')
new_layer = keras.layers.Conv2DTranspose(5, (3, 3), name='deconv')
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
old_layer = keras.layers.Deconvolution2D(5, 3, 3,
output_shape=(6, 7, 5),
init='normal',
subsample=(2, 2),
border_mode='valid',
dim_ordering='th',
W_regularizer='l1',
b_regularizer='l2',
W_constraint='maxnorm',
b_constraint='unitnorm',
name='conv')
new_layer = keras.layers.Conv2DTranspose(
5, (3, 3),
kernel_initializer='normal',
strides=(2, 2),
padding='valid',
kernel_regularizer='l1',
bias_regularizer='l2',
kernel_constraint='max_norm',
bias_constraint='unit_norm',
data_format='channels_first',
name='conv')
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
def test_conv1d_legacy_interface():
old_layer = keras.layers.Convolution1D(5,
filter_length=3,
input_dim=3,
input_length=4,
name='conv')
new_layer = keras.layers.Conv1D(5, 3, name='conv', input_shape=(4, 3))
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
old_layer = keras.layers.Convolution1D(5, 3,
init='normal',
subsample_length=2,
border_mode='valid',
W_regularizer='l1',
b_regularizer='l2',
W_constraint='maxnorm',
b_constraint='unitnorm',
name='conv')
new_layer = keras.layers.Conv1D(5, 3,
kernel_initializer='normal',
strides=2,
padding='valid',
kernel_regularizer='l1',
bias_regularizer='l2',
kernel_constraint='max_norm',
bias_constraint='unit_norm',
name='conv')
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
def test_separable_conv2d_legacy_interface():
old_layer = keras.layers.SeparableConv2D(5, 3, 3, name='conv')
new_layer = keras.layers.SeparableConv2D(5, (3, 3), name='conv')
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
old_layer = keras.layers.SeparableConv2D(5, 3, nb_col=3, name='conv')
new_layer = keras.layers.SeparableConv2D(5, (3, 3), name='conv')
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
old_layer = keras.layers.SeparableConv2D(5, nb_row=3, nb_col=3, name='conv')
new_layer = keras.layers.SeparableConv2D(5, (3, 3), name='conv')
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
old_layer = keras.layers.SeparableConv2D(5, 3, 3,
init='normal',
subsample=(2, 2),
border_mode='valid',
dim_ordering='th',
depthwise_regularizer='l1',
b_regularizer='l2',
depthwise_constraint='maxnorm',
b_constraint='unitnorm',
name='conv')
new_layer = keras.layers.SeparableConv2D(5, (3, 3),
depthwise_initializer='normal',
pointwise_initializer='normal',
strides=(2, 2),
padding='valid',
depthwise_regularizer='l1',
bias_regularizer='l2',
depthwise_constraint='max_norm',
bias_constraint='unit_norm',
data_format='channels_first',
name='conv')
old_config = json.dumps(old_layer.get_config())
new_config = json.dumps(new_layer.get_config())
assert old_config == new_config
def test_conv3d_legacy_interface():
old_layer = keras.layers.Convolution3D(5, 3, 3, 4, name='conv')
new_layer = keras.layers.Conv3D(5, (3, 3, 4), name='conv')
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
old_layer = keras.layers.Convolution3D(5, 3, 3, kernel_dim3=4, name='conv')
new_layer = keras.layers.Conv3D(5, (3, 3, 4), name='conv')
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
old_layer = keras.layers.Convolution3D(5, 3,
kernel_dim2=3,
kernel_dim3=4,
name='conv')
new_layer = keras.layers.Conv3D(5, (3, 3, 4), name='conv')
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
old_layer = keras.layers.Convolution3D(5,
kernel_dim1=3,
kernel_dim2=3,
kernel_dim3=4,
name='conv')
new_layer = keras.layers.Conv3D(5, (3, 3, 4), name='conv')
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
old_layer = keras.layers.Convolution3D(5, 3, 3, 4,
init='normal',
subsample=(2, 2, 2),
border_mode='valid',
dim_ordering='th',
W_regularizer='l1',
b_regularizer='l2',
W_constraint='maxnorm',
b_constraint='unitnorm',
name='conv')
new_layer = keras.layers.Conv3D(5, (3, 3, 4),
kernel_initializer='normal',
strides=(2, 2, 2),
padding='valid',
kernel_regularizer='l1',
bias_regularizer='l2',
kernel_constraint='max_norm',
bias_constraint='unit_norm',
data_format='channels_first',
name='conv')
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
def test_convlstm2d_legacy_interface():
old_layer = keras.layers.ConvLSTM2D(5, 3, 3, name='conv')
new_layer = keras.layers.ConvLSTM2D(5, (3, 3), name='conv')
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
old_layer = keras.layers.ConvLSTM2D(5, 3, nb_col=3, name='conv')
new_layer = keras.layers.ConvLSTM2D(5, (3, 3), name='conv')
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
old_layer = keras.layers.ConvLSTM2D(5, nb_row=3, nb_col=3, name='conv')
new_layer = keras.layers.ConvLSTM2D(5, (3, 3), name='conv')
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
old_layer = keras.layers.ConvLSTM2D(5, 3, 3,
init='normal',
inner_init='uniform',
forget_bias_init='one',
inner_activation='relu',
subsample=(2, 2),
border_mode='valid',
dim_ordering='th',
W_regularizer='l1',
U_regularizer='l2',
b_regularizer='l2',
dropout_W=0.2,
dropout_U=0.1,
name='conv')
new_layer = keras.layers.ConvLSTM2D(5, (3, 3),
kernel_initializer='normal',
recurrent_initializer='uniform',
unit_forget_bias=True,
recurrent_activation='relu',
strides=(2, 2),
padding='valid',
kernel_regularizer='l1',
recurrent_regularizer='l2',
bias_regularizer='l2',
data_format='channels_first',
dropout=0.2,
recurrent_dropout=0.1,
name='conv')
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
def test_batchnorm_legacy_interface():
old_layer = keras.layers.BatchNormalization(mode=0, name='bn')
new_layer = keras.layers.BatchNormalization(name='bn')
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
old_layer = keras.layers.BatchNormalization(mode=0,
beta_init='one',
gamma_init='uniform',
name='bn')
new_layer = keras.layers.BatchNormalization(beta_initializer='ones',
gamma_initializer='uniform',
name='bn')
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
def test_atrousconv1d_legacy_interface():
old_layer = keras.layers.AtrousConvolution1D(5, 3,
init='normal',
subsample_length=2,
border_mode='valid',
W_regularizer='l1',
b_regularizer='l2',
W_constraint='maxnorm',
b_constraint='unitnorm',
atrous_rate=2,
name='conv')
new_layer = keras.layers.Conv1D(5, 3,
kernel_initializer='normal',
strides=2,
padding='valid',
kernel_regularizer='l1',
bias_regularizer='l2',
kernel_constraint='max_norm',
bias_constraint='unit_norm',
dilation_rate=2,
name='conv')
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
def test_atrousconv2d_legacy_interface():
old_layer = keras.layers.AtrousConvolution2D(
5, 3, 3,
atrous_rate=(2, 2),
init='normal',
subsample=(2, 2),
border_mode='valid',
dim_ordering='th',
W_regularizer='l1',
b_regularizer='l2',
W_constraint='maxnorm',
b_constraint='unitnorm',
name='conv')
new_layer = keras.layers.Conv2D(5, (3, 3),
kernel_initializer='normal',
strides=(2, 2),
padding='valid',
kernel_regularizer='l1',
bias_regularizer='l2',
kernel_constraint='max_norm',
bias_constraint='unit_norm',
data_format='channels_first',
dilation_rate=(2, 2),
name='conv')
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
def test_zeropadding2d_legacy_interface():
old_layer = keras.layers.ZeroPadding2D(padding={'right_pad': 4,
'bottom_pad': 2,
'top_pad': 1,
'left_pad': 3},
dim_ordering='tf',
name='zp2d')
new_layer = keras.layers.ZeroPadding2D(((1, 2), (3, 4)),
data_format='channels_last',
name='zp2d')
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
def test_zeropadding3d_legacy_interface():
old_layer = keras.layers.ZeroPadding3D((2, 2, 2),
dim_ordering='tf',
name='zp3d')
new_layer = keras.layers.ZeroPadding3D((2, 2, 2),
data_format='channels_last',
name='zp3d')
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
def test_cropping2d_legacy_interface():
old_layer = keras.layers.Cropping2D(dim_ordering='tf', name='c2d')
new_layer = keras.layers.Cropping2D(data_format='channels_last', name='c2d')
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
def test_cropping3d_legacy_interface():
old_layer = keras.layers.Cropping3D(dim_ordering='tf', name='c3d')
new_layer = keras.layers.Cropping3D(data_format='channels_last', name='c3d')
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())
def DISABLED_test_generator_methods_interface():
"""This test may cause Travis to hang."""
def train_generator():
x = np.random.randn(2, 2)
y = np.random.randint(0, 2, size=[2, 1])
while True:
yield (x, y)
def val_generator():
x = np.random.randn(2, 2)
y = np.random.randint(0, 2, size=[2, 1])
while True:
yield (x, y)
def pred_generator():
x = np.random.randn(1, 2)
while True:
yield x
x = keras.layers.Input(shape=(2, ))
y = keras.layers.Dense(2)(x)
model = keras.models.Model(inputs=x, outputs=y)
model.compile(optimizer='rmsprop',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit_generator(generator=train_generator(),
samples_per_epoch=1,
validation_data=val_generator(),
nb_val_samples=1,
nb_worker=1, pickle_safe=True, max_q_size=3)
model.evaluate_generator(generator=train_generator(),
val_samples=2,
nb_worker=1, pickle_safe=False, max_q_size=3)
model.predict_generator(generator=pred_generator(),
val_samples=2,
nb_worker=1, pickle_safe=False, max_q_size=3)
def test_spatialdropout1d_legacy_interface():
old_layer = keras.layers.SpatialDropout1D(p=0.6, name='sd1d')
new_layer_1 = keras.layers.SpatialDropout1D(rate=0.6, name='sd1d')
new_layer_2 = keras.layers.SpatialDropout1D(0.6, name='sd1d')
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer_1.get_config())
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer_2.get_config())
def test_spatialdropout2d_legacy_interface():
old_layer = keras.layers.SpatialDropout2D(p=0.5,
dim_ordering='tf',
name='sd2d')
new_layer_1 = keras.layers.SpatialDropout2D(rate=0.5,
data_format='channels_last',
name='sd2d')
new_layer_2 = keras.layers.SpatialDropout2D(0.5,
data_format='channels_last',
name='sd2d')
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer_1.get_config())
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer_2.get_config())
def test_spatialdropout3d_legacy_interface():
old_layer = keras.layers.SpatialDropout3D(p=0.5,
dim_ordering='tf',
name='sd3d')
new_layer_1 = keras.layers.SpatialDropout3D(rate=0.5,
data_format='channels_last',
name='sd3d')
new_layer_2 = keras.layers.SpatialDropout3D(0.5,
data_format='channels_last',
name='sd3d')
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer_1.get_config())
assert json.dumps(old_layer.get_config()) == json.dumps(new_layer_2.get_config())
if __name__ == '__main__':
pytest.main([__file__])
ab5301b21fecb4d85d23621790a84b39c68199fd | 3,844 | py | Python | tests/modules/collaborations/resources/utils.py | karenc/houston | 4eaaaf11d61394035e34b55bb847ea7eb4099c61 | ["Apache-2.0"] | null | null | null | 2 | 2021-03-16T20:28:06.000Z | 2021-03-29T15:54:11.000Z | null | null | null
# -*- coding: utf-8 -*-
"""
Collaboration resources utils
-------------
"""
import json
from tests import utils as test_utils
PATH = '/api/v1/collaborations/'
def create_collaboration(
flask_app_client, user, data, expected_status_code=200, expected_error=''
):
if user:
with flask_app_client.login(user, auth_scopes=('collaborations:write',)):
response = flask_app_client.post(
'%s' % PATH,
content_type='application/json',
data=json.dumps(data),
)
else:
response = flask_app_client.post(
'%s' % PATH,
content_type='application/json',
data=json.dumps(data),
)
if expected_status_code == 200:
test_utils.validate_dict_response(response, 200, {'guid', 'members'})
elif 400 <= expected_status_code < 500:
test_utils.validate_dict_response(
response, expected_status_code, {'status', 'message'}
)
assert response.json['message'] == expected_error, response.json['message']
else:
test_utils.validate_dict_response(
response, expected_status_code, {'status', 'message'}
)
return response
def patch_collaboration(
flask_app_client,
collaboration_guid,
user,
data,
expected_status_code=200,
expected_error='',
):
with flask_app_client.login(user, auth_scopes=('collaborations:write',)):
response = flask_app_client.patch(
'%s%s' % (PATH, collaboration_guid),
content_type='application/json',
data=json.dumps(data),
)
if expected_status_code == 200:
test_utils.validate_dict_response(response, 200, {'guid'})
else:
test_utils.validate_dict_response(
response, expected_status_code, {'status', 'message'}
)
assert response.json['message'] == expected_error, response.json['message']
return response
def read_collaboration(
flask_app_client, user, collaboration_guid, expected_status_code=200
):
if user:
with flask_app_client.login(user, auth_scopes=('collaborations:read',)):
response = flask_app_client.get(f'{PATH}{collaboration_guid}')
else:
response = flask_app_client.get(f'{PATH}{collaboration_guid}')
if expected_status_code == 200:
test_utils.validate_dict_response(response, 200, {'guid'})
elif expected_status_code == 404:
test_utils.validate_dict_response(response, expected_status_code, {'message'})
else:
test_utils.validate_dict_response(
response, expected_status_code, {'status', 'message'}
)
return response
def read_all_collaborations(flask_app_client, user, expected_status_code=200):
with flask_app_client.login(user, auth_scopes=('collaborations:read',)):
response = flask_app_client.get(PATH)
if expected_status_code == 200:
test_utils.validate_list_response(response, 200)
else:
test_utils.validate_dict_response(
response, expected_status_code, {'status', 'message'}
)
return response
def request_edit(
flask_app_client,
collaboration_guid,
user,
expected_status_code=200,
expected_error='',
):
with flask_app_client.login(user, auth_scopes=('collaborations:write',)):
response = flask_app_client.post(
f'{PATH}edit_request/{collaboration_guid}',
content_type='application/json',
)
if expected_status_code == 200:
test_utils.validate_dict_response(response, 200, {'guid'})
else:
test_utils.validate_dict_response(
response, expected_status_code, {'status', 'message'}
)
assert response.json['message'] == expected_error, response.json['message']
return response
dbb2d7af55f3279c773481e3feff84df27d630ee | 2,138 | py | Python | epytope/Data/pssms/tepitopepan/mat/DRB5_0102_9.py | christopher-mohr/epytope | 8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd | ["BSD-3-Clause"] | 7 | 2021-02-01T18:11:28.000Z | 2022-01-31T19:14:07.000Z | 22 | 2021-01-02T15:25:23.000Z | 2022-03-14T11:32:53.000Z | 4 | 2021-05-28T08:50:38.000Z | 2022-03-14T11:45:32.000Z
DRB5_0102_9 = {0: {'A': -999.0, 'E': -999.0, 'D': -999.0, 'G': -999.0, 'F': -0.004754, 'I': -0.99525, 'H': -999.0, 'K': -999.0, 'M': -0.99525, 'L': -0.99525, 'N': -999.0, 'Q': -999.0, 'P': -999.0, 'S': -999.0, 'R': -999.0, 'T': -999.0, 'W': -0.004754, 'V': -0.99525, 'Y': -0.004754}, 1: {'A': 0.0, 'E': 0.1, 'D': -1.3, 'G': 0.5, 'F': 0.8, 'I': 1.1, 'H': 0.8, 'K': 1.1, 'M': 1.1, 'L': 1.0, 'N': 0.8, 'Q': 1.2, 'P': -0.5, 'S': -0.3, 'R': 2.2, 'T': 0.0, 'W': -0.1, 'V': 2.1, 'Y': 0.9}, 2: {'A': 0.0, 'E': -1.2, 'D': -1.3, 'G': 0.2, 'F': 0.8, 'I': 1.5, 'H': 0.2, 'K': 0.0, 'M': 1.4, 'L': 1.0, 'N': 0.5, 'Q': 0.0, 'P': 0.3, 'S': 0.2, 'R': 0.7, 'T': 0.0, 'W': 0.0, 'V': 0.5, 'Y': 0.8}, 3: {'A': 0.0, 'E': -1.3011, 'D': -1.899, 'G': -1.5999, 'F': -0.59603, 'I': 1.2978, 'H': -1.3956, 'K': -1.695, 'M': 1.6968, 'L': 0.60049, 'N': -1.694, 'Q': -0.6989, 'P': -1.5003, 'S': -0.50003, 'R': -1.6955, 'T': 0.29704, 'W': -1.3947, 'V': 1.0967, 'Y': -0.59967}, 4: {'A': 0.0, 'E': 0.0, 'D': 0.0, 'G': 0.0, 'F': 0.0, 'I': 0.0, 'H': 0.0, 'K': 0.0, 'M': 0.0, 'L': 0.0, 'N': 0.0, 'Q': 0.0, 'P': 0.0, 'S': 0.0, 'R': 0.0, 'T': 0.0, 'W': 0.0, 'V': 0.0, 'Y': 0.0}, 5: {'A': 0.0, 'E': -2.0009, 'D': -2.0009, 'G': -0.30056, 'F': -1.6984, 'I': -1.3984, 'H': -1.1993, 'K': -1.4993, 'M': -1.4987, 'L': -0.99984, 'N': -1.2987, 'Q': -1.3995, 'P': 0.19872, 'S': -0.498, 'R': -1.2996, 'T': -0.79867, 'W': -1.6986, 'V': -1.2974, 'Y': -1.0}, 6: {'A': 0.0, 'E': -0.92291, 'D': -1.5198, 'G': 0.57609, 'F': 1.4532, 'I': 1.1677, 'H': 1.1587, 'K': 0.86634, 'M': 0.39529, 'L': 0.59238, 'N': 0.46551, 'Q': 0.66154, 'P': -0.60657, 'S': -0.22181, 'R': 1.2601, 'T': 0.24976, 'W': 0.37214, 'V': -0.31077, 'Y': 1.156}, 7: {'A': 0.0, 'E': 0.0, 'D': 0.0, 'G': 0.0, 'F': 0.0, 'I': 0.0, 'H': 0.0, 'K': 0.0, 'M': 0.0, 'L': 0.0, 'N': 0.0, 'Q': 0.0, 'P': 0.0, 'S': 0.0, 'R': 0.0, 'T': 0.0, 'W': 0.0, 'V': 0.0, 'Y': 0.0}, 8: {'A': 0.0, 'E': -0.47523, 'D': -0.96506, 'G': 0.32995, 'F': 0.90991, 'I': 0.76724, 'H': 0.033535, 'K': 0.77441, 'M': 0.82187, 'L': 0.38182, 'N': -0.43352, 'Q': 0.12656, 'P': -0.52079, 'S': 0.93407, 'R': 1.1065, 'T': -0.42469, 'W': -0.90037, 'V': 0.085936, 'Y': 0.57617}}
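A hypothetical scoring sketch, not taken from epytope, showing how a position-specific scoring matrix like this one is typically applied: a 9-mer peptide is scored by summing the value of the residue observed at each of the nine positions.

peptide = 'FKQATVLLV'  # example 9-mer, chosen only for illustration
score = sum(DRB5_0102_9[pos][aa] for pos, aa in enumerate(peptide))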
9163378f3bbf0b546bb1849dd72f326f1182adab | 37 | py | Python | aim/storage/treeutils.py | admariner/aim | 4c143ea40acf3531abfa69f66503428d73d9fedc | ["Apache-2.0"] | null | null | null
from aim.storage.treeutils_ import *
91758349b24d933ffaa9b5fc3f55b633eb142ef2 | 7,182 | py | Python | models.py | makarovartyom/Udacity-CVND-P1-Facial-Keypoints-detection-with-CNNs | c4171d3ebd70b7c2ef66020f3f357f6322f2daec | ["MIT"] | null | null | null
## TODO: define the convolutional neural network architecture
import torch
import torch.nn as nn
import torch.nn.functional as F
# can use the below import should you choose to initialize the weights of your Net
import torch.nn.init as I
# custom Neural Network
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
## TODO: Define all the layers of this CNN, the only requirements are:
## 1. This network takes in a square (same width and height), grayscale image as input
## 2. It ends with a linear layer that represents the keypoints
## it's suggested that you make this last layer output 136 values, 2 for each of the 68 keypoint (x, y) pairs
# As an example, you've been given a convolutional layer, which you may (but don't have to) change:
# 1 input image channel (grayscale), 32 output channels/feature maps, 5x5 square convolution kernel
## Note that among the layers to add, consider including:
# maxpooling layers, multiple conv layers, fully-connected layers, and other layers (such as dropout or batch normalization) to avoid overfitting
# First, define Convolutional layers
self.conv1 = nn.Conv2d(1, 32, 5)
self.conv2 = nn.Conv2d(32, 64, 3)
self.conv3 = nn.Conv2d(64, 128, 3)
self.conv4 = nn.Conv2d(128, 256, 3)
self.conv5 = nn.Conv2d(256, 512, 1)
# Then we use maxpool layer with kernel_size = 2, stride = 2
self.pool = nn.MaxPool2d(kernel_size = 2, stride = 2)
# We will apply Batch normalization after each Conv layer
self.bn1 = nn.BatchNorm2d(32)
self.bn2 = nn.BatchNorm2d(64)
self.bn3 = nn.BatchNorm2d(128)
self.bn4 = nn.BatchNorm2d(256)
self.bn5 = nn.BatchNorm2d(512)
# Series of fully-connected layers
self.fc1 = nn.Linear(512*6*6, 1024)
self.fc2 = nn.Linear(1024, 136)
# To avoid overfitting, we'll use Dropout after the first fully-connected layer
self.fc1_drop = nn.Dropout(p=0.4)
# Glorot uniform initialization (Xavier uniform initialization) for fully-connected layers
# based on the NaimishNet paper: https://arxiv.org/pdf/1710.00977.pdf
nn.init.xavier_uniform_(self.fc1.weight)
nn.init.xavier_uniform_(self.fc2.weight)
def forward(self, x):
## TODO: Define the feedforward behavior of this model
## x is the input image and, as an example, here you may choose to include a pool/conv step:
## x = self.pool(F.relu(self.conv1(x)))
# forward behaviour for convolutional layers
# blocks: CONV -> ReLU -> BN -> MAXPOOL
x = F.relu(self.conv1(x))
x = self.bn1(x)
x = self.pool(x)
x = F.relu(self.conv2(x))
x = self.bn2(x)
x = self.pool(x)
x = F.relu(self.conv3(x))
x = self.bn3(x)
x = self.pool(x)
x = F.relu(self.conv4(x))
x = self.bn4(x)
x = self.pool(x)
x = F.relu(self.conv5(x))
x = self.bn5(x)
x = self.pool(x)
# flatten layer
x = x.view(x.size(0), -1)
# forward behaviour for fully-connected layers
x = F.relu(self.fc1(x))
x = self.fc1_drop(x)
x = self.fc2(x)
# a modified x, having gone through all the layers of your model, should be returned
return x
# baseline network architecture
class BaseNet(nn.Module):
def __init__(self):
super(BaseNet, self).__init__()
## TODO: Define all the layers of this CNN, the only requirements are:
## 1. This network takes in a square (same width and height), grayscale image as input
## 2. It ends with a linear layer that represents the keypoints
## it's suggested that you make this last layer output 136 values, 2 for each of the 68 keypoint (x, y) pairs
# As an example, you've been given a convolutional layer, which you may (but don't have to) change:
# 1 input image channel (grayscale), 32 output channels/feature maps, 5x5 square convolution kernel
## Note that among the layers to add, consider including:
# maxpooling layers, multiple conv layers, fully-connected layers, and other layers (such as dropout or batch normalization) to avoid overfitting
# First, define Convolutional layers
# first convolutional block
self.conv1 = nn.Conv2d(1, 32, 5)
self.conv2 = nn.Conv2d(32, 32, 5)
# second convolutional block
self.conv3 = nn.Conv2d(32, 64, 3)
self.conv4 = nn.Conv2d(64, 64, 3)
# third convolutional block
self.conv5 = nn.Conv2d(64, 128, 3)
self.conv6 = nn.Conv2d(128, 128, 3)
# fourth convolutional block
self.conv7 = nn.Conv2d(128, 256, 1)
self.conv8 = nn.Conv2d(256, 256, 1)
# Then we use maxpool layer with kernel_size = 2, stride = 2
self.pool = nn.MaxPool2d(kernel_size = 2, stride = 2)
# Series of fully-connected layers
self.fc1 = nn.Linear(256*11*11, 1024)
self.fc2 = nn.Linear(1024, 136)
# To avoid overfitting, we'll use Dropout after each conv block and the first fully-connected layer, with increasing probability
self.conv1_drop = nn.Dropout(p=0.1)
self.conv2_drop = nn.Dropout(p=0.1)
self.conv3_drop = nn.Dropout(p=0.2)
self.conv4_drop = nn.Dropout(p=0.2)
self.conv5_drop = nn.Dropout(p=0.3)
self.conv6_drop = nn.Dropout(p=0.3)
self.conv7_drop = nn.Dropout(p=0.4)
self.conv8_drop = nn.Dropout(p=0.4)
self.fc1_drop = nn.Dropout(p=0.6)
# Glorot uniform initialization (Xavier uniform initialization) for fully-connected layers
# based on the NaimishNet paper: https://arxiv.org/pdf/1710.00977.pdf
#nn.init.xavier_uniform_(self.fc1.weight)
#nn.init.xavier_uniform_(self.fc2.weight)
def forward(self, x):
## TODO: Define the feedforward behavior of this model
## x is the input image and, as an example, here you may choose to include a pool/conv step:
## x = self.pool(F.relu(self.conv1(x)))
# forward behaviour for convolutional layers
# blocks: CONV -> ReLU -> DROPOUT, with MAXPOOL after every second conv layer
x = F.relu(self.conv1(x))
x = self.conv1_drop(x)
x = F.relu(self.conv2(x))
x = self.pool(x)
x = self.conv2_drop(x)
x = F.relu(self.conv3(x))
x = self.conv3_drop(x)
x = F.relu(self.conv4(x))
x = self.pool(x)
x = self.conv4_drop(x)
x = F.relu(self.conv5(x))
x = self.conv5_drop(x)
x = F.relu(self.conv6(x))
x = self.pool(x)
x = self.conv6_drop(x)
x = F.relu(self.conv7(x))
x = self.conv7_drop(x)
x = F.relu(self.conv8(x))
x = self.pool(x)
x = self.conv8_drop(x)
# flatten layer
x = x.view(x.size(0), -1)
# forward behaviour for fully-connected layers
x = F.relu(self.fc1(x))
x = self.fc1_drop(x)
x = self.fc2(x)
# a modified x, having gone through all the layers of your model, should be returned
return x
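# Hedged smoke test (not part of the original file): assuming a 224x224
# grayscale input, Net's conv/pool stack reduces the feature map to
# 512 x 6 x 6, which is exactly the flatten size expected by fc1. The input
# resolution is an assumption for illustration only.
if __name__ == "__main__":
    net = Net()
    dummy = torch.randn(1, 1, 224, 224)  # one grayscale 224x224 image
    out = net(dummy)
    print(out.shape)  # expected: torch.Size([1, 136]), i.e. 68 (x, y) keypoints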
| 34.528846
| 153
| 0.615427
| 1,078
| 7,182
| 4.055659
| 0.186456
| 0.017841
| 0.035682
| 0.034309
| 0.818847
| 0.808783
| 0.780878
| 0.724154
| 0.724154
| 0.609332
| 0
| 0.055749
| 0.280702
| 7,182
| 207
| 154
| 34.695652
| 0.790554
| 0.472013
| 0
| 0.44086
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004831
| 0
| 1
| 0.043011
| false
| 0
| 0.043011
| 0
| 0.129032
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
91afb49649269bb7ecfc62f87eb3f9596811f14a
| 9,466
|
py
|
Python
|
tests/test_pdspi_fhir_example.py
|
RENCI/pdspi-fhir-example
|
c893e5b51f73407c4fe7898fc61d7f42ae421d98
|
[
"MIT"
] | null | null | null |
tests/test_pdspi_fhir_example.py
|
RENCI/pdspi-fhir-example
|
c893e5b51f73407c4fe7898fc61d7f42ae421d98
|
[
"MIT"
] | null | null | null |
tests/test_pdspi_fhir_example.py
|
RENCI/pdspi-fhir-example
|
c893e5b51f73407c4fe7898fc61d7f42ae421d98
|
[
"MIT"
] | 2
|
2019-10-10T19:12:59.000Z
|
2019-12-05T18:58:49.000Z
|
import requests
import time
from tx.fhir.utils import bundle, unbundle
import sys
import json
import os.path
# from tx.test.utils import bag_equal
patient_id = "1000"
patient_id2 = "2000"
patient_id3 = "0000" # non-existent
patient_resc = {
"id": patient_id,
"resourceType": "Patient"
}
patient_resc2 = {
"id": patient_id2,
"resourceType": "Patient"
}
observation_resc = {
"resourceType": "Observation",
"subject": {
"reference": f"Patient/{patient_id}"
}
}
condition_resc = {
"resourceType": "Condition",
"subject": {
"reference": f"Patient/{patient_id}"
}
}
observation_resc2 = {
"resourceType": "Observation",
"subject": {
"reference": f"Patient/{patient_id2}"
}
}
condition_resc2 = {
"resourceType": "Condition",
"subject": {
"reference": f"Patient/{patient_id2}"
}
}
medication_request_resc = {
"resourceType": "MedicationRequest",
"subject": {
"reference": f"Patient/{patient_id}"
}
}
medication_request_resc2 = {
"resourceType": "MedicationRequest",
"subject": {
"reference": f"Patient/{patient_id2}"
}
}
php = "http://pdspi-fhir-example:8080"
def test_post_patient():
try:
resp1 = requests.post(f"{php}/Patient", json=patient_resc)
assert resp1.status_code == 200
resp2 = requests.get(f"{php}/Patient/{patient_id}")
assert resp2.status_code == 200
assert resp2.json() == patient_resc
finally:
requests.delete(f"{php}/resource")
def test_post_patient2():
try:
resp1 = requests.post(f"{php}/Patient", json=patient_resc)
assert resp1.status_code == 200
resp1 = requests.post(f"{php}/Patient", json=patient_resc2)
assert resp1.status_code == 200
resp2 = requests.get(f"{php}/Patient/{patient_id}")
assert resp2.status_code == 200
assert resp2.json() == patient_resc
finally:
requests.delete(f"{php}/resource")
def test_post_patient_404():
try:
resp1 = requests.post(f"{php}/Patient", json=patient_resc)
assert resp1.status_code == 200
resp2 = requests.get(f"{php}/Patient/{patient_id3}")
assert resp2.status_code == 404
finally:
requests.delete(f"{php}/resource")
def test_post_observation():
try:
resp1 = requests.post(f"{php}/Observation", json=observation_resc)
assert resp1.status_code == 200
resp2 = requests.get(f"{php}/Observation?patient={patient_id}")
assert resp2.status_code == 200
assert resp2.json() == bundle([observation_resc])
finally:
requests.delete(f"{php}/resource")
def test_post_condition():
try:
resp1 = requests.post(f"{php}/Condition", json=condition_resc)
assert resp1.status_code == 200
resp2 = requests.get(f"{php}/Condition?patient={patient_id}")
assert resp2.status_code == 200
assert resp2.json() == bundle([condition_resc])
finally:
requests.delete(f"{php}/resource")
def test_post_observation2():
try:
resp1 = requests.post(f"{php}/Observation", json=observation_resc)
assert resp1.status_code == 200
resp1 = requests.post(f"{php}/Observation", json=observation_resc2)
assert resp1.status_code == 200
resp2 = requests.get(f"{php}/Observation?patient={patient_id}")
assert resp2.status_code == 200
assert resp2.json() == bundle([observation_resc])
finally:
requests.delete(f"{php}/resource")
def test_post_condition2():
try:
resp1 = requests.post(f"{php}/Condition", json=condition_resc)
assert resp1.status_code == 200
resp1 = requests.post(f"{php}/Condition", json=condition_resc2)
assert resp1.status_code == 200
resp2 = requests.get(f"{php}/Condition?patient={patient_id}")
assert resp2.status_code == 200
assert resp2.json() == bundle([condition_resc])
finally:
requests.delete(f"{php}/resource")
def test_post_bundle_patient():
try:
resp1 = requests.post(f"{php}/Bundle", json=bundle([patient_resc, patient_resc2]))
assert resp1.status_code == 200
resp2 = requests.get(f"{php}/Patient/{patient_id}")
assert resp2.status_code == 200
assert resp2.json() == patient_resc
finally:
requests.delete(f"{php}/resource")
def test_post_bundle_observation():
try:
resp1 = requests.post(f"{php}/Bundle", json=bundle([observation_resc, observation_resc2]))
assert resp1.status_code == 200
resp2 = requests.get(f"{php}/Observation?patient={patient_id}")
assert resp2.status_code == 200
assert resp2.json() == bundle([observation_resc])
finally:
requests.delete(f"{php}/resource")
def test_post_bundle_condition():
try:
resp1 = requests.post(f"{php}/Bundle", json=bundle([condition_resc, condition_resc2]))
assert resp1.status_code == 200
resp2 = requests.get(f"{php}/Condition?patient={patient_id}")
assert resp2.status_code == 200
assert resp2.json() == bundle([condition_resc])
finally:
requests.delete(f"{php}/resource")
def test_post_bundle_medication_request():
try:
resp1 = requests.post(f"{php}/Bundle", json=bundle([medication_request_resc, medication_request_resc2]))
assert resp1.status_code == 200
resp2 = requests.get(f"{php}/MedicationRequest?patient={patient_id}")
assert resp2.status_code == 200
assert resp2.json() == bundle([medication_request_resc])
finally:
requests.delete(f"{php}/resource")
config = {
"title": "FHIR data provider",
"pluginType": "f",
"pluginTypeTitle": "FHIR",
"settingsDefaults": {
"pluginSelectors": []
}
}
def test_post_resources():
try:
resp1 = requests.post(f"{php}/Patient", json=patient_resc)
resp1 = requests.post(f"{php}/Patient", json=patient_resc2)
resp1 = requests.post(f"{php}/Observation", json=observation_resc)
resp1 = requests.post(f"{php}/Observation", json=observation_resc2)
resp1 = requests.post(f"{php}/Condition", json=condition_resc)
resp1 = requests.post(f"{php}/Condition", json=condition_resc2)
resp1 = requests.post(f"{php}/MedicationRequest", json=medication_request_resc)
resp1 = requests.post(f"{php}/MedicationRequest", json=medication_request_resc2)
resp1 = requests.post(f"{php}/resource", json={
"resourceTypes": ["Patient", "Observation", "Condition", "MedicationRequest"],
"patientIds": [patient_id, patient_id2]
})
assert resp1.status_code == 200
patients = resp1.json()
assert len(patients) == 2
for patient in patients:
assert patient["resourceType"] == "Bundle"
assert patient["type"] == "batch-response"
assert set(map(lambda x: x["resourceType"], unbundle(patient).value)) == {"Patient", "Bundle"}
finally:
requests.delete(f"{php}/resource")
def test_post_resources_output_to_file():
try:
resp1 = requests.post(f"{php}/Patient", json=patient_resc)
resp1 = requests.post(f"{php}/Patient", json=patient_resc2)
resp1 = requests.post(f"{php}/Observation", json=observation_resc)
resp1 = requests.post(f"{php}/Observation", json=observation_resc2)
resp1 = requests.post(f"{php}/Condition", json=condition_resc)
resp1 = requests.post(f"{php}/Condition", json=condition_resc2)
resp1 = requests.post(f"{php}/MedicationRequest", json=medication_request_resc)
resp1 = requests.post(f"{php}/MedicationRequest", json=medication_request_resc2)
files = [patient_id, patient_id2]
resp = requests.post(f"{php}/resource", json={
"resourceTypes": ["Patient", "Observation", "Condition", "MedicationRequest"],
"patientIds": files,
"outputFile": "outputname"
})
assert resp.status_code == 200
assert "$ref" in resp.json()
name = resp.json()["$ref"]
patients = []
for f in files:
with open(os.path.join(os.environ.get("OUTPUT_DIR"), name, f + ".json")) as out:
patients.append(json.load(out))
assert len(patients) == 2
for patient in patients:
assert patient["resourceType"] == "Bundle"
assert patient["type"] == "batch-response"
assert set(map(lambda x: x["resourceType"], unbundle(patient).value)) == {"Patient", "Bundle"}
finally:
requests.delete(f"{php}/resource")
def test_config():
resp = requests.get(f"{php}/config")
assert resp.status_code == 200
assert resp.json() == config
def test_ui():
resp = requests.get(f"{php}/ui")
assert resp.status_code == 200
# def test_get_patient_ids():
# try:
# resp1 = requests.post(f"{php}/Bundle", json=bundle([patient_resc, patient_resc2]))
# assert resp1.status_code == 200
# resp2 = requests.get(f"{php}/Patient")
# assert resp2.status_code == 200
# assert bag_equal(resp2.json(), [patient_resc["id"], patient_resc2["id"]])
# finally:
# requests.delete(f"{php}/resource")
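# Hedged usage note (not part of the original tests): the suite assumes a FHIR
# example service is reachable at the `php` base URL above, and the file-output
# test additionally assumes the OUTPUT_DIR environment variable points at the
# service's shared output directory. A typical invocation might look like:
#
#   OUTPUT_DIR=/path/to/shared/output pytest tests/test_pdspi_fhir_example.py
#
# where /path/to/shared/output is a placeholder.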
| 26.367688
| 112
| 0.622544
| 1,073
| 9,466
| 5.338304
| 0.095061
| 0.042598
| 0.074895
| 0.092179
| 0.828212
| 0.806739
| 0.782123
| 0.708275
| 0.708275
| 0.672137
| 0
| 0.031888
| 0.234735
| 9,466
| 358
| 113
| 26.441341
| 0.758835
| 0.046588
| 0
| 0.641593
| 0
| 0
| 0.210964
| 0.058373
| 0
| 0
| 0
| 0
| 0.216814
| 1
| 0.066372
| false
| 0
| 0.026549
| 0
| 0.09292
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
91ddf23798f12b69aed5400764426f67520312d4
| 26,624
|
py
|
Python
|
sdks/python/client/argo_workflows/model/object_meta.py
|
AnuragThePathak/argo-workflows
|
1d71fb3c4ebdb2891435ed12257743331ff34436
|
[
"Apache-2.0"
] | 1
|
2022-02-24T01:45:03.000Z
|
2022-02-24T01:45:03.000Z
|
sdks/python/client/argo_workflows/model/object_meta.py
|
AnuragThePathak/argo-workflows
|
1d71fb3c4ebdb2891435ed12257743331ff34436
|
[
"Apache-2.0"
] | 18
|
2022-02-01T23:09:58.000Z
|
2022-03-31T23:28:41.000Z
|
sdks/python/client/argo_workflows/model/object_meta.py
|
AnuragThePathak/argo-workflows
|
1d71fb3c4ebdb2891435ed12257743331ff34436
|
[
"Apache-2.0"
] | null | null | null |
"""
Argo Workflows API
Argo Workflows is an open source container-native workflow engine for orchestrating parallel jobs on Kubernetes. For more information, please see https://argoproj.github.io/argo-workflows/ # noqa: E501
The version of the OpenAPI document: VERSION
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from argo_workflows.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
from ..model_utils import OpenApiModel
from argo_workflows.exceptions import ApiAttributeError
def lazy_import():
from argo_workflows.model.managed_fields_entry import ManagedFieldsEntry
from argo_workflows.model.owner_reference import OwnerReference
globals()['ManagedFieldsEntry'] = ManagedFieldsEntry
globals()['OwnerReference'] = OwnerReference
class ObjectMeta(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'annotations': ({str: (str,)},), # noqa: E501
'cluster_name': (str,), # noqa: E501
'creation_timestamp': (datetime,), # noqa: E501
'deletion_grace_period_seconds': (int,), # noqa: E501
'deletion_timestamp': (datetime,), # noqa: E501
'finalizers': ([str],), # noqa: E501
'generate_name': (str,), # noqa: E501
'generation': (int,), # noqa: E501
'labels': ({str: (str,)},), # noqa: E501
'managed_fields': ([ManagedFieldsEntry],), # noqa: E501
'name': (str,), # noqa: E501
'namespace': (str,), # noqa: E501
'owner_references': ([OwnerReference],), # noqa: E501
'resource_version': (str,), # noqa: E501
'self_link': (str,), # noqa: E501
'uid': (str,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'annotations': 'annotations', # noqa: E501
'cluster_name': 'clusterName', # noqa: E501
'creation_timestamp': 'creationTimestamp', # noqa: E501
'deletion_grace_period_seconds': 'deletionGracePeriodSeconds', # noqa: E501
'deletion_timestamp': 'deletionTimestamp', # noqa: E501
'finalizers': 'finalizers', # noqa: E501
'generate_name': 'generateName', # noqa: E501
'generation': 'generation', # noqa: E501
'labels': 'labels', # noqa: E501
'managed_fields': 'managedFields', # noqa: E501
'name': 'name', # noqa: E501
'namespace': 'namespace', # noqa: E501
'owner_references': 'ownerReferences', # noqa: E501
'resource_version': 'resourceVersion', # noqa: E501
'self_link': 'selfLink', # noqa: E501
'uid': 'uid', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""ObjectMeta - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
annotations ({str: (str,)}): Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations. [optional] # noqa: E501
cluster_name (str): The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request.. [optional] # noqa: E501
creation_timestamp (datetime): Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers.. [optional] # noqa: E501
deletion_grace_period_seconds (int): Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only.. [optional] # noqa: E501
deletion_timestamp (datetime): Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers.. [optional] # noqa: E501
finalizers ([str]): Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list.. [optional] # noqa: E501
generate_name (str): GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server. If this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header). Applied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency. [optional] # noqa: E501
generation (int): A sequence number representing a specific generation of the desired state. Populated by the system. Read-only.. [optional] # noqa: E501
labels ({str: (str,)}): Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels. [optional] # noqa: E501
managed_fields ([ManagedFieldsEntry]): ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \"ci-cd\". The set of fields is always in the version that the workflow used when modifying the object.. [optional] # noqa: E501
name (str): Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names. [optional] # noqa: E501
namespace (str): Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty. Must be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces. [optional] # noqa: E501
owner_references ([OwnerReference]): List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller.. [optional] # noqa: E501
resource_version (str): An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources. Populated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency. [optional] # noqa: E501
self_link (str): SelfLink is a URL representing this object. Populated by the system. Read-only. DEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release.. [optional] # noqa: E501
uid (str): UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations. Populated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""ObjectMeta - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
annotations ({str: (str,)}): Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations. [optional] # noqa: E501
cluster_name (str): The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request.. [optional] # noqa: E501
creation_timestamp (datetime): Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers.. [optional] # noqa: E501
deletion_grace_period_seconds (int): Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only.. [optional] # noqa: E501
deletion_timestamp (datetime): Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers.. [optional] # noqa: E501
finalizers ([str]): Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list.. [optional] # noqa: E501
generate_name (str): GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server. If this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header). Applied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency. [optional] # noqa: E501
generation (int): A sequence number representing a specific generation of the desired state. Populated by the system. Read-only.. [optional] # noqa: E501
labels ({str: (str,)}): Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels. [optional] # noqa: E501
managed_fields ([ManagedFieldsEntry]): ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \"ci-cd\". The set of fields is always in the version that the workflow used when modifying the object.. [optional] # noqa: E501
name (str): Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names. [optional] # noqa: E501
namespace (str): Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty. Must be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces. [optional] # noqa: E501
owner_references ([OwnerReference]): List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller.. [optional] # noqa: E501
resource_version (str): An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources. Populated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency. [optional] # noqa: E501
self_link (str): SelfLink is a URL representing this object. Populated by the system. Read-only. DEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release.. [optional] # noqa: E501
uid (str): UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations. Populated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
| 82.17284
| 978
| 0.673715
| 3,541
| 26,624
| 4.965829
| 0.144027
| 0.030937
| 0.029117
| 0.012511
| 0.855096
| 0.840764
| 0.837409
| 0.83212
| 0.83212
| 0.83212
| 0
| 0.012669
| 0.267691
| 26,624
| 323
| 979
| 82.427245
| 0.889214
| 0.733248
| 0
| 0.393333
| 0
| 0
| 0.191479
| 0.036519
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04
| false
| 0.013333
| 0.066667
| 0.006667
| 0.186667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
37eaeffdf6f8489bea81e1ffa1c67a1c0f7218fa
| 161
|
py
|
Python
|
joplin/snippets/contact/fixtures/__init__.py
|
cityofaustin/joplin
|
01424e46993e9b1c8e57391d6b7d9448f31d596b
|
[
"MIT"
] | 15
|
2018-09-27T07:36:30.000Z
|
2021-08-03T16:01:21.000Z
|
joplin/snippets/contact/fixtures/__init__.py
|
cityofaustin/joplin
|
01424e46993e9b1c8e57391d6b7d9448f31d596b
|
[
"MIT"
] | 183
|
2017-11-16T23:30:47.000Z
|
2020-12-18T21:43:36.000Z
|
joplin/snippets/contact/fixtures/__init__.py
|
cityofaustin/joplin
|
01424e46993e9b1c8e57391d6b7d9448f31d596b
|
[
"MIT"
] | 12
|
2017-12-12T22:48:05.000Z
|
2021-03-01T18:01:24.000Z
|
from .test_cases.name import name
# You can import any test_case fixture individually
# Or you can load them all with this function
def load_all():
name()
| 20.125
| 51
| 0.751553
| 27
| 161
| 4.37037
| 0.703704
| 0.101695
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.192547
| 161
| 7
| 52
| 23
| 0.907692
| 0.57764
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5309361238e740b0c01451df30688ac126d4ecdd
| 3,052
|
py
|
Python
|
elegy/metrics/f1_test.py
|
sooheon/elegy
|
cad6f832cac1a34684c4f4f2c4a386cbfa817623
|
[
"Apache-2.0"
] | null | null | null |
elegy/metrics/f1_test.py
|
sooheon/elegy
|
cad6f832cac1a34684c4f4f2c4a386cbfa817623
|
[
"Apache-2.0"
] | null | null | null |
elegy/metrics/f1_test.py
|
sooheon/elegy
|
cad6f832cac1a34684c4f4f2c4a386cbfa817623
|
[
"Apache-2.0"
] | null | null | null |
from unittest import TestCase
import jax.numpy as jnp
import tensorflow_addons as tfa
import numpy as np
import elegy
class F1Test(TestCase):
#
def test_basic(self):
y_true = jnp.array([0, 1, 1, 1])
y_pred = jnp.array([1, 0, 1, 1])
assert np.allclose(
elegy.metrics.F1().call_with_defaults()(y_true, y_pred),
tfa.metrics.F1Score(2, average="micro", threshold=0.5)(y_true, y_pred),
) # 2 * (0.44445 / 1.33334)
y_true = jnp.array([1, 1, 1, 1])
y_pred = jnp.array([1, 1, 0, 0])
assert np.allclose(
elegy.metrics.F1().call_with_defaults()(y_true, y_pred),
tfa.metrics.F1Score(2, average="micro", threshold=0.5)(y_true, y_pred),
) # 2 * (0.5 / 1.5)
y_true = (np.random.uniform(0, 1, size=(5, 6, 7)) > 0.5).astype(np.float32)
y_pred = np.random.uniform(0, 1, size=(5, 6, 7))
sample_weight = np.expand_dims(
(np.random.uniform(0, 1, size=(6, 7)) > 0.5).astype(np.float32), axis=0
)
assert np.allclose(
tfa.metrics.F1Score(2, average="micro", threshold=0.3)(y_true, y_pred),
elegy.metrics.F1(threshold=0.3).call_with_defaults()(
jnp.asarray(y_true), jnp.asarray(y_pred)
),
)
assert np.allclose(
tfa.metrics.F1Score(2, average="micro", threshold=0.3)(
y_true, y_pred, sample_weight=sample_weight
),
elegy.metrics.F1(threshold=0.3).call_with_defaults()(
jnp.asarray(y_true), jnp.asarray(y_pred), sample_weight=sample_weight
),
)
#
def test_cumulative(self):
em = elegy.metrics.F1(threshold=0.3)
tm = tfa.metrics.F1Score(2, average="micro", threshold=0.3)
# 1st run
y_true = (np.random.uniform(0, 1, size=(5, 6, 7)) > 0.5).astype(np.float32)
y_pred = np.random.uniform(0, 1, size=(5, 6, 7))
sample_weight = np.expand_dims(
(np.random.uniform(0, 1, size=(6, 7)) > 0.5).astype(np.float32), axis=0
)
assert np.allclose(
tm(y_true, y_pred, sample_weight=sample_weight),
em.call_with_defaults()(
jnp.asarray(y_true),
jnp.asarray(y_pred),
sample_weight=jnp.asarray(sample_weight),
),
)
# 2nd run
y_true = (np.random.uniform(0, 1, size=(5, 6, 7)) > 0.5).astype(np.float32)
y_pred = np.random.uniform(0, 1, size=(5, 6, 7))
sample_weight = np.expand_dims(
(np.random.uniform(0, 1, size=(6, 7)) > 0.5).astype(np.float32), axis=0
)
assert np.allclose(
tm(y_true, y_pred, sample_weight=sample_weight),
em.call_with_defaults()(
jnp.asarray(y_true),
jnp.asarray(y_pred),
sample_weight=jnp.asarray(sample_weight),
),
)
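# Hedged reference check (not part of the original tests): for the first case in
# test_basic, y_true = [0, 1, 1, 1] and y_pred = [1, 0, 1, 1] give TP=2, FP=1,
# FN=1, so precision = recall = 2/3 and
# F1 = 2 * (precision * recall) / (precision + recall) = 2 * (0.4444 / 1.3333)
# ~= 0.6667, matching the inline comment "2 * (0.44445 / 1.33334)".
def _manual_f1(y_true, y_pred):
    tp = sum(1 for t, p in zip(y_true, y_pred) if t == 1 and p == 1)
    fp = sum(1 for t, p in zip(y_true, y_pred) if t == 0 and p == 1)
    fn = sum(1 for t, p in zip(y_true, y_pred) if t == 1 and p == 0)
    precision = tp / (tp + fp)
    recall = tp / (tp + fn)
    return 2 * precision * recall / (precision + recall)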
| 35.08046
| 86
| 0.533093
| 421
| 3,052
| 3.705463
| 0.142518
| 0.054487
| 0.086538
| 0.092308
| 0.864103
| 0.864103
| 0.840385
| 0.840385
| 0.803205
| 0.776923
| 0
| 0.066184
| 0.321756
| 3,052
| 86
| 87
| 35.488372
| 0.68744
| 0.018021
| 0
| 0.58209
| 0
| 0
| 0.008609
| 0
| 0
| 0
| 0
| 0
| 0.089552
| 1
| 0.029851
| false
| 0
| 0.074627
| 0
| 0.119403
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
5345a32de26de4e7b0b687d53b01498b19081517
| 18,875
|
py
|
Python
|
api_tests/view_only_links/views/test_view_only_link_nodes.py
|
alexschiller/osf.io
|
4122d4be152c6189142c2ebb19cfdee09c77035d
|
[
"Apache-2.0"
] | 1
|
2015-10-02T18:35:53.000Z
|
2015-10-02T18:35:53.000Z
|
api_tests/view_only_links/views/test_view_only_link_nodes.py
|
alexschiller/osf.io
|
4122d4be152c6189142c2ebb19cfdee09c77035d
|
[
"Apache-2.0"
] | 4
|
2016-05-13T14:24:16.000Z
|
2017-03-30T15:28:31.000Z
|
api_tests/view_only_links/views/test_view_only_link_nodes.py
|
alexschiller/osf.io
|
4122d4be152c6189142c2ebb19cfdee09c77035d
|
[
"Apache-2.0"
] | null | null | null |
from nose.tools import * # flake8: noqa
from api.base.settings.defaults import API_BASE
from api_tests.nodes.views.test_node_view_only_links_list import ViewOnlyLinkTestCase
from osf_tests.factories import NodeFactory
class TestViewOnlyLinksNodes(ViewOnlyLinkTestCase):
def setUp(self):
super(TestViewOnlyLinksNodes, self).setUp()
self.url = '/{}view_only_links/{}/nodes/'.format(API_BASE, self.view_only_link._id)
def test_admin_can_view_vol_nodes_detail(self):
res = self.app.get(self.url, auth=self.user.auth)
assert_equal(res.status_code, 200)
def test_read_write_cannot_view_vol_detail(self):
res = self.app.get(self.url, auth=self.read_write_user.auth, expect_errors=True)
assert_equal(res.status_code, 403)
def test_read_only_cannot_view_vol_detail(self):
res = self.app.get(self.url, auth=self.read_only_user.auth, expect_errors=True)
assert_equal(res.status_code, 403)
def test_logged_in_user_cannot_view_vol_detail(self):
res = self.app.get(self.url, auth=self.non_contributor.auth, expect_errors=True)
assert_equal(res.status_code, 403)
def test_unauthenticated_user_cannot_view_vol_detail(self):
res = self.app.get(self.url, expect_errors=True)
assert_equal(res.status_code, 403)
class TestViewOnlyLinkNodesSet(ViewOnlyLinkTestCase):
def setUp(self):
super(TestViewOnlyLinkNodesSet, self).setUp()
self.component_one = NodeFactory(creator=self.user, parent=self.public_project, is_public=True)
self.component_two = NodeFactory(creator=self.user, parent=self.public_project, is_public=False)
self.project_two = NodeFactory(creator=self.user)
self.first_level_component = NodeFactory(creator=self.user, parent=self.public_project)
self.second_level_component = NodeFactory(creator=self.user, parent=self.first_level_component)
self.component_one_payload = {
'data': [
{
'type': 'nodes',
'id': self.component_one._id
}
]
}
self.url = '/{}view_only_links/{}/relationships/nodes/'.format(API_BASE, self.view_only_link._id)
def test_admin_can_set_single_node(self):
res = self.app.post_json_api(self.url, self.component_one_payload, auth=self.user.auth)
self.view_only_link.reload()
assert_equal(res.status_code, 201)
assert_in(self.public_project, self.view_only_link.nodes.all())
assert_in(self.component_one, self.view_only_link.nodes.all())
def test_admin_can_set_multiple_nodes(self):
payload = {
'data': [
{
'type': 'nodes',
'id': self.component_one._id
}, {
'type': 'nodes',
'id': self.component_two._id
}
]
}
res = self.app.post_json_api(self.url, payload, auth=self.user.auth)
self.view_only_link.reload()
assert_equal(res.status_code, 201)
assert_in(self.public_project, self.view_only_link.nodes.all())
assert_in(self.component_one, self.view_only_link.nodes.all())
assert_in(self.component_two, self.view_only_link.nodes.all())
def test_set_nodes_does_not_duplicate_nodes(self):
payload = {
'data': [
{
'type': 'nodes',
'id': self.public_project._id
}, {
'type': 'nodes',
'id': self.component_one._id
}, {
'type': 'nodes',
'id': self.component_one._id
}
]
}
res = self.app.post_json_api(self.url, payload, auth=self.user.auth)
self.view_only_link.reload()
assert_equal(res.status_code, 201)
assert_equal(self.view_only_link.nodes.count(), 2)
assert_in(self.public_project, self.view_only_link.nodes.all())
assert_in(self.component_one, self.view_only_link.nodes.all())
def test_set_node_not_component(self):
"""
Project One (already associated with VOL)
-> Level One Component (can be associated with VOL)
Project Two (CANNOT be associated with this VOL)
"""
payload = {
'data': [
{
'type': 'nodes',
'id': self.project_two._id
},
]
}
res = self.app.post_json_api(self.url, payload, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], 'The node {0} cannot be affiliated with this View Only Link because the node you\'re trying to affiliate is not descended from the node that the View Only Link is attached to.'.format(self.project_two._id))
def test_set_node_second_level_component_without_first_level_parent(self):
"""
Parent Project (already associated with VOL)
-> First Level Component (NOT included)
-> Second Level Component (included -- OK)
"""
payload = {
'data': [
{
'type': 'nodes',
'id': self.second_level_component._id
},
]
}
res = self.app.post_json_api(self.url, payload, auth=self.user.auth, expect_errors=True)
self.view_only_link.reload()
assert_equal(res.status_code, 201)
assert_equal(len(res.json['data']), 2)
assert_in(self.public_project, self.view_only_link.nodes.all())
assert_in(self.second_level_component, self.view_only_link.nodes.all())
def test_set_node_second_level_component_with_first_level_parent(self):
"""
Parent Project (already associated with VOL)
-> First Level Component (included)
-> Second Level Component (included -- OK)
"""
payload = {
'data': [
{
'type': 'nodes',
'id': self.first_level_component._id
},
{
'type': 'nodes',
'id': self.second_level_component._id
}
]
}
res = self.app.post_json_api(self.url, payload, auth=self.user.auth)
self.view_only_link.reload()
assert_equal(res.status_code, 201)
assert_in(self.first_level_component, self.view_only_link.nodes.all())
assert_in(self.second_level_component, self.view_only_link.nodes.all())
def test_invalid_nodes_in_payload(self):
payload = {
'data': [{
'type': 'nodes',
'id': 'abcde'
}]
}
res = self.app.post_json_api(self.url, payload, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 404)
def test_type_required_in_payload(self):
payload = {
'data': [{
'id': self.component_one._id
}]
}
res = self.app.post_json_api(self.url, payload, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
def test_id_required_in_payload(self):
payload = {
'data': [{
'type': 'nodes',
}]
}
res = self.app.post_json_api(self.url, payload, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
def test_read_write_contributor_cannot_set_nodes(self):
res = self.app.post_json_api(self.url, self.component_one_payload, auth=self.read_write_user.auth, expect_errors=True)
assert_equal(res.status_code, 403)
def test_read_only_contributor_cannot_set_nodes(self):
res = self.app.post_json_api(self.url, self.component_one_payload, auth=self.read_only_user.auth, expect_errors=True)
assert_equal(res.status_code, 403)
def test_logged_in_user_cannot_set_nodes(self):
res = self.app.post_json_api(self.url, self.component_one_payload, auth=self.non_contributor.auth, expect_errors=True)
assert_equal(res.status_code, 403)
def test_unauthenticated_user_cannot_set_nodes(self):
res = self.app.post_json_api(self.url, self.component_one_payload, expect_errors=True)
assert_equal(res.status_code, 401)
class TestViewOnlyLinkNodesUpdate(TestViewOnlyLinkNodesSet):
def setUp(self):
super(TestViewOnlyLinkNodesUpdate, self).setUp()
self.update_payload = {
'data': [{
'type': 'nodes',
'id': self.public_project._id
}, {
'type': 'nodes',
'id': self.component_one._id
}]
}
def test_admin_can_update_nodes_single_node_to_add(self):
res = self.app.put_json_api(self.url, self.update_payload, auth=self.user.auth)
self.view_only_link.reload()
assert_equal(res.status_code, 200)
assert_equal(len(res.json['data']), 2)
assert_in(self.public_project, self.view_only_link.nodes.all())
assert_in(self.component_one, self.view_only_link.nodes.all())
def test_admin_can_update_nodes_multiple_nodes_to_add(self):
self.update_payload['data'].append({
'type': 'nodes',
'id': self.component_two._id
})
res = self.app.put_json_api(self.url, self.update_payload, auth=self.user.auth)
self.view_only_link.reload()
assert_equal(res.status_code, 200)
assert_equal(len(res.json['data']), 3)
assert_in(self.public_project, self.view_only_link.nodes.all())
assert_in(self.component_one, self.view_only_link.nodes.all())
assert_in(self.component_two, self.view_only_link.nodes.all())
def test_admin_can_update_nodes_single_node_to_remove(self):
self.view_only_link.nodes.add(self.component_one)
self.view_only_link.save()
self.update_payload['data'].pop()
res = self.app.put_json_api(self.url, self.update_payload, auth=self.user.auth)
self.view_only_link.reload()
assert_equal(res.status_code, 200)
assert_equal(len(res.json['data']), 1)
assert_in(self.public_project, self.view_only_link.nodes.all())
assert_not_in(self.component_one, self.view_only_link.nodes.all())
def test_admin_can_update_nodes_multiple_nodes_to_remove(self):
self.view_only_link.nodes.add(self.component_one)
self.view_only_link.nodes.add(self.component_two)
self.view_only_link.save()
self.update_payload['data'].pop()
res = self.app.put_json_api(self.url, self.update_payload, auth=self.user.auth)
self.view_only_link.reload()
assert_equal(res.status_code, 200)
assert_equal(len(res.json['data']), 1)
assert_in(self.public_project, self.view_only_link.nodes.all())
assert_not_in(self.component_one, self.view_only_link.nodes.all())
assert_not_in(self.component_two, self.view_only_link.nodes.all())
def test_admin_can_update_nodes_single_add_single_remove(self):
self.view_only_link.nodes.add(self.component_two)
self.view_only_link.save()
res = self.app.put_json_api(self.url, self.update_payload, auth=self.user.auth)
self.view_only_link.reload()
assert_equal(res.status_code, 200)
assert_equal(len(res.json['data']), 2)
assert_in(self.public_project, self.view_only_link.nodes.all())
assert_in(self.component_one, self.view_only_link.nodes.all())
assert_not_in(self.component_two, self.view_only_link.nodes.all())
def test_admin_can_update_nodes_multiple_add_multiple_remove(self):
self.view_only_link.nodes.add(self.component_one)
self.view_only_link.nodes.add(self.component_two)
self.view_only_link.save()
component_three = NodeFactory(creator=self.user, parent=self.public_project)
component_four = NodeFactory(creator=self.user, parent=self.public_project)
payload = {
'data': [{
'type': 'nodes',
'id': self.public_project._id,
}, {
'type': 'nodes',
'id': component_three._id
}, {
'type': 'nodes',
'id': component_four._id
}]
}
res = self.app.put_json_api(self.url, payload, auth=self.user.auth)
self.view_only_link.reload()
assert_equal(res.status_code, 200)
assert_equal(len(res.json['data']), 3)
assert_in(self.public_project, self.view_only_link.nodes.all())
assert_not_in(self.component_one, self.view_only_link.nodes.all())
assert_not_in(self.component_two, self.view_only_link.nodes.all())
assert_in(component_three, self.view_only_link.nodes.all())
assert_in(component_four, self.view_only_link.nodes.all())
def test_update_nodes_no_changes(self):
payload = {
'data': [{
'type': 'nodes',
'id': self.public_project._id,
}]
}
res = self.app.put_json_api(self.url, payload, auth=self.user.auth, expect_errors=True)
self.view_only_link.reload()
assert_equal(res.status_code, 200)
assert_equal(len(res.json['data']), 1)
assert_in(self.public_project, self.view_only_link.nodes.all())
def test_update_nodes_top_level_node_not_included(self):
"""
Parent Project (NOT included)
-> First Level Component (included) -- NOT ALLOWED
"""
payload = {
'data': [{
'type': 'nodes',
'id': self.component_one._id
}]
}
res = self.app.put_json_api(self.url, payload, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], 'The node {0} cannot be affiliated with this View Only Link because the node you\'re trying to affiliate is not descended from the node that the View Only Link is attached to.'.format(self.component_one._id))
def test_update_node_not_component(self):
payload = {
'data': [{
'type': 'nodes',
'id': self.project_two._id
}]
}
res = self.app.put_json_api(self.url, payload, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], 'The node {0} cannot be affiliated with this View Only Link because the node you\'re trying to affiliate is not descended from the node that the View Only Link is attached to.'.format(self.project_two._id))
def test_update_node_second_level_component_without_first_level_parent(self):
"""
Parent Project (included)
-> First Level Component (NOT included)
-> Second Level Component (included) -- OK
"""
payload = {
'data': [{
'type': 'nodes',
'id': self.public_project._id
}, {
'type': 'nodes',
'id': self.second_level_component._id
}]
}
res = self.app.put_json_api(self.url, payload, auth=self.user.auth, expect_errors=True)
self.view_only_link.reload()
assert_equal(res.status_code, 200)
assert_equal(len(res.json['data']), 2)
assert_in(self.public_project, self.view_only_link.nodes.all())
assert_in(self.second_level_component, self.view_only_link.nodes.all())
def test_update_node_second_level_component_with_first_level_parent(self):
"""
Parent Project (included)
-> First Level Component (included)
-> Second Level Component (included) -- OK
"""
payload = {
'data': [{
'type': 'nodes',
'id': self.public_project._id
}, {
'type': 'nodes',
'id': self.first_level_component._id
}, {
'type': 'nodes',
'id': self.second_level_component._id
}]
}
res = self.app.put_json_api(self.url, payload, auth=self.user.auth, expect_errors=True)
self.view_only_link.reload()
assert_equal(res.status_code, 200)
assert_equal(len(res.json['data']), 3)
assert_in(self.public_project, self.view_only_link.nodes.all())
assert_in(self.first_level_component, self.view_only_link.nodes.all())
assert_in(self.second_level_component, self.view_only_link.nodes.all())
def test_invalid_nodes_in_payload(self):
payload = {
'data': [{
'type': 'nodes',
'id': self.public_project._id
}, {
'type': 'nodes',
'id': 'abcde'
}]
}
res = self.app.put_json_api(self.url, payload, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 404)
def test_type_required_in_payload(self):
payload = {
'data': [{
'type': 'nodes',
'id': self.public_project._id
}, {
'id': self.component_one._id
}]
}
res = self.app.put_json_api(self.url, payload, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
def test_id_required_in_payload(self):
payload = {
'data': [{
'type': 'nodes',
'id': self.public_project._id
}, {
'type': 'nodes'
}]
}
res = self.app.put_json_api(self.url, payload, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
def test_read_write_contributor_cannot_update_nodes(self):
res = self.app.put_json_api(self.url, self.update_payload, auth=self.read_write_user.auth, expect_errors=True)
assert_equal(res.status_code, 403)
def test_read_only_contributor_cannot_update_nodes(self):
res = self.app.put_json_api(self.url, self.update_payload, auth=self.read_only_user.auth, expect_errors=True)
assert_equal(res.status_code, 403)
def test_logged_in_user_cannot_update_nodes(self):
res = self.app.put_json_api(self.url, self.update_payload, auth=self.non_contributor.auth, expect_errors=True)
assert_equal(res.status_code, 403)
def test_unauthenticated_user_cannot_update_nodes(self):
res = self.app.put_json_api(self.url, self.update_payload, expect_errors=True)
assert_equal(res.status_code, 401)
| 41.122004
| 262
| 0.61902
| 2,394
| 18,875
| 4.567669
| 0.055138
| 0.051943
| 0.074623
| 0.090718
| 0.918061
| 0.893187
| 0.888797
| 0.886694
| 0.866118
| 0.846548
| 0
| 0.00911
| 0.267232
| 18,875
| 458
| 263
| 41.21179
| 0.781505
| 0.04053
| 0
| 0.692722
| 0
| 0.053908
| 0.046133
| 0.003914
| 0
| 0
| 0
| 0
| 0.229111
| 1
| 0.105121
| false
| 0
| 0.010782
| 0
| 0.123989
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
7255aa5e6b9b679c8d126a47ff93d61bcc2c8950
| 49
|
py
|
Python
|
python/shotgun_globals/__init__.py
|
Consulado/tk-framework-consuladoutils
|
b5eecd001f2d4a6b39459ffc6116c4a6520112f0
|
[
"Apache-2.0"
] | 1
|
2021-06-17T20:00:56.000Z
|
2021-06-17T20:00:56.000Z
|
python/shotgun_globals/__init__.py
|
Consulado/tk-framework-consuladoutils
|
b5eecd001f2d4a6b39459ffc6116c4a6520112f0
|
[
"Apache-2.0"
] | null | null | null |
python/shotgun_globals/__init__.py
|
Consulado/tk-framework-consuladoutils
|
b5eecd001f2d4a6b39459ffc6116c4a6520112f0
|
[
"Apache-2.0"
] | 1
|
2021-05-18T18:17:44.000Z
|
2021-05-18T18:17:44.000Z
|
from .entities import get_custom_entity_by_alias
| 24.5
| 48
| 0.897959
| 8
| 49
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.081633
| 49
| 1
| 49
| 49
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7264cf2afe49c980ad9705a79b104082158f5c37
| 12,162
|
py
|
Python
|
hazma/vector_mediator/form_factors/utils.py
|
LoganAMorrison/Hazma
|
e9612729767ff48d5ce50633393f81ee021242d2
|
[
"MIT"
] | 6
|
2019-07-30T18:14:43.000Z
|
2020-10-25T04:58:44.000Z
|
hazma/vector_mediator/form_factors/utils.py
|
LoganAMorrison/Hazma
|
e9612729767ff48d5ce50633393f81ee021242d2
|
[
"MIT"
] | 8
|
2017-12-19T08:06:59.000Z
|
2021-04-22T02:15:26.000Z
|
hazma/vector_mediator/form_factors/utils.py
|
LoganAMorrison/Hazma
|
e9612729767ff48d5ce50633393f81ee021242d2
|
[
"MIT"
] | 1
|
2020-04-01T11:08:49.000Z
|
2020-04-01T11:08:49.000Z
|
from typing import Generator, Optional, Union
import numpy as np
import numpy.typing as npt
from scipy.special import gamma # type:ignore
# Pion mass in GeV
MPI_GEV = 0.13957018
# Neutral Kaon mass in GeV
MK0_GEV = 0.497611
# Charged Kaon mass in GeV
MKP_GEV = 0.493677
# Eta mass in GeV
META_GEV = 0.547862
def beta2(
s: Union[float, npt.NDArray[np.float64]],
m1: float,
m2: float,
) -> Union[float, npt.NDArray[np.float64]]:
"""
Return the square of the final state momentum times 4 / s.
Parameters
----------
s: Union[float, npt.NDArray]
Center-of-mass energy squared.
m1: float
Mass of the first final state particle.
m2: float
Mass of the second final state particle.
Returns
-------
beta: Union[float, npt.NDArray]
Square of the final state momentum times 4 / s.
"""
return np.clip(
(1.0 - (m1 + m2) ** 2 / s) * (1.0 - (m1 - m2) ** 2 / s), 0.0, None
) # type:ignore
def beta(
s: Union[float, npt.NDArray[np.float64]],
m1: float,
m2: float,
):
"""
Return the final state momentum times 2 / sqrt(s).
Parameters
----------
s: Union[float, npt.NDArray]
Center-of-mass energy squared.
m1: float
Mass of the first final state particle.
m2: float
Mass of the second final state particle.
Returns
-------
beta: Union[float, npt.NDArray]
Final state momentum times 2 / sqrt(s).
"""
return np.sqrt(beta2(s, m1, m2))
def dhhatds(
mres: Union[float, npt.NDArray[np.float64]],
gamma: Union[float, npt.NDArray[np.float64]],
m1: float,
m2: float,
) -> Union[float, npt.NDArray[np.float64]]:
"""
Compute the derivative of the Hhat(s) function for the Gounaris-Sakurai
Breit-Wigner function evaluated at the resonance mass. See ArXiv:1002.0279
Eqn.(4) for details.
Parameters
----------
mres: Union[float, npt.NDArray]
Mass of the resonance.
gamma: Union[float, npt.NDArray]
Width of the resonance.
m1: float
Mass of the first final state particle.
m2: float
Mass of the second final state particle.
Returns
-------
dhhat: Union[float, npt.NDArray]
The value of the derivative of Hhat(s) evaluated at the resonance
mass.
"""
v2 = beta2(mres ** 2, m1, m2)
v = np.sqrt(v2)
r = (m1 ** 2 + m2 ** 2) / mres ** 2
return (
gamma
/ np.pi
/ mres
/ v2
* (
(3.0 - 2.0 * v2 - 3.0 * r) * np.log((1.0 + v) / (1.0 - v))
+ 2.0 * v * (1.0 - r / (1.0 - v2))
)
)
def hhat(
s: Union[float, npt.NDArray[np.float64]],
mres: Union[float, npt.NDArray[np.float64]],
gamma: Union[float, npt.NDArray[np.float64]],
m1: float,
m2: float,
reshape=False,
) -> Union[float, npt.NDArray[np.float64]]:
"""
Compute the Hhat(s) function for the Gounaris-Sakurai Breit-Wigner
function. See ArXiv:1002.0279 Eqn.(4) for details.
Parameters
----------
s: Union[float, npt.NDArray]
Center-of-mass energy squared.
mres: Union[float, npt.NDArray]
Mass of the resonance.
gamma: Union[float, npt.NDArray]
Width of the resonance.
m1: float
Mass of the first final state particle.
m2: float
Mass of the second final state particle.
reshape: Optional[bool]
If true, a different value is computed for each `s`. This is useful
for computing form-factors for many squared center-of-mass energies at
once.
Returns
-------
hhat: Union[float, npt.NDArray]
The value of the Hhat(s) function.
"""
vr = beta(mres ** 2, m1, m2)
v = beta(s, m1, m2)
if hasattr(s, "__len__") and reshape:
ss = np.array(s)
return (
gamma
/ mres
/ np.pi
* ss[:, np.newaxis]
* (v[:, np.newaxis] / vr) ** 3
* np.log((1.0 + v[:, np.newaxis]) / (1.0 - v[:, np.newaxis]))
)
return gamma / mres / np.pi * s * (v / vr) ** 3 * np.log((1.0 + v) / (1.0 - v))
def h(
s: Union[float, npt.NDArray[np.float64]],
mres: Union[float, npt.NDArray[np.float64]],
gamma: Union[float, npt.NDArray[np.float64]],
m1: float,
m2: float,
dh: Union[float, npt.NDArray[np.float64]],
hres: Union[float, npt.NDArray[np.float64]],
reshape=False,
) -> Union[float, npt.NDArray[np.float64]]:
"""
Compute the H(s) function for the Gounaris-Sakurai Breit-Wigner function.
See ArXiv:1002.0279 Eqn.(3) for details.
Parameters
----------
s: Union[float, npt.NDArray]
Center-of-mass energy squared.
mres: Union[float, npt.NDArray]
Mass of the resonance.
gamma: Union[float, npt.NDArray]
Width of the resonance.
m1: float
Mass of the first final state particle.
m2: float
Mass of the second final state particle.
dh: Union[float, npt.NDArray]
Derivative of the H-hat function evaluated at the resonance
mass.
hres: Union[float, npt.NDArray]
Value of the H(s) function at s=mres^2.
reshape: Optional[bool]
If true, a different value is computed for each `s`. This is useful
for computing form-factors for many squared center-of-mass energies at
once.
Returns
-------
h: Union[float, npt.NDArray]
The value of the H(s) function.
"""
if hasattr(s, "__len__") and reshape:
ss = np.array(s)
return (
hhat(ss, mres, gamma, m1, m2, reshape=True)
- hres
- (ss[:, np.newaxis] - mres ** 2) * dh
)
if s != 0.0:
return hhat(s, mres, gamma, m1, m2) - hres - (s - mres ** 2) * dh
else:
return (
-2.0 * (m1 + m2) ** 2 / np.pi * gamma / mres / beta(mres ** 2, m1, m2) ** 3
- hres
+ mres ** 2 * dh
)
def gamma_p(
s: Union[float, npt.NDArray[np.float64]],
mres: Union[float, npt.NDArray[np.float64]],
gamma: Union[float, npt.NDArray[np.float64]],
m1: float,
m2: float,
reshape: Optional[bool] = False,
) -> Union[float, npt.NDArray[np.float64]]:
"""
Compute the s-dependent width of the resonance.
See ArXiv:1002.0279 Eqn.(6) for details.
Parameters
----------
s: Union[float, npt.NDArray]
Center-of-mass energy squared.
mres: Union[float, npt.NDArray]
Mass of the resonance.
gamma: Union[float, npt.NDArray]
Width of the resonance.
m1: float
Mass of the first final state particle.
m2: float
Mass of the second final state particle.
reshape: Optional[bool]
If true, a different value is computed for each `s`. This is useful
for computing form-factors for many squared center-of-mass energies at
once.
Returns
-------
gamma: Union[float, npt.NDArray]
The s-dependent width.
"""
v2 = beta2(s, m1, m2)
vr2 = beta2(mres ** 2, m1, m2)
if hasattr(s, "__len__") and reshape:
rp = np.sqrt(
np.clip(
v2[:, np.newaxis] / vr2, # type:ignore
0.0,
None,
)
)
return np.sqrt(s)[:, np.newaxis] / mres * rp ** 3 * gamma
rp = np.where(vr2 == 0.0, vr2, np.sqrt(np.clip(v2 / vr2, 0.0, None)))
return np.sqrt(s) / mres * rp ** 3 * gamma
def breit_wigner_gs(
s: Union[float, npt.NDArray[np.float64]],
mres: Union[float, npt.NDArray[np.float64]],
gamma: Union[float, npt.NDArray[np.float64]],
m1: float,
m2: float,
h0: Union[float, npt.NDArray[np.float64]],
dh: Union[float, npt.NDArray[np.float64]],
hres: Union[float, npt.NDArray[np.float64]],
reshape: Optional[bool] = False,
) -> Union[complex, npt.NDArray[np.complex128]]:
"""
Compute the Gounaris-Sakurai Breit-Wigner function with pion loop
corrections included. See ArXiv:1002.0279 Eqn.(2) for details.
Parameters
----------
s: Union[float, npt.NDArray]
Center-of-mass energy squared.
mres: Union[float, npt.NDArray]
Mass of the resonance.
gamma: Union[float, npt.NDArray]
Width of the resonance.
m1: float
Mass of the first final state particle.
m2: float
Mass of the second final state particle.
h0: Union[float, npt.NDArray]
Value of the H(s) function at s=0.
dh: Union[float, npt.NDArray]
Derivative of the H-hat function evaluated at the resonance
mass.
hres: Union[float, npt.NDArray]
Value of the H(s) function at s=mres^2.
reshape: Optional[bool]
If true, a different value is computed for each `s`. This is useful
for computing form-factors for many squared center-of-mass energies at
once.
Returns
-------
bw: Union[float, npt.NDArray]
The Breit-Wigner function.
"""
mr2 = mres ** 2
if hasattr(s, "__len__") and reshape:
ss = np.array(s)
return (mr2 + h0) / (
mr2
- ss[:, np.newaxis]
+ h(ss, mres, gamma, m1, m2, dh, hres, reshape=True)
- 1j
* np.sqrt(ss)[:, np.newaxis]
* gamma_p(ss, mres, gamma, m1, m2, reshape=True)
)
return (mr2 + h0) / (
mr2
- s
+ h(s, mres, gamma, m1, m2, dh, hres)
- 1j * np.sqrt(s) * gamma_p(s, mres, gamma, m1, m2)
)
def breit_wigner_fw(
s: Union[float, npt.NDArray[np.float64]],
mres: Union[float, complex, npt.NDArray[np.float64]],
gamma: Union[float, complex, npt.NDArray[np.float64]],
reshape: Optional[bool] = False,
) -> Union[complex, npt.NDArray[np.complex128]]:
"""
Compute the standard Breit-Wigner with a constant width. See
ArXiv:1002.0279 Eqn.(8) for details.
Parameters
----------
s: Union[float, npt.NDArray]
Center-of-mass energy squared.
mres: Union[float, npt.NDArray]
Mass of the resonance.
gamma: Union[float, npt.NDArray]
Width of the resonance.
reshape: Optional[bool]
If true, a different value is computed for each `s`. This is useful
for computing form-factors for many squared center-of-mass energies at
once.
Returns
-------
bw: Union[float, npt.NDArray]
The Breit-Wigner function.
"""
mr2 = mres ** 2
if hasattr(s, "__len__") and reshape:
ss = np.array(s)
return mr2 / (mr2 - ss[:, np.newaxis] - 1j * mres * gamma)
return mr2 / (mr2 - s - 1j * mres * gamma)
def breit_wigner_pwave(
s: Union[float, npt.NDArray[np.float64]],
mres: Union[float, complex, npt.NDArray[np.complex128]],
gamma: Union[float, complex, npt.NDArray[np.complex128]],
m1: float,
m2: float,
reshape: Optional[bool] = False,
):
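    """
    Compute a Breit-Wigner with the p-wave, s-dependent width from ``gamma_p``.
    (Docstring added for clarity.) The parameters and the ``reshape`` flag have
    the same meaning as in ``breit_wigner_fw`` and ``breit_wigner_gs`` above.
    """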
mr2 = mres ** 2
if hasattr(s, "__len__") and reshape:
ss = np.array(s)
return mr2 / (
mr2
- ss[:, np.newaxis]
- 1j
* np.sqrt(ss)[:, np.newaxis]
* gamma_p(ss, mres, gamma, m1, m2, reshape=True) # type:ignore
)
return mr2 / (
mr2 - s - 1j * np.sqrt(s) * gamma_p(s, mres, gamma, m1, m2) # type:ignore
)
def gamma_generator(
beta: float,
nmax: int,
) -> Generator[float, None, None]:
"""
Generator to efficiently compute gamma(2 - beta + n) / gamma(1 + n) for
values of n less than a specified maximum value. This is done recursively
to avoid roundoff errors.
Parameters
----------
beta: float
Value inside the gamma-function in the numerator of:
gamma(2 - beta + n) / gamma(1 + n)
nmax: int
Maximum value to compute the function for.
Returns
-------
gamma_gen: Generator[float, None, None]
Generator to yield values of gamma(2 - beta + n) / gamma(1 + n).
"""
val = gamma(2.0 - beta)
yield val
n = 1
while n < nmax:
val *= (1.0 - beta + n) / n
n += 1
yield val
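# --- Usage sketch (added for illustration; not part of the original module) ---
# Wires the helpers above into the Gounaris-Sakurai Breit-Wigner for an assumed
# rho(770)-like resonance decaying to two charged pions, and cross-checks
# gamma_generator against a direct evaluation. The resonance mass and width
# below are illustrative assumptions, not values defined in this module.
if __name__ == "__main__":
    m_res, g_res = 0.77526, 0.1491  # assumed resonance mass and width in GeV
    dh = dhhatds(m_res, g_res, MPI_GEV, MPI_GEV)
    hres = hhat(m_res ** 2, m_res, g_res, MPI_GEV, MPI_GEV)
    h0 = h(0.0, m_res, g_res, MPI_GEV, MPI_GEV, dh, hres)
    ss = np.linspace(0.3, 1.5, 5) ** 2  # squared center-of-mass energies in GeV^2
    bw = breit_wigner_gs(ss, m_res, g_res, MPI_GEV, MPI_GEV, h0, dh, hres, reshape=True)
    print(bw.shape)  # (5, 1): one complex value per squared energy
    # gamma_generator yields gamma(2 - beta + n) / gamma(1 + n) for n = 0 .. nmax - 1
    recursive = list(gamma_generator(2.3, 4))
    direct = [gamma(2.0 - 2.3 + n) / gamma(1.0 + n) for n in range(4)]
    print(all(abs(a - b) < 1e-8 for a, b in zip(recursive, direct)))  # True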
| 28.819905
| 87
| 0.571781
| 1,679
| 12,162
| 4.117332
| 0.097082
| 0.096919
| 0.114711
| 0.176479
| 0.831043
| 0.780558
| 0.748734
| 0.722841
| 0.686388
| 0.680023
| 0
| 0.040711
| 0.297155
| 12,162
| 421
| 88
| 28.888361
| 0.768016
| 0.467686
| 0
| 0.536313
| 0
| 0
| 0.007428
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055866
| false
| 0
| 0.022346
| 0
| 0.167598
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
7276ef5317ff2d060cc21e40188b6779d02ccc9b
| 3,181
|
py
|
Python
|
znail/ui/api/disciplines/test/test_packet_delay.py
|
Zenterio/znail
|
68cd3a4b5ae866f3a8846ce1d0fb5f89428a6b18
|
[
"Apache-2.0"
] | 4
|
2019-02-20T09:40:49.000Z
|
2019-11-19T21:18:44.000Z
|
znail/ui/api/disciplines/test/test_packet_delay.py
|
Zenterio/znail
|
68cd3a4b5ae866f3a8846ce1d0fb5f89428a6b18
|
[
"Apache-2.0"
] | 4
|
2019-03-11T15:24:17.000Z
|
2019-06-14T14:31:01.000Z
|
znail/ui/api/disciplines/test/test_packet_delay.py
|
Zenterio/znail
|
68cd3a4b5ae866f3a8846ce1d0fb5f89428a6b18
|
[
"Apache-2.0"
] | 2
|
2019-03-05T19:04:06.000Z
|
2019-09-08T13:53:10.000Z
|
import unittest
from unittest.mock import call, patch
from znail.netem.disciplines import PacketDelay
from znail.netem.tc import Tc
from znail.ui import app
class TestPacketDelay(unittest.TestCase):
def setUp(self):
tc_clear_patcher = patch.object(Tc, 'clear')
self.tc_clear = tc_clear_patcher.start()
self.addCleanup(tc_clear_patcher.stop)
tc_apply_patcher = patch.object(Tc, 'apply')
self.tc_apply = tc_apply_patcher.start()
self.addCleanup(tc_apply_patcher.stop)
self.client = app.test_client()
def tearDown(self):
self.client.post('/api/disciplines/packet_delay/clear')
def test_empty(self):
response = self.client.get('/api/disciplines/packet_delay')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json, {'milliseconds': None})
def test_can_be_set(self):
response = self.client.post('/api/disciplines/packet_delay', json={'milliseconds': 1})
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json, {'message': 'ok'})
last_call = self.tc_apply.call_args_list[-1]
self.assertEqual(last_call, call({'delay': PacketDelay(milliseconds=1)}))
response = self.client.get('/api/disciplines/packet_delay')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json, {'milliseconds': 1})
def test_can_be_updated(self):
response = self.client.post('/api/disciplines/packet_delay', json={'milliseconds': 1})
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json, {'message': 'ok'})
last_call = self.tc_apply.call_args_list[-1]
self.assertEqual(last_call, call({'delay': PacketDelay(milliseconds=1)}))
response = self.client.get('/api/disciplines/packet_delay')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json, {'milliseconds': 1})
response = self.client.post('/api/disciplines/packet_delay', json={'milliseconds': 2})
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json, {'message': 'ok'})
last_call = self.tc_apply.call_args_list[-1]
self.assertEqual(last_call, call({'delay': PacketDelay(milliseconds=2)}))
response = self.client.get('/api/disciplines/packet_delay')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json, {'milliseconds': 2})
def test_can_not_be_set_to_invalid_value(self):
response = self.client.post('/api/disciplines/packet_delay', json={'invalid': 'data'})
self.assertEqual(response.status_code, 422)
def test_bad_request(self):
response = self.client.post('/api/disciplines/packet_delay')
self.assertEqual(response.status_code, 400)
def test_can_be_cleared(self):
response = self.client.post('/api/disciplines/packet_delay/clear')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json, {'message': 'ok'})
last_call = self.tc_apply.call_args_list[-1]
self.assertEqual(last_call, call({}))
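# --- Usage sketch (added for illustration; not part of the original tests) ---
# The tests above document the packet-delay discipline API: POST a JSON body with
# 'milliseconds' to apply a delay, GET to read the current setting, and POST to
# the /clear endpoint to remove it. A hypothetical client against a running Znail
# box (the host name is an assumption) could do the same:
#
#     import requests
#     base = 'http://znail.local/api/disciplines/packet_delay'
#     requests.post(base, json={'milliseconds': 50})   # apply a 50 ms delay
#     print(requests.get(base).json())                 # {'milliseconds': 50}
#     requests.post(base + '/clear')                   # remove the delay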
| 39.7625
| 94
| 0.688148
| 394
| 3,181
| 5.365482
| 0.162437
| 0.156102
| 0.195837
| 0.130085
| 0.767739
| 0.725639
| 0.725639
| 0.725639
| 0.70246
| 0.6386
| 0
| 0.01645
| 0.178246
| 3,181
| 79
| 95
| 40.265823
| 0.792272
| 0
| 0
| 0.448276
| 0
| 0
| 0.153097
| 0.104055
| 0
| 0
| 0
| 0
| 0.37931
| 1
| 0.137931
| false
| 0
| 0.086207
| 0
| 0.241379
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
72946fd0124b094a0101a6dec085261b69b4f26e
| 101
|
py
|
Python
|
models/__init__.py
|
GuohongLi/simclr-pytorch
|
7e08b2433a623fdbc1c097402fded4cc69d1b54e
|
[
"BSD-3-Clause"
] | null | null | null |
models/__init__.py
|
GuohongLi/simclr-pytorch
|
7e08b2433a623fdbc1c097402fded4cc69d1b54e
|
[
"BSD-3-Clause"
] | null | null | null |
models/__init__.py
|
GuohongLi/simclr-pytorch
|
7e08b2433a623fdbc1c097402fded4cc69d1b54e
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import absolute_import
from .resnet_simclr import *
from .baseline_encoder import *
| 20.2
| 38
| 0.831683
| 13
| 101
| 5.923077
| 0.615385
| 0.25974
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.128713
| 101
| 4
| 39
| 25.25
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
72cf2feb1235250426ddb91529c3032007b15062
| 42
|
py
|
Python
|
turf/bearing/__init__.py
|
malroc/pyturf
|
c89b6ea7094bd5ca26cf589d9dcd15bd819d82e9
|
[
"MIT"
] | 11
|
2020-08-26T11:04:55.000Z
|
2022-01-26T14:53:10.000Z
|
turf/bearing/__init__.py
|
malroc/pyturf
|
c89b6ea7094bd5ca26cf589d9dcd15bd819d82e9
|
[
"MIT"
] | 36
|
2020-04-09T16:49:05.000Z
|
2020-06-01T14:39:37.000Z
|
turf/bearing/__init__.py
|
malroc/pyturf
|
c89b6ea7094bd5ca26cf589d9dcd15bd819d82e9
|
[
"MIT"
] | 5
|
2020-07-30T23:37:35.000Z
|
2021-08-24T08:10:28.000Z
|
from turf.bearing._bearing import bearing
| 21
| 41
| 0.857143
| 6
| 42
| 5.833333
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.095238
| 42
| 1
| 42
| 42
| 0.921053
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
72f37e8fef80c03a543340d00233578d581ba798
| 91
|
py
|
Python
|
app/teacher/__init__.py
|
siwl/test_website
|
c19263c86174796214b039189cc3a65af2baec7d
|
[
"MIT"
] | null | null | null |
app/teacher/__init__.py
|
siwl/test_website
|
c19263c86174796214b039189cc3a65af2baec7d
|
[
"MIT"
] | null | null | null |
app/teacher/__init__.py
|
siwl/test_website
|
c19263c86174796214b039189cc3a65af2baec7d
|
[
"MIT"
] | null | null | null |
from flask import Blueprint
teacher = Blueprint('teacher', __name__)
from . import views
| 15.166667
| 40
| 0.769231
| 11
| 91
| 6
| 0.636364
| 0.484848
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.153846
| 91
| 5
| 41
| 18.2
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0.076923
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0.666667
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
|
0
| 6
|
72fcee1493f195ec3bae74593e478750b54bd8d2
| 23,803
|
py
|
Python
|
python/oneflow/compatible/single_client/nn/modules/pooling.py
|
wangyuyue/oneflow
|
0a71c22fe8355392acc8dc0e301589faee4c4832
|
[
"Apache-2.0"
] | 1
|
2021-09-13T02:34:53.000Z
|
2021-09-13T02:34:53.000Z
|
python/oneflow/compatible/single_client/nn/modules/pooling.py
|
wangyuyue/oneflow
|
0a71c22fe8355392acc8dc0e301589faee4c4832
|
[
"Apache-2.0"
] | null | null | null |
python/oneflow/compatible/single_client/nn/modules/pooling.py
|
wangyuyue/oneflow
|
0a71c22fe8355392acc8dc0e301589faee4c4832
|
[
"Apache-2.0"
] | 1
|
2021-01-17T03:34:39.000Z
|
2021-01-17T03:34:39.000Z
|
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from typing import Optional
from oneflow.compatible import single_client as flow
from oneflow.compatible.single_client.nn.common_types import (
_size_1_t,
_size_2_t,
_size_3_t,
)
from oneflow.compatible.single_client.nn.module import Module
from oneflow.compatible.single_client.nn.modules.utils import _pair, _single, _triple
from oneflow.compatible.single_client.ops.nn_ops import (
calc_pool_padding,
get_dhw_offset,
)
class AvgPool1d(Module):
"""Applies a 1D average pooling over an input signal composed of several input planes.
In the simplest case, the output value of the layer with input size :math:`(N, C, H, W)`,
output :math:`(N, C, H_{out}, W_{out})` and `kernel_size` :math:`k`
can be precisely described as:
.. math::
out(N_i, C_j, l) = \\frac{1}{k} \\sum_{m=0}^{k-1}
input(N_i, C_j, stride \\times l + m)
If padding is non-zero, then the input is implicitly zero-padded on both sides for padding number of points.
The parameters kernel_size, stride, padding can each be an int or a one-element tuple.
Note:
When ceil_mode=True, sliding windows are allowed to go off-bounds if they start within the left padding or the
input. Sliding windows that would start in the right padded region are ignored.
Args:
kernel_size: the size of the window.
strides: the stride of the window. Default value is kernel_size.
padding: implicit zero padding to be added on both sides.
ceil_mode: when True, will use ceil instead of floor to compute the output shape.
count_include_pad: when True, will include the zero-padding in the averaging calculation.
# TODO: fix cuDNN bugs in pooling_1d
"""
def __init__(
self,
kernel_size: _size_1_t,
stride: Optional[_size_1_t] = None,
padding: _size_1_t = 0,
ceil_mode: bool = False,
count_include_pad: Optional[bool] = None,
name: Optional[str] = None,
):
raise NotImplementedError
class AvgPool2d(Module):
"""Performs the 2d-average pooling on the input.
In the simplest case, the output value of the layer with input size :math:`(N, C, H, W)`,
output :math:`(N, C, H_{out}, W_{out})` and `kernel_size` :math:`(kH, kW)`
can be precisely described as:
.. math::
out(N_i, C_j, h, w) = \\frac{1}{kH * kW} \\sum_{m=0}^{kH-1} \\sum_{n=0}^{kW-1}
input(N_i, C_j, stride[0] \\times h + m, stride[1] \\times w + n)
Args:
kernel_size (Union[int, Tuple[int, int]]): An int or list of ints that has length 1, 2. The size of the window for each dimension of the input Tensor.
strides (Union[int, Tuple[int, int]]): An int or list of ints that has length 1, 2. The stride of the sliding window for each dimension of the input Tensor.
padding (Tuple[int, int]): An int or list of ints that has length 1, 2. Implicit zero padding to be added on both sides.
ceil_mode (bool, default to False): When True, will use ceil instead of floor to compute the output shape.
For example:
.. code-block:: python
import oneflow.compatible.single_client.experimental as flow
import numpy as np
of_avgpool2d = flow.nn.AvgPool2d(
kernel_size=(3, 2),
padding=0,
stride=(2, 1),
)
x = flow.Tensor(shape=(1, 1, 10, 10))
of_y = of_avgpool2d(x)
"""
def __init__(
self,
kernel_size: _size_2_t,
stride: Optional[_size_2_t] = None,
padding: _size_2_t = 0,
ceil_mode: bool = False,
count_include_pad: Optional[bool] = None,
divisor_override: Optional[int] = None,
name: Optional[str] = None,
):
super().__init__()
self.kernel_size = _pair(kernel_size)
self.stride = _pair(stride) if stride is not None else kernel_size
assert isinstance(padding, int) or isinstance(
padding, tuple
), "padding can only int int or tuple of 2 ints."
padding = _pair(padding)
padding = [0, 0, *padding]
assert count_include_pad is None, "count_include_pad not supported yet"
assert divisor_override is None, "divisor_override not supported yet"
self._channel_pos = "channels_first"
(self._padding_type, _pads_list) = calc_pool_padding(
padding, get_dhw_offset(self._channel_pos), 2
)
self._padding_before = [pad[0] for pad in _pads_list]
self._padding_after = [pad[1] for pad in _pads_list]
self.ceil_mode = ceil_mode
def forward(self, x):
res = flow.F.avg_pool_2d(
x,
kernel_size=self.kernel_size,
stride=self.stride,
padding=self._padding_type,
padding_before=self._padding_before,
padding_after=self._padding_after,
ceil_mode=self.ceil_mode,
data_format=self._channel_pos,
)
return res
class AvgPool3d(Module):
"""Applies a 3D average pooling over an input signal composed of several input planes.
In the simplest case, the output value of the layer with input size :math:`(N, C, D, H, W)`,
output :math:`(N, C, D_{out}, H_{out}, W_{out})` and `kernel_size` :math:`(kD, kH, kW)`
can be precisely described as:
.. math::
out(N_i, C_j, d, h, w) = \\frac{1}{kD * kH * kW } \\sum_{k=0}^{kD-1} \\sum_{m=0}^{kH-1} \\sum_{n=0}^{kW-1}
input(N_i, C_j, stride[0] \\times d + k, stride[1] \\times h + m, stride[2] \\times w + n)
If padding is non-zero, then the input is implicitly zero-padded on all three sides for padding number of points.
Note:
When ceil_mode=True, sliding windows are allowed to go off-bounds if they start within the left padding or the
input. Sliding windows that would start in the right padded region are ignored.
Args:
kernel_size: the size of the window.
strides: the stride of the window. Default value is kernel_size.
padding: implicit zero padding to be added on all three sides.
ceil_mode: when True, will use ceil instead of floor to compute the output shape.
count_include_pad: when True, will include the zero-padding in the averaging calculation.
divisor_override: if specified, it will be used as divisor, otherwise kernel_size will be used.
Shape:
- Input: :math:`(N, C, D_{in}, H_{in}, W_{in})`
- Output: :math:`(N, C, D_{out}, H_{out}, W_{out})`, where
.. math::
D_{out} = \\left\\lfloor\\frac{D_{in} + 2 \\times \\text{padding}[0] - \\text{kernel_size}[0]}{\\text{stride}[0]} + 1\\right\\rfloor
.. math::
H_{out} = \\left\\lfloor\\frac{H_{in} + 2 \\times \\text{padding}[1] - \\text{kernel_size}[1]}{\\text{stride}[1]} + 1\\right\\rfloor
.. math::
W_{out} = \\left\\lfloor\\frac{W_{in} + 2 \\times \\text{padding}[2] - \\text{kernel_size}[2]}{\\text{stride}[2]} + 1\\right\\rfloor
For example:
.. code-block:: python
>>> import oneflow.compatible.single_client.experimental as flow
>>> import numpy as np
>>> flow.enable_eager_execution()
>>> inputarr = np.random.randn(9, 7, 11, 32, 20)
>>> of_avgpool3d = flow.nn.AvgPool3d(kernel_size=(2,2,2),padding=(0,0,0),stride=(1,1,1),)
>>> x = flow.Tensor(inputarr)
>>> y = of_avgpool3d(x)
"""
def __init__(
self,
kernel_size: _size_3_t,
stride: Optional[_size_3_t] = None,
padding: _size_3_t = 0,
ceil_mode: bool = False,
count_include_pad: Optional[bool] = None,
divisor_override: Optional[int] = None,
):
super().__init__()
kernel_size = _triple(kernel_size)
stride = _triple(stride) if stride is not None else kernel_size
assert padding == (0, 0, 0), "padding>0 not supported yet"
assert isinstance(padding, int) or isinstance(
padding, tuple
), "padding can only int int or tuple of 3 ints."
padding = _triple(padding)
padding = [0, 0, *padding]
assert count_include_pad is None, "count_include_pad not supported yet"
assert divisor_override is None, "divisor_override not supported yet"
_channel_pos = "channels_first"
(_padding_type, _pads_list) = calc_pool_padding(
padding, get_dhw_offset(_channel_pos), 3
)
_padding_before = [pad[0] for pad in _pads_list]
_padding_after = [pad[1] for pad in _pads_list]
self._op = (
flow.builtin_op("avg_pool_3d")
.Attr("data_format", _channel_pos)
.Attr("pool_size", kernel_size)
.Attr("strides", stride)
.Attr("ceil_mode", ceil_mode)
.Attr("padding", _padding_type)
.Attr("padding_before", _padding_before)
.Attr("padding_after", _padding_after)
.Input("x")
.Output("y")
.Build()
)
def forward(self, x):
res = self._op(x)[0]
return res
class MaxPool1d(Module):
"""The interface is consistent with PyTorch.
The documentation is referenced from: https://pytorch.org/docs/stable/generated/torch.nn.MaxPool1d.html#torch.nn.MaxPool1d
Applies a 1D max pooling over an input signal composed of several input planes.
In the simplest case, the output value of the layer with input size :math:`(N, C, L)`
and output :math:`(N, C, L_{out})` can be precisely described as:
.. math::
out(N_i, C_j, k) = \\max_{m=0, \\ldots, \\text{kernel\\_size} - 1}
input(N_i, C_j, stride \\times k + m)
If :attr:`padding` is non-zero, then the input is implicitly padded with minimum value on both sides
for :attr:`padding` number of points. :attr:`dilation` is the stride between the elements within the
sliding window. This `link`_ has a nice visualization of the pooling parameters.
Note:
When ceil_mode=True, sliding windows are allowed to go off-bounds if they start within the left padding
or the input. Sliding windows that would start in the right padded region are ignored.
Args:
kernel_size: The size of the sliding window, must be > 0.
stride: The stride of the sliding window, must be > 0. Default value is :attr:`kernel_size`.
padding: Implicit negative infinity padding to be added on both sides, must be >= 0 and <= kernel_size / 2.
dilation: The stride between elements within a sliding window, must be > 0.
return_indices: If ``True``, will return the argmax along with the max values.
Useful for :class:`torch.nn.MaxUnpool1d` later
ceil_mode: If ``True``, will use `ceil` instead of `floor` to compute the output shape. This
ensures that every element in the input tensor is covered by a sliding window.
Shape:
- Input: :math:`(N, C, L_{in})`
- Output: :math:`(N, C, L_{out})`, where
.. math::
L_{out} = \\left\\lfloor \\frac{L_{in} + 2 \\times \\text{padding} - \\text{dilation}
\\times (\\text{kernel_size} - 1) - 1}{\\text{stride}} + 1\\right\\rfloor
"""
def __init__(
self,
kernel_size: _size_1_t,
stride: Optional[_size_1_t] = None,
padding: _size_1_t = 0,
dilation: _size_1_t = 1,
return_indices: bool = False,
ceil_mode: bool = False,
):
raise NotImplementedError
class MaxPool2d(Module):
"""The interface is consistent with PyTorch.
The documentation is referenced from: https://pytorch.org/docs/stable/generated/torch.nn.MaxPool2d.html#torch.nn.MaxPool2d
Applies a 2D max pooling over an input signal composed of several input planes.
In the simplest case, the output value of the layer with input size :math:`(N, C, H, W)`,
output :math:`(N, C, H_{out}, W_{out})` and :attr:`kernel_size` :math:`(kH, kW)`
can be precisely described as:
.. math::
\\begin{aligned}
out(N_i, C_j, h, w) ={} & \\max_{m=0, \\ldots, kH-1} \\max_{n=0, \\ldots, kW-1} \\\\
& \\text{input}(N_i, C_j, \\text{stride[0]} \\times h + m,
\\text{stride[1]} \\times w + n)
\\end{aligned}
If :attr:`padding` is non-zero, then the input is implicitly minimum value padded on both sides
for :attr:`padding` number of points. :attr:`dilation` controls the spacing between the kernel points.
It is harder to describe, but this `link`_ has a nice visualization of what :attr:`dilation` does.
Note:
When ceil_mode=True, sliding windows are allowed to go off-bounds if they start within the left padding
or the input. Sliding windows that would start in the right padded region are ignored.
The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding`, :attr:`dilation` can either be:
- a single ``int`` -- in which case the same value is used for the height and width dimension
- a ``tuple`` of two ints -- in which case, the first `int` is used for the height dimension,
and the second `int` for the width dimension
Args:
kernel_size: the size of the window to take a max over
stride: the stride of the window. Default value is :attr:`kernel_size`
padding: implicit minimum value padding to be added on both sides
dilation: a parameter that controls the stride of elements in the window
return_indices: if ``True``, will return the max indices along with the outputs.
Useful for :class:`torch.nn.MaxUnpool2d` later
ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape
Shape:
- Input: :math:`(N, C, H_{in}, W_{in})`
- Output: :math:`(N, C, H_{out}, W_{out})`, where
.. math::
H_{out} = \\left\\lfloor\\frac{H_{in} + 2 * \\text{padding[0]} - \\text{dilation[0]}
\\times (\\text{kernel_size[0]} - 1) - 1}{\\text{stride[0]}} + 1\\right\\rfloor
.. math::
W_{out} = \\left\\lfloor\\frac{W_{in} + 2 * \\text{padding[1]} - \\text{dilation[1]}
\\times (\\text{kernel_size[1]} - 1) - 1}{\\text{stride[1]}} + 1\\right\\rfloor
For example:
.. code-block:: python
>>> import oneflow.compatible.single_client.experimental as flow
>>> import numpy as np
>>> flow.enable_eager_execution()
>>> kernel_size, stride, padding = (3, 3), (1, 1), (1, 2)
>>> m = flow.nn.MaxPool2d(kernel_size, stride, padding)
>>> np.random.seed(0)
>>> x = flow.Tensor(np.random.rand(1, 1, 5, 3))
>>> y = m(x)
>>> y #doctest: +ELLIPSIS
tensor([[[[0.5488, 0.7152, 0.7152, 0.7152, 0.6459],
...
[0.568 , 0.9256, 0.9256, 0.9256, 0.5289]]]], dtype=oneflow.float32)
>>> kernel_size, stride, padding = (2, 3), (4, 5), (1, 2)
>>> m = flow.nn.MaxPool2d(kernel_size, stride, padding)
>>> x = flow.Tensor(np.random.randn(9, 7, 32, 20))
>>> y = m(x)
>>> y.size()
flow.Size([9, 7, 9, 5])
"""
def __init__(
self,
kernel_size: _size_2_t,
stride: Optional[_size_2_t] = None,
padding: _size_2_t = 0,
dilation: _size_2_t = 1,
return_indices: bool = False,
ceil_mode: bool = False,
):
super().__init__()
self.kernel_size = _pair(kernel_size)
self.strides = _pair(stride) if stride is not None else kernel_size
data_format = "NCHW"
self.channel_pos = (
"channels_last" if data_format == "NHWC" else "channels_first"
)
assert return_indices is False, "Only support return_indices==False for now!"
assert dilation == 1 or dilation == (1, 1), "Only support dilation==1 for now!"
padding = _pair(padding)
if len(padding) == 2:
if data_format == "NCHW":
padding = (0, 0, padding[0], padding[1])
else:
raise ValueError("error padding param!")
else:
raise ValueError("error padding param!")
(self.padding_type, pads_list) = calc_pool_padding(
padding, get_dhw_offset(self.channel_pos), 2
)
self.padding_before = [pad[0] for pad in pads_list]
self.padding_after = [pad[1] for pad in pads_list]
self.ceil_mode = ceil_mode
def forward(self, x):
return flow.F.max_pool_2d(
x,
kernel_size=self.kernel_size,
stride=self.strides,
padding=self.padding_type,
padding_before=self.padding_before,
padding_after=self.padding_after,
ceil_mode=self.ceil_mode,
data_format=self.channel_pos,
)
class MaxPool3d(Module):
"""The interface is consistent with PyTorch.
The documentation is referenced from: https://pytorch.org/docs/stable/generated/torch.nn.MaxPool3d.html#torch.nn.MaxPool3d
Applies a 3D max pooling over an input signal composed of several input planes.
In the simplest case, the output value of the layer with input size :math:`(N, C, D, H, W)`,
output :math:`(N, C, D_{out}, H_{out}, W_{out})` and :attr:`kernel_size` :math:`(kD, kH, kW)`
can be precisely described as:
.. math::
\\begin{aligned}
\\text{out}(N_i, C_j, d, h, w) ={} & \\max_{k=0, \\ldots, kD-1} \\max_{m=0, \\ldots, kH-1} \\max_{n=0, \\ldots, kW-1} \\\\
& \\text{input}(N_i, C_j, \\text{stride[0]} \\times d + k,
\\text{stride[1]} \\times h + m, \\text{stride[2]} \\times w + n)
\\end{aligned}
If :attr:`padding` is non-zero, then the input is implicitly minimum value on both sides
for :attr:`padding` number of points. :attr:`dilation` controls the spacing between the kernel points.
It is harder to describe, but this `link`_ has a nice visualization of what :attr:`dilation` does.
Note:
When ceil_mode=True, sliding windows are allowed to go off-bounds if they start within the left padding
or the input. Sliding windows that would start in the right padded region are ignored.
The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding`, :attr:`dilation` can either be:
- a single ``int`` -- in which case the same value is used for the depth, height and width dimension
- a ``tuple`` of three ints -- in which case, the first `int` is used for the depth dimension,
the second `int` for the height dimension and the third `int` for the width dimension
Args:
kernel_size: the size of the window to take a max over
stride: the stride of the window. Default value is :attr:`kernel_size`
padding: implicit minimum value padding to be added on all three sides
dilation: a parameter that controls the stride of elements in the window
return_indices: if ``True``, will return the max indices along with the outputs.
Useful for :class:`torch.nn.MaxUnpool3d` later
ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape
Shape:
- Input: :math:`(N, C, D_{in}, H_{in}, W_{in})`
- Output: :math:`(N, C, D_{out}, H_{out}, W_{out})`, where
.. math::
D_{out} = \\left\\lfloor\\frac{D_{in} + 2 \\times \\text{padding}[0] - \\text{dilation}[0] \\times
(\\text{kernel_size}[0] - 1) - 1}{\\text{stride}[0]} + 1\\right\\rfloor
.. math::
H_{out} = \\left\\lfloor\\frac{H_{in} + 2 \\times \\text{padding}[1] - \\text{dilation}[1] \\times
(\\text{kernel_size}[1] - 1) - 1}{\\text{stride}[1]} + 1\\right\\rfloor
.. math::
W_{out} = \\left\\lfloor\\frac{W_{in} + 2 \\times \\text{padding}[2] - \\text{dilation}[2] \\times
(\\text{kernel_size}[2] - 1) - 1}{\\text{stride}[2]} + 1\\right\\rfloor
For example:
.. code-block:: python
>>> import oneflow.compatible.single_client.experimental as flow
>>> import numpy as np
>>> flow.enable_eager_execution()
>>> kernel_size, stride, padding = (3, 3, 3), (1, 1, 1), (1, 1, 2)
>>> m = flow.nn.MaxPool3d(kernel_size, stride, padding)
>>> np.random.seed(0)
>>> x = flow.Tensor(np.random.rand(1, 1, 3, 5, 3))
>>> y = m(x)
>>> y #doctest: +ELLIPSIS
tensor([[[[[0.7782, 0.87 , 0.9786, 0.9786, 0.9786],
...
[0.9447, 0.9447, 0.9447, 0.6668, 0.6668]]]]], dtype=oneflow.float32)
>>> kernel_size, stride, padding = (2, 2, 3), (3, 4, 5), (2, 1, 2)
>>> m = flow.nn.MaxPool3d(kernel_size, stride, padding)
>>> x = flow.Tensor(np.random.randn(9, 7, 11, 32, 20))
>>> y = m(x)
>>> y.size()
flow.Size([9, 7, 5, 9, 5])
"""
def __init__(
self,
kernel_size: _size_3_t,
stride: Optional[_size_3_t] = None,
padding: _size_3_t = 0,
dilation: _size_3_t = 1,
return_indices: bool = False,
ceil_mode: bool = False,
):
super().__init__()
kernel_size = _triple(kernel_size)
strides = _triple(stride) if stride is not None else kernel_size
data_format = "NCDHW"
channel_pos = "channels_last" if data_format == "NDHWC" else "channels_first"
assert return_indices is False, "Only support return_indices==False for now!"
assert dilation == 1 or dilation == (
1,
1,
1,
), "Only support dilation==1 for now!"
padding = _triple(padding)
if len(padding) == 3:
if data_format == "NCDHW":
padding = (0, 0, padding[0], padding[1], padding[2])
else:
raise ValueError("error padding param!")
else:
raise ValueError("error padding param!")
(padding_type, pads_list) = calc_pool_padding(
padding, get_dhw_offset(channel_pos), 3
)
padding_before = [pad[0] for pad in pads_list]
padding_after = [pad[1] for pad in pads_list]
self._op = (
flow.builtin_op("max_pool_3d")
.Attr("data_format", channel_pos)
.Attr("pool_size", kernel_size)
.Attr("strides", strides)
.Attr("ceil_mode", ceil_mode)
.Attr("padding", padding_type)
.Attr("padding_before", padding_before)
.Attr("padding_after", padding_after)
.Input("x")
.Output("y")
.Build()
)
def forward(self, x):
return self._op(x)[0]
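# --- Worked shape example (added for illustration; not part of the original module) ---
# The MaxPool2d shape formula above, evaluated for the doctest values
# kernel_size=(3, 3), stride=(1, 1), padding=(1, 2), dilation=(1, 1) on a
# (1, 1, 5, 3) input:
#   H_out = floor((5 + 2*1 - 1*(3 - 1) - 1) / 1) + 1 = 5
#   W_out = floor((3 + 2*2 - 1*(3 - 1) - 1) / 1) + 1 = 5
# which matches the 5x5 output printed in that doctest. The hypothetical helper
# below evaluates the same floor-mode formula for a single spatial dimension.
def _pool_output_size(size, kernel, stride, padding, dilation=1):
    """Return floor((size + 2*padding - dilation*(kernel - 1) - 1) / stride) + 1."""
    return (size + 2 * padding - dilation * (kernel - 1) - 1) // stride + 1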
if __name__ == "__main__":
import doctest
doctest.testmod(raise_on_error=True)
| 42.203901
| 164
| 0.60026
| 3,358
| 23,803
| 4.103335
| 0.107802
| 0.047173
| 0.008709
| 0.003484
| 0.825532
| 0.796575
| 0.775093
| 0.749546
| 0.728718
| 0.71609
| 0
| 0.024533
| 0.27736
| 23,803
| 563
| 165
| 42.278863
| 0.776525
| 0.617023
| 0
| 0.523364
| 0
| 0
| 0.096036
| 0.005252
| 0
| 0
| 0
| 0.001776
| 0.051402
| 1
| 0.046729
| false
| 0
| 0.03271
| 0.009346
| 0.126168
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f4156d9fbd9332da23346e331b0cfc44ceec0361
| 194
|
py
|
Python
|
tasks/models.py
|
SalyLopes/salynewtask
|
6368edc64f9b8a66497e63f878ceb866885c92fb
|
[
"Apache-2.0"
] | null | null | null |
tasks/models.py
|
SalyLopes/salynewtask
|
6368edc64f9b8a66497e63f878ceb866885c92fb
|
[
"Apache-2.0"
] | null | null | null |
tasks/models.py
|
SalyLopes/salynewtask
|
6368edc64f9b8a66497e63f878ceb866885c92fb
|
[
"Apache-2.0"
] | null | null | null |
from django.db import models
class Task(models.Model):
item = models.CharField(max_length=20)
status = models.CharField(max_length=20)
def __str__(self):
return self.item
| 19.4
| 44
| 0.701031
| 27
| 194
| 4.814815
| 0.666667
| 0.230769
| 0.276923
| 0.369231
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0.025806
| 0.201031
| 194
| 10
| 45
| 19.4
| 0.812903
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.166667
| 0.166667
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
f42febb6e022ccff3103cbdeec1166bc1e2f2bd0
| 127
|
py
|
Python
|
src/parade/error/flow_errors.py
|
bailaohe/parade
|
e2be18b7c5fa13136435e7a6a29399f9fa392870
|
[
"MIT"
] | 39
|
2017-03-07T06:20:03.000Z
|
2020-03-01T00:18:21.000Z
|
src/parade/error/flow_errors.py
|
bailaohe/parade
|
e2be18b7c5fa13136435e7a6a29399f9fa392870
|
[
"MIT"
] | 15
|
2017-03-07T08:21:21.000Z
|
2019-04-24T09:23:14.000Z
|
src/parade/error/flow_errors.py
|
bailaohe/parade
|
e2be18b7c5fa13136435e7a6a29399f9fa392870
|
[
"MIT"
] | 11
|
2017-03-11T07:13:43.000Z
|
2020-05-28T07:34:52.000Z
|
from . import ParadeError, FLOW_NOT_FOUND
class FlowNotFoundError(ParadeError):
(code, status, message) = FLOW_NOT_FOUND
| 21.166667
| 44
| 0.779528
| 15
| 127
| 6.333333
| 0.733333
| 0.147368
| 0.252632
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.141732
| 127
| 5
| 45
| 25.4
| 0.87156
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f481fd0efc8de08bbbfb036098d9272b3e479cf3
| 16,909
|
py
|
Python
|
contracts/validator.py
|
brave-i/AlgoDex
|
ab4d53ba79abd46e8f2b3849ae654ca1f87f3bd2
|
[
"MIT"
] | null | null | null |
contracts/validator.py
|
brave-i/AlgoDex
|
ab4d53ba79abd46e8f2b3849ae654ca1f87f3bd2
|
[
"MIT"
] | null | null | null |
contracts/validator.py
|
brave-i/AlgoDex
|
ab4d53ba79abd46e8f2b3849ae654ca1f87f3bd2
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
from pyteal import *
# Manager App ID
MANAGER_INDEX = Int(15779041) # TODO: Update
# Keys
KEY_CREATOR = Bytes("C")
KEY_TOKEN1 = Bytes("T1")
KEY_TOKEN2 = Bytes("T2")
KEY_LIQUIDITY_TOKEN = Bytes("LT")
# Transaction Types
TRANSACTION_TYPE_SWAP_DEPOSIT_TOKEN1_TO_TOKEN2 = Bytes("s1")
TRANSACTION_TYPE_SWAP_DEPOSIT_TOKEN2_TO_TOKEN1 = Bytes("s2")
TRANSACTION_TYPE_ADD_LIQUIDITY_DEPOSIT = Bytes("a")
TRANSACTION_TYPE_WITHDRAW_LIQUIDITY = Bytes("w")
TRANSACTION_TYPE_REFUND = Bytes("r")
TRANSACTION_TYPE_WITHDRAW_PROTOCOL_FEES = Bytes("p")
def approval_program():
"""
This smart contract implements the Validator part of the AlgoSwap DEX.
It asserts the existence of all required transaction fields in every
transaction part of every possible atomic transaction group that AlgoSwap
supports (Swap Token 1 for Token 2, Swap Token 2 for Token 1, Add Liquidity,
Withdraw Liquidity, Withdraw Protocol Fees, and Refund).
Any atomic transaction group MUST have a transaction to the validator
smart contract as the first transaction of the group to proceed.
Commands:
s1 Swap Token 1 for Token 2 in a liquidity pair
s2 Swap Token 2 for Token 1 in a liquidity pair
a Add liquidity to a liquidity pool
w Withdraw liquidity from a liquidity pool
r Get a refund of unused tokens
p Withdraw protocol fees (Developer only)
"""
key_token1 = App.localGetEx(Int(1), MANAGER_INDEX, KEY_TOKEN1)
key_token2 = App.localGetEx(Int(1), MANAGER_INDEX, KEY_TOKEN2)
key_liquidity_token = App.localGetEx(Int(1), MANAGER_INDEX, KEY_LIQUIDITY_TOKEN)
# On application create, put the creator key in global storage
on_create = Seq([
App.globalPut(KEY_CREATOR, Txn.sender()),
Int(1)
])
# Closeout on validator does nothing
on_closeout = Int(1)
# Opt in on validator does nothing
on_opt_in = Int(1)
on_swap_deposit = Seq([
key_token1,
Assert(
And(
# Group has 3 transactions
Global.group_size() == Int(3),
# This ApplicationCall is the 1st transaction
Txn.group_index() == Int(0),
# No additional actions are needed from this transaction
Txn.on_completion() == OnComplete.NoOp,
# Has one additional account attached
Txn.accounts.length() == Int(1),
# Has two application arguments
Txn.application_args.length() == Int(2),
# Second txn to manager
# Is of type ApplicationCall
Gtxn[1].type_enum() == TxnType.ApplicationCall,
# No additional actions needed
Gtxn[1].on_completion() == OnComplete.NoOp,
# Has one additional account attached
Gtxn[1].accounts.length() == Int(1),
# Has two application arguments
Gtxn[1].application_args.length() == Int(2),
# Additional account is same in both calls
Txn.accounts[1] == Gtxn[1].accounts[1],
# Application argument is same in both calls
Txn.application_args[0] == Gtxn[1].application_args[0],
Txn.application_args[1] == Gtxn[1].application_args[1],
# Third txn to escrow
# Is of type AssetTransfer
Gtxn[2].type_enum() == TxnType.AssetTransfer,
# Transfer asset is TOKEN1
Gtxn[2].xfer_asset() == key_token1.value(),
# Asset sender is zero address
Gtxn[2].asset_sender() == Global.zero_address(),
# Asset receiver is attached account
Gtxn[2].asset_receiver() == Txn.accounts[1],
# Is not a close transaction
Gtxn[2].close_remainder_to() == Global.zero_address(),
# Is not a close asset transaction
Gtxn[2].asset_close_to() == Global.zero_address(),
)
),
Int(1)
])
on_swap_deposit_2 = Seq([
key_token2,
Assert(
And(
# Group has 3 transactions
Global.group_size() == Int(3),
# This ApplicationCall is the first transaction
Txn.group_index() == Int(0),
# No additional actions are needed from this transaction
Txn.on_completion() == OnComplete.NoOp,
# Has one additional account attached
Txn.accounts.length() == Int(1),
# Has two application arguments attached
Txn.application_args.length() == Int(2),
# Second txn to Manager
# Is of type ApplicationCall
Gtxn[1].type_enum() == TxnType.ApplicationCall,
# No additional actions needed
Gtxn[1].on_completion() == OnComplete.NoOp,
# Has one additional account attached
Gtxn[1].accounts.length() == Int(1),
# Has two application arguments attached
Gtxn[1].application_args.length() == Int(2),
# Additional account is same as first txn
Txn.accounts[1] == Gtxn[1].accounts[1],
# Application arguments are same as first txn
Txn.application_args[0] == Gtxn[1].application_args[0],
Txn.application_args[1] == Gtxn[1].application_args[1],
# Third txn to escrow
# Is of type AssetTransfer
Gtxn[2].type_enum() == TxnType.AssetTransfer,
# Transfer asset is Token 2
Gtxn[2].xfer_asset() == key_token2.value(),
# Sender is zero address
Gtxn[2].asset_sender() == Global.zero_address(),
# Asset receiver is attached account
Gtxn[2].asset_receiver() == Txn.accounts[1],
# Is not a close transaction
Gtxn[2].close_remainder_to() == Global.zero_address(),
# Is not a close asset transaction
Gtxn[2].asset_close_to() == Global.zero_address(),
)
),
Int(1)
])
on_add_liquidity_deposit = Seq([
key_token1,
key_token2,
Assert(
And(
# Group has 4 transactions
Global.group_size() == Int(4),
# This ApplicationCall is the first transaction
Txn.group_index() == Int(0),
# No additional actions needed from this transaction
Txn.on_completion() == OnComplete.NoOp,
# Has one additional account attached
Txn.accounts.length() == Int(1),
# Has two application arguments attached
Txn.application_args.length() == Int(2),
# NOTE: No way to check length of foreign assets in PyTeal
# Second txn to Manager
# is of type ApplicationCall
Gtxn[1].type_enum() == TxnType.ApplicationCall,
# No additional actions needed
Gtxn[1].on_completion() == OnComplete.NoOp,
# Has one additional account attached
Gtxn[1].accounts.length() == Int(1),
# Has two application arguments attached
Gtxn[1].application_args.length() == Int(2),
# Additional accounts are same as first txn
Txn.accounts[1] == Gtxn[1].accounts[1],
# Application arguments are same as first txn
Txn.application_args[0] == Gtxn[1].application_args[0],
Txn.application_args[1] == Gtxn[1].application_args[1],
# Third txn to Escrow
# Is of type AssetTransfer
Gtxn[2].type_enum() == TxnType.AssetTransfer,
# Transfer asset is Token 1
Gtxn[2].xfer_asset() == key_token1.value(),
# Asset sender is zero address
Gtxn[2].asset_sender() == Global.zero_address(),
# Asset receiver is the escrow account
Gtxn[2].asset_receiver() == Txn.accounts[1],
# Is not a close transaction
Gtxn[2].close_remainder_to() == Global.zero_address(),
# Is not a close asset transaction
Gtxn[2].asset_close_to() == Global.zero_address(),
# Fourth txn to Escrow
# Is of type AssetTransfer
Gtxn[3].type_enum() == TxnType.AssetTransfer,
# Transfer asset is Token 2
Gtxn[3].xfer_asset() == key_token2.value(),
# Asset sender is zero address
Gtxn[3].asset_sender() == Global.zero_address(),
# Asset receiver is the escrow account
Gtxn[3].asset_receiver() == Txn.accounts[1],
# Is not a close transaction
Gtxn[3].close_remainder_to() == Global.zero_address(),
# Is not a close asset transaction
Gtxn[3].asset_close_to() == Global.zero_address(),
)
),
Int(1)
])
on_withdraw_liquidity = Seq([
key_liquidity_token,
Assert(
And(
# Group has 3 transactions
Global.group_size() == Int(3),
# This ApplicationCall is the first transaction
Txn.group_index() == Int(0),
# No additional actions are needed from this transaction
Txn.on_completion() == OnComplete.NoOp,
# Has one additional account attached
Txn.accounts.length() == Int(1),
# Has three application arguments attached
Txn.application_args.length() == Int(3),
# NOTE: No way to check length of foreign assets in PyTeal
# Second txn to Manager
# is of type ApplicationCall
Gtxn[1].type_enum() == TxnType.ApplicationCall,
# No additional actions needed
Gtxn[1].on_completion() == OnComplete.NoOp,
# Has two additional accounts attached
Gtxn[1].accounts.length() == Int(1),
# Has three application arguments attached
Gtxn[1].application_args.length() == Int(3),
# Additional accounts are same as first txn
Txn.accounts[1] == Gtxn[1].accounts[1],
# Application arguments are same as first txn
Txn.application_args[0] == Gtxn[1].application_args[0],
Txn.application_args[1] == Gtxn[1].application_args[1],
Txn.application_args[2] == Gtxn[1].application_args[2],
# Third txn to Escrow
# is of type AssetTransfer
Gtxn[2].type_enum() == TxnType.AssetTransfer,
# Transfer asset is liquidity token
Gtxn[2].xfer_asset() == key_liquidity_token.value(),
# Asset sender is zero address
Gtxn[2].asset_sender() == Global.zero_address(),
# Asset receiver is the escrow account
Gtxn[2].asset_receiver() == Txn.accounts[1],
# Is not a close transaction
Gtxn[2].close_remainder_to() == Global.zero_address(),
# Is not a close asset transaction
Gtxn[2].asset_close_to() == Global.zero_address(),
)
),
Int(1),
])
on_withdraw_protocol_fees = Seq([
key_token1,
key_token2,
Assert(
And(
# Group has 4 transactions
Global.group_size() == Int(4),
# This ApplicationCall is the first transaction
Txn.group_index() == Int(0),
# No additional actions needed from this transaction
Txn.on_completion() == OnComplete.NoOp,
# Has one additional account attached
Txn.accounts.length() == Int(1),
# Has one application argument attached
Txn.application_args.length() == Int(1),
# Sender is developer
Txn.sender() == App.globalGet(KEY_CREATOR),
# Second txn to Manager
# is of type ApplicationCall
Gtxn[1].type_enum() == TxnType.ApplicationCall,
# No additional actions needed
Gtxn[1].on_completion() == OnComplete.NoOp,
# Has one additional account attached
Gtxn[1].accounts.length() == Int(1),
# Has one application argument attached
Gtxn[1].application_args.length() == Int(1),
# Additional account is same as first txn
Txn.accounts[1] == Gtxn[1].accounts[1],
# Application argument is same as first txn
Txn.application_args[0] == Gtxn[1].application_args[0],
# Sender is developer
Gtxn[1].sender() == App.globalGet(KEY_CREATOR),
# Third txn from Escrow to Developer
# is of type AssetTransfer
Gtxn[2].type_enum() == TxnType.AssetTransfer,
# Transfer asset is Token 1
Gtxn[2].xfer_asset() == key_token1.value(),
# sender is escrow
Gtxn[2].sender() == Txn.accounts[1],
# is not a clawback transaction
Gtxn[2].asset_sender() == Global.zero_address(),
# Fourth txn from Escrow to Developer
# is of type AssetTransfer
Gtxn[3].type_enum() == TxnType.AssetTransfer,
# Transfer asset is Token 2
Gtxn[3].xfer_asset() == key_token2.value(),
# sender is escrow
Gtxn[3].sender() == Txn.accounts[1],
# is not a clawback transaction
Gtxn[3].asset_sender() == Global.zero_address(),
)
),
Int(1)
])
on_refund = Seq([
Assert(
And(
# Group has 3 transactions
Global.group_size() == Int(3),
# This ApplicationCall is the first transaction
Txn.group_index() == Int(0),
# No additional actions needed from this transaction
Txn.on_completion() == OnComplete.NoOp,
# Has one additional account attached
Txn.accounts.length() == Int(1),
# Has one application argument attached
Txn.application_args.length() == Int(1),
# Second txn to Manager
# is of type ApplicationCall
Gtxn[1].type_enum() == TxnType.ApplicationCall,
# No additional actions needed
Gtxn[1].on_completion() == OnComplete.NoOp,
# Has one additional account attached
Gtxn[1].accounts.length() == Int(1),
# Has one application argument attached
Gtxn[1].application_args.length() == Int(1),
# Additional account is same as first txn
Txn.accounts[1] == Gtxn[1].accounts[1],
# Application argument is same as first txn
Txn.application_args[0] == Gtxn[1].application_args[0],
# Third txn from Escrow
# is of type AssetTransfer
Gtxn[2].type_enum() == TxnType.AssetTransfer,
# sender is escrow
Gtxn[2].sender() == Txn.accounts[1],
# is not a clawback transaction
Gtxn[2].asset_sender() == Global.zero_address(),
)
),
Int(1)
])
program = Cond(
[Txn.application_id() == Int(0),
on_create],
[Txn.on_completion() == OnComplete.CloseOut,
on_closeout],
[Txn.on_completion() == OnComplete.OptIn,
on_opt_in],
[Txn.application_args[0] == TRANSACTION_TYPE_SWAP_DEPOSIT_TOKEN1_TO_TOKEN2,
on_swap_deposit],
[Txn.application_args[0] == TRANSACTION_TYPE_SWAP_DEPOSIT_TOKEN2_TO_TOKEN1,
on_swap_deposit_2],
[Txn.application_args[0] == TRANSACTION_TYPE_ADD_LIQUIDITY_DEPOSIT,
on_add_liquidity_deposit],
[Txn.application_args[0] == TRANSACTION_TYPE_WITHDRAW_LIQUIDITY,
on_withdraw_liquidity],
[Txn.application_args[0] == TRANSACTION_TYPE_REFUND,
on_refund],
[Txn.application_args[0] == TRANSACTION_TYPE_WITHDRAW_PROTOCOL_FEES,
on_withdraw_protocol_fees],
)
return program
def clear_program():
return Int(1)
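# --- Compilation sketch (added for illustration; not part of the original contract) ---
# The docstring of approval_program() above describes the commands this validator
# accepts; to deploy it, both programs are typically compiled to TEAL. The TEAL
# version number here is an assumption, not something this file specifies.
if __name__ == "__main__":
    approval_teal = compileTeal(approval_program(), mode=Mode.Application, version=4)
    clear_teal = compileTeal(clear_program(), mode=Mode.Application, version=4)
    print(approval_teal.splitlines()[0])  # e.g. "#pragma version 4"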
| 41.647783
| 84
| 0.549057
| 1,840
| 16,909
| 4.902717
| 0.088587
| 0.023279
| 0.045893
| 0.03769
| 0.834497
| 0.804124
| 0.784503
| 0.740273
| 0.724643
| 0.711784
| 0
| 0.022587
| 0.358507
| 16,909
| 405
| 85
| 41.750617
| 0.809072
| 0.300077
| 0
| 0.673077
| 0
| 0
| 0.001289
| 0
| 0
| 0
| 0
| 0.002469
| 0.028846
| 1
| 0.009615
| false
| 0
| 0.004808
| 0.004808
| 0.024038
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f4849cffd580600cad74c2411b1650b1e8ef1a63
| 2,880
|
py
|
Python
|
marketvis/visualization.py
|
ryanpstauffer/market-vis
|
257e23af786ff5612a2765a15efe8fb54e63fdb0
|
[
"MIT"
] | 2
|
2016-04-04T22:44:10.000Z
|
2021-07-16T10:32:19.000Z
|
marketvis/visualization.py
|
hyperfraise/market-vis
|
257e23af786ff5612a2765a15efe8fb54e63fdb0
|
[
"MIT"
] | 6
|
2016-04-04T19:02:52.000Z
|
2016-04-06T18:37:13.000Z
|
marketvis/visualization.py
|
hyperfraise/market-vis
|
257e23af786ff5612a2765a15efe8fb54e63fdb0
|
[
"MIT"
] | 4
|
2016-04-04T22:44:58.000Z
|
2021-07-16T10:32:21.000Z
|
# -*- coding: utf-8 -*-
"""
[Python 2.7 (Mayavi is not yet compatible with Python 3+)]
Created on Tue Feb 10 18:27:17 2015
@author: Ryan Stauffer
https://github.com/ryanpstauffer/market-vis
Market Visualization Prototype
Visualization and Interactive module
"""
import numpy as np
import moviepy.editor as mpy
def visualizePrices(prices):
'''Creates a mayavi visualization of a pd DataFrame containing stock prices
Inputs:
prices => a pd DataFrame, w/ index: dates; columns: company names
'''
#Imports mlab here to delay starting of mayavi engine until necessary
from mayavi import mlab
#Because of current mayavi requirements, replaces dates and company names with integers
x_length, y_length = prices.shape
xTime = np.array([list(xrange(x_length)),] * y_length).transpose()
yCompanies = np.array([list(xrange(y_length)),] * x_length)
#Sort indexed prices by total return on last date
lastDatePrices = prices.iloc[-1]
lastDatePrices.sort_values(inplace=True)
sortOrder = lastDatePrices.index
zPrices = prices[sortOrder]
#Create mayavi2 object
fig = mlab.figure(bgcolor=(.4,.4,.4))
vis = mlab.surf(xTime, yCompanies, zPrices)
mlab.outline(vis)
mlab.orientation_axes(vis)
#mlab.title('S&P 500 Market Data Visualization', size = .25)
mlab.axes(vis, nb_labels=0, xlabel = 'Time', ylabel = 'Company', zlabel = 'Price')
mlab.show()
def make_frame(t):
mlab.view(elevation=70, azimuth=360*t/4.0, distance=1400) #Camera angle
return mlab.screenshot(antialiased=True)
def animateGIF(filename, prices):
'''Creates a mayavi visualization of a pd DataFrame containing stock prices
Then uses MoviePy to animate and save as a gif
Inputs:
prices => a pd DataFrame, w/ index: dates; columns: company names
'''
#Imports mlab here to delay starting of mayavi engine until necessary
from mayavi import mlab
#Because of current mayavi requirements, replaces dates and company names with integers
x_length, y_length = prices.shape
xTime = np.array([list(xrange(x_length)),] * y_length).transpose()
yCompanies = np.array([list(xrange(y_length)),] * x_length)
#Sort indexed prices by total return on last date
lastDatePrices = prices.iloc[-1]
lastDatePrices.sort_values(inplace=True)
sortOrder = lastDatePrices.index
zPrices = prices[sortOrder]
#Create mayavi2 object
fig = mlab.figure(bgcolor=(.4,.4,.4))
vis = mlab.surf(xTime, yCompanies, zPrices)
mlab.outline(vis)
mlab.orientation_axes(vis)
mlab.axes(vis, nb_labels=0, xlabel = 'Time', ylabel = 'Company', zlabel = 'Price')
animation = mpy.VideoClip(make_frame, duration = 4).resize(1.0)
animation.write_gif(filename, fps=20)
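# Example usage (a sketch, not from the original file): `prices` is assumed to be a
# pandas DataFrame indexed by date with one column per company, as the docstrings
# above describe; the CSV path is hypothetical.
# import pandas as pd
# prices = pd.read_csv('sp500_prices.csv', index_col=0, parse_dates=True)
# visualizePrices(prices)
# animateGIF('market.gif', prices)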
| 37.402597
| 92
| 0.68125
| 381
| 2,880
| 5.094488
| 0.406824
| 0.021638
| 0.02473
| 0.028851
| 0.723338
| 0.723338
| 0.723338
| 0.723338
| 0.723338
| 0.723338
| 0
| 0.02171
| 0.216319
| 2,880
| 77
| 93
| 37.402597
| 0.838281
| 0.386111
| 0
| 0.722222
| 0
| 0
| 0.019572
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.111111
| 0
| 0.222222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f488f75c5f0b6a0b72d4b6e75d30aee369ad29dd
| 63
|
py
|
Python
|
tests/core/test_import.py
|
xiawu/newchain-account.py
|
c7d8af2161e98580f5d7add3e862948a6a827ef1
|
[
"MIT"
] | 1
|
2019-06-08T14:14:07.000Z
|
2019-06-08T14:14:07.000Z
|
tests/core/test_import.py
|
xiawu/newchain-account.py
|
c7d8af2161e98580f5d7add3e862948a6a827ef1
|
[
"MIT"
] | null | null | null |
tests/core/test_import.py
|
xiawu/newchain-account.py
|
c7d8af2161e98580f5d7add3e862948a6a827ef1
|
[
"MIT"
] | null | null | null |
def test_import():
    import newchain_account  # noqa: F401
| 12.6
| 41
| 0.698413
| 8
| 63
| 5.25
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.061224
| 0.222222
| 63
| 4
| 42
| 15.75
| 0.795918
| 0.15873
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 1
| 0
| 1.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f4891b303768ca6cbc7faac91403144c620ae36a
| 31
|
py
|
Python
|
cx_Freeze/samples/advanced/modules/testfreeze_1.py
|
lexa/cx_Freeze
|
f1f35d19e8e7e821733f86b4da7814c40be3bfd9
|
[
"PSF-2.0"
] | 358
|
2020-07-02T13:00:02.000Z
|
2022-03-29T10:03:57.000Z
|
cx_Freeze/samples/advanced/modules/testfreeze_1.py
|
lexa/cx_Freeze
|
f1f35d19e8e7e821733f86b4da7814c40be3bfd9
|
[
"PSF-2.0"
] | 372
|
2020-07-02T20:47:57.000Z
|
2022-03-31T19:35:05.000Z
|
cx_Freeze/samples/advanced/modules/testfreeze_1.py
|
lexa/cx_Freeze
|
f1f35d19e8e7e821733f86b4da7814c40be3bfd9
|
[
"PSF-2.0"
] | 78
|
2020-07-09T14:24:03.000Z
|
2022-03-22T19:06:52.000Z
|
print("Test freeze module #1")
| 15.5
| 30
| 0.709677
| 5
| 31
| 4.4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.037037
| 0.129032
| 31
| 1
| 31
| 31
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0.677419
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
be547c7b6d001a06456d6084d3b1147b512042f9
| 122
|
py
|
Python
|
HFSSdrawpyC12/__init__.py
|
c12qe/HFSSdrawpy
|
ef0d7218fdfe0a9d868deb83f8b50907e99ebd37
|
[
"MIT"
] | 8
|
2020-06-10T08:51:33.000Z
|
2022-03-23T01:19:47.000Z
|
HFSSdrawpyC12/__init__.py
|
c12qe/HFSSdrawpy
|
ef0d7218fdfe0a9d868deb83f8b50907e99ebd37
|
[
"MIT"
] | 17
|
2020-05-06T12:16:43.000Z
|
2021-03-27T17:33:56.000Z
|
HFSSdrawpyC12/__init__.py
|
c12qe/HFSSdrawpy
|
ef0d7218fdfe0a9d868deb83f8b50907e99ebd37
|
[
"MIT"
] | 14
|
2020-05-06T11:04:10.000Z
|
2021-10-19T05:48:10.000Z
|
from .core.body import Body
from .core.entity import Entity
from .core.modeler import Modeler
from .core.port import Port
| 24.4
| 33
| 0.803279
| 20
| 122
| 4.9
| 0.35
| 0.326531
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.131148
| 122
| 4
| 34
| 30.5
| 0.924528
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
bea1c79feec52aecd5a96d1dabce33f75ae0ed53
| 104
|
py
|
Python
|
site/thicc/apps/rules/views.py
|
aldenjenkins/ThiccGaming
|
4790d2568b019438d1569d0fe4e9f9aba008b737
|
[
"BSD-3-Clause"
] | null | null | null |
site/thicc/apps/rules/views.py
|
aldenjenkins/ThiccGaming
|
4790d2568b019438d1569d0fe4e9f9aba008b737
|
[
"BSD-3-Clause"
] | 9
|
2020-03-24T16:20:31.000Z
|
2022-03-11T23:32:38.000Z
|
site/thicc/apps/rules/views.py
|
aldenjenkins/ThiccGaming
|
4790d2568b019438d1569d0fe4e9f9aba008b737
|
[
"BSD-3-Clause"
] | null | null | null |
from django.shortcuts import render
def index(request):
    return render(request, 'rules/rules.html')
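# A sketch (not part of the original file) of how this view would typically be wired
# into the app's urls.py; the module path and URL name are assumptions:
# from django.urls import path
# from . import views
# urlpatterns = [path('', views.index, name='rules')]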
| 20.8
| 46
| 0.759615
| 14
| 104
| 5.642857
| 0.785714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.134615
| 104
| 4
| 47
| 26
| 0.877778
| 0
| 0
| 0
| 0
| 0
| 0.153846
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
fe20fef8ed4e19f994fded3d145994639a283682
| 104
|
py
|
Python
|
net/__init__.py
|
renjunxiang/ccks2019_el
|
67b7b35312c06248ea1deccbfb37cf5d8e5c6376
|
[
"MIT"
] | 99
|
2019-08-01T01:04:54.000Z
|
2022-03-17T09:00:14.000Z
|
net/__init__.py
|
ZhouXiaoLeilei/ccks2019_el-1
|
67b7b35312c06248ea1deccbfb37cf5d8e5c6376
|
[
"MIT"
] | 5
|
2019-08-06T02:16:20.000Z
|
2021-12-12T15:37:27.000Z
|
net/__init__.py
|
ZhouXiaoLeilei/ccks2019_el-1
|
67b7b35312c06248ea1deccbfb37cf5d8e5c6376
|
[
"MIT"
] | 18
|
2019-08-10T11:18:29.000Z
|
2022-03-15T04:44:52.000Z
|
from .dataset import MyDataset, collate_fn, deal_eval, seqs2batch, collate_fn_link
from .Net import Net
| 34.666667
| 82
| 0.826923
| 16
| 104
| 5.125
| 0.6875
| 0.219512
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.01087
| 0.115385
| 104
| 2
| 83
| 52
| 0.880435
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
fe43e146c8657bad99dec21dde742084c42e8ca8
| 44
|
py
|
Python
|
tests/res/apps/urls_app/views.py
|
appsumo/Coffin-custom
|
172a8efa4f3deeac1d0c7adbd0f114dbb73bbd8a
|
[
"BSD-3-Clause"
] | null | null | null |
tests/res/apps/urls_app/views.py
|
appsumo/Coffin-custom
|
172a8efa4f3deeac1d0c7adbd0f114dbb73bbd8a
|
[
"BSD-3-Clause"
] | null | null | null |
tests/res/apps/urls_app/views.py
|
appsumo/Coffin-custom
|
172a8efa4f3deeac1d0c7adbd0f114dbb73bbd8a
|
[
"BSD-3-Clause"
] | null | null | null |
def index(r):
    pass

def sum(r):
    pass
| 8.8
| 13
| 0.545455
| 8
| 44
| 3
| 0.625
| 0.416667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.318182
| 44
| 5
| 14
| 8.8
| 0.8
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
feb0701d3d257b89dd24176ecf0e1ddc9e438c1a
| 54
|
py
|
Python
|
loaders/__init__.py
|
r4ghu/IntroToPyTorch
|
6d9b326adf70a9dbcd99a9713a4159de90d9d2fd
|
[
"Apache-2.0"
] | 5
|
2019-03-24T07:33:12.000Z
|
2021-08-10T07:10:00.000Z
|
loaders/__init__.py
|
r4ghu/IntroToPyTorch
|
6d9b326adf70a9dbcd99a9713a4159de90d9d2fd
|
[
"Apache-2.0"
] | 1
|
2019-07-30T02:08:18.000Z
|
2019-07-30T02:08:18.000Z
|
loaders/__init__.py
|
r4ghu/StyleTransfer-PyTorch
|
ce0dbb4515d2b4a38692a959a04015bb90caf9ac
|
[
"BSD-3-Clause"
] | 1
|
2021-08-10T07:10:01.000Z
|
2021-08-10T07:10:01.000Z
|
from .data_loader import *
from .model_loader import *
| 27
| 27
| 0.796296
| 8
| 54
| 5.125
| 0.625
| 0.585366
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12963
| 54
| 2
| 27
| 27
| 0.87234
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
feb6ecedd517ee5e7a14248fbb561f3ef07997ee
| 2,820
|
py
|
Python
|
api/detimotic/hono.py
|
catarinaacsilva/eclipseHono-Ditto
|
1bfd4bf3c26b5c30bb3107dceabb33cf1b54ea63
|
[
"MIT"
] | 1
|
2020-03-18T12:36:56.000Z
|
2020-03-18T12:36:56.000Z
|
api/detimotic/hono.py
|
catarinaacsilva/eclipseHono_Ditto
|
1bfd4bf3c26b5c30bb3107dceabb33cf1b54ea63
|
[
"MIT"
] | null | null | null |
api/detimotic/hono.py
|
catarinaacsilva/eclipseHono_Ditto
|
1bfd4bf3c26b5c30bb3107dceabb33cf1b54ea63
|
[
"MIT"
] | 1
|
2020-09-15T04:01:45.000Z
|
2020-09-15T04:01:45.000Z
|
# coding: utf-8
__author__ = 'Catarina Silva'
__version__ = '0.2'
__email__ = 'c.alexandracorreia@ua.pt'
__status__ = 'Development'
import logging
import requests
#logging.basicConfig(level=logging.DEBUG,format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',datefmt='%m-%d %H:%M:%S')
logger = logging.getLogger('HONO API')
class Hono:
    def __init__(self, addr='192.168.85.107'):
        self.addr = addr

    def tenant_create(self, tenant):
        url = 'http://{}:28080/tenant/'.format(self.addr)
        response = requests.post(url, json={'tenant-id':tenant})
        print(response.status_code)
        if int(response.status_code//100) == 2:
            return True
        else:
            logger.error('%s: %s', response.status_code, response.text)
            return False

    def tenant_delete(self, tenant):
        url = 'http://{}:28080/tenant/{}'.format(self.addr, tenant)
        response = requests.delete(url)
        if int(response.status_code//100) == 2:
            return True
        else:
            logger.error('%s: %s', response.status_code, response.text)
            return False

    def device_create(self, tenant, device):
        url = 'http://{}:28080/registration/{}'.format(self.addr, tenant)
        response = requests.post(url, json={'device-id':device})
        if int(response.status_code//100) == 2:
            return True
        else:
            logger.error('%s: %s', response.status_code, response.text)
            return False

    def device_delete(self, tenant, device):
        url = 'http://{}:28080/registration/{}/{}'.format(self.addr, tenant, device)
        response = requests.delete(url)
        if int(response.status_code//100) == 2:
            return True
        else:
            logger.error('%s: %s', response.status_code, response.text)
            return False

    def credentials_create(self, tenant, device, login, password):
        url = 'http://{}:28080/credentials/{}'.format(self.addr, tenant)
        response = requests.post(url, json={
            'device-id': device,
            'type': 'hashed-password',
            'auth-id': login,
            'secrets': [{'pwd-plain': password}]})
        if int(response.status_code//100) == 2:
            return True
        else:
            logger.error('%s: %s', response.status_code, response.text)
            return False

    # deletes the credentials registered for a device
    def credentials_delete(self, tenant, device):
        url = 'http://{}:28080/credentials/{}/{}'.format(self.addr, tenant, device)
        response = requests.delete(url)
        if int(response.status_code//100) == 2:
            return True
        else:
            logger.error('%s: %s', response.status_code, response.text)
            return False
#hono = Hono()
#tenant = 'demo'
#device = 'laptop'
#print(hono.tenant_create('demo'))
| 31.685393
| 127
| 0.593972
| 332
| 2,820
| 4.924699
| 0.243976
| 0.111315
| 0.143119
| 0.069725
| 0.722324
| 0.70581
| 0.702141
| 0.702141
| 0.702141
| 0.650765
| 0
| 0.03357
| 0.25
| 2,820
| 88
| 128
| 32.045455
| 0.73948
| 0.07695
| 0
| 0.539683
| 0
| 0
| 0.136749
| 0.009245
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0.063492
| 0.031746
| 0
| 0.349206
| 0.015873
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
22915b91623e8b3eeb5b8c65d97961d3e45b148d
| 14,570
|
py
|
Python
|
morf-python-api/build/lib/morf/workflow/extract.py
|
jpgard/morf
|
f17afcacef68929a5ce9e7714208be1002a42418
|
[
"MIT"
] | 14
|
2018-06-27T13:15:46.000Z
|
2021-08-30T08:24:38.000Z
|
morf-python-api/build/lib/morf/workflow/extract.py
|
jpgard/morf
|
f17afcacef68929a5ce9e7714208be1002a42418
|
[
"MIT"
] | 58
|
2018-02-03T15:31:15.000Z
|
2019-10-15T02:12:05.000Z
|
morf-python-api/build/lib/morf/workflow/extract.py
|
jpgard/morf
|
f17afcacef68929a5ce9e7714208be1002a42418
|
[
"MIT"
] | 7
|
2018-03-29T14:47:34.000Z
|
2021-06-22T01:34:52.000Z
|
# Copyright (c) 2018 The Regents of the University of Michigan
# and the University of Pennsylvania
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Feature extraction functions for the MORF 2.0 API. For more information about the API, see the documentation.
"""
from multiprocessing import Pool
from morf.utils.alerts import send_email_alert
from morf.utils.api_utils import *
from morf.utils.config import MorfJobConfig
from morf.utils.job_runner_utils import run_image
from morf.utils.log import set_logger_handlers
# define module-level variables for config.properties
CONFIG_FILENAME = "config.properties"
module_logger = logging.getLogger(__name__)
def extract_all():
"""
Extract features using the docker image across all courses and all sessions except holdout.
:return:
"""
mode = "extract"
level = "all"
job_config = MorfJobConfig(CONFIG_FILENAME)
job_config.update_mode(mode)
# clear any preexisting data for this user/job/mode
clear_s3_subdirectory(job_config)
# only call job_runner once with --mode=extract and --level=all; this will load ALL data up and run the docker image
run_image(job_config, job_config.raw_data_buckets, level=level)
result_file = collect_all_results(job_config)
upload_key = make_s3_key_path(job_config, filename=result_file)
upload_file_to_s3(result_file, bucket=job_config.proc_data_bucket, key=upload_key)
os.remove(result_file)
send_email_alert(job_config)
return
def extract_course(raw_data_dir="morf-data/", multithread = True):
"""
Extract features using the Docker image, building individual feature sets for each course.
:return:
"""
mode = "extract"
level = "course"
job_config = MorfJobConfig(CONFIG_FILENAME)
job_config.update_mode(mode)
logger = set_logger_handlers(module_logger, job_config)
# clear any preexisting data for this user/job/mode
clear_s3_subdirectory(job_config)
if multithread:
num_cores = job_config.max_num_cores
else:
num_cores = 1
# call job_runner once per course with --mode=extract and --level=course
for raw_data_bucket in job_config.raw_data_buckets:
logger.info("processing bucket {}".format(raw_data_bucket))
courses = fetch_courses(job_config, raw_data_bucket, raw_data_dir)
reslist = []
with Pool(num_cores) as pool:
for course in courses:
poolres = pool.apply_async(run_image, [job_config, raw_data_bucket, course, None, level, None])
reslist.append(poolres)
pool.close()
pool.join()
for res in reslist:
logger.info(res.get())
result_file = collect_course_results(job_config)
upload_key = make_s3_key_path(job_config, filename=result_file)
upload_file_to_s3(result_file, bucket=job_config.proc_data_bucket, key=upload_key)
os.remove(result_file)
send_email_alert(job_config)
return
def extract_session(labels=False, raw_data_dir="morf-data/", label_type="labels-train", multithread=True):
"""
Extract features using the Docker image, building individual feature sets for each "session" or iteration of the course.
:labels: flag for whether this is a job to generate output labels; if so, the collected result file is copied back into the raw data folder in s3 (as labels-train.csv).
:raw_data_dir: path to directory in all data buckets where course-level directories are located; this should be uniform for every raw data bucket.
:label_type: type of outcome label to use (string).
:multithread: whether to run job in parallel (multithread = false can be useful for debugging).
:return:
"""
level = "session"
mode = "extract"
job_config = MorfJobConfig(CONFIG_FILENAME)
job_config.update_mode(mode)
logger = set_logger_handlers(module_logger, job_config)
# clear any preexisting data for this user/job/mode and set number of cores
clear_s3_subdirectory(job_config)
if multithread:
num_cores = job_config.max_num_cores
else:
num_cores = 1
# for each bucket, call job_runner once per session with --mode=extract and --level=session
for raw_data_bucket in job_config.raw_data_buckets:
logger.info("processing bucket {}".format(raw_data_bucket))
courses = fetch_courses(job_config, raw_data_bucket, raw_data_dir)
reslist = []
with Pool(num_cores) as pool:
for course in courses:
for session in fetch_sessions(job_config, raw_data_bucket, raw_data_dir, course, fetch_holdout_session_only=False):
poolres = pool.apply_async(run_image, [job_config, raw_data_bucket, course, session, level])
reslist.append(poolres)
pool.close()
pool.join()
for res in reslist:
logger.info(res.get())
if not labels: # normal feature extraction job; collects features across all buckets and upload to proc_data_bucket
result_file = collect_session_results(job_config)
upload_key = "{}/{}/extract/{}".format(job_config.user_id, job_config.job_id, result_file)
upload_file_to_s3(result_file, bucket=job_config.proc_data_bucket, key=upload_key)
else: # label extraction job; copy file into raw course data dir instead of proc_data_bucket, creating separate label files for each bucket
for raw_data_bucket in job_config.raw_data_buckets:
result_file = collect_session_results(job_config, raw_data_buckets=[raw_data_bucket])
upload_key = raw_data_dir + "{}.csv".format(label_type)
upload_file_to_s3(result_file, bucket=raw_data_bucket, key=upload_key)
os.remove(result_file)
send_email_alert(job_config)
return
def extract_holdout_all():
"""
Extract features using the Docker image across all courses and all sessions of holdout data.
:return:
"""
mode = "extract-holdout"
level = "all"
job_config = MorfJobConfig(CONFIG_FILENAME)
job_config.update_mode(mode)
# clear any preexisting data for this user/job/mode
clear_s3_subdirectory(job_config)
# only call job_runner once with --mode=extract-holdout and --level=all; this will load ALL data up and run the docker image
run_image(job_config, job_config.raw_data_buckets, level=level)
result_file = collect_all_results(job_config)
upload_key = make_s3_key_path(job_config, filename=result_file)
upload_file_to_s3(result_file, bucket=job_config.proc_data_bucket, key=upload_key)
os.remove(result_file)
send_email_alert(job_config)
return
def extract_holdout_course(raw_data_dir="morf-data/", multithread = True):
"""
Extract features using the Docker image across each course of holdout data.
:return:
"""
mode = "extract-holdout"
level = "course"
job_config = MorfJobConfig(CONFIG_FILENAME)
job_config.update_mode(mode)
logger = set_logger_handlers(module_logger, job_config)
# clear any preexisting data for this user/job/mode
clear_s3_subdirectory(job_config)
if multithread:
num_cores = job_config.max_num_cores
else:
num_cores = 1
# call job_runner once per course with --mode=extract-holdout and --level=course
for raw_data_bucket in job_config.raw_data_buckets:
logger.info("processing bucket {}".format(raw_data_bucket))
courses = fetch_courses(job_config, raw_data_bucket, raw_data_dir)
reslist = []
with Pool(num_cores) as pool:
for course in courses:
holdout_session = fetch_sessions(job_config, raw_data_bucket, raw_data_dir, course,
fetch_holdout_session_only=True)[0] # only use holdout run; unlisted
poolres = pool.apply_async(run_image, [job_config, raw_data_bucket, course, holdout_session, level, None])
reslist.append(poolres)
pool.close()
pool.join()
for res in reslist:
logger.info(res.get())
result_file = collect_course_results(job_config)
upload_key = make_s3_key_path(job_config, filename=result_file)
upload_file_to_s3(result_file, bucket=job_config.proc_data_bucket, key=upload_key)
os.remove(result_file)
send_email_alert(job_config)
return
def extract_holdout_session(labels=False, raw_data_dir="morf-data/", label_type="labels-train", multithread=True):
"""
Extract features using the Docker image across each session of holdout data.
:labels: flag for whether this is a job to generate output labels; if so, the collected result file is copied back into the raw data folder in s3 (as labels-test.csv).
:return: None
"""
mode = "extract-holdout"
level = "session"
job_config = MorfJobConfig(CONFIG_FILENAME)
job_config.update_mode(mode)
logger = set_logger_handlers(module_logger, job_config)
# call job_runner once per session with --mode=extract-holdout and --level=session
# clear any preexisting data for this user/job/mode
clear_s3_subdirectory(job_config)
if multithread:
num_cores = job_config.max_num_cores
else:
num_cores = 1
for raw_data_bucket in job_config.raw_data_buckets:
logger.info("[INFO] processing bucket {}".format(raw_data_bucket))
courses = fetch_courses(job_config, raw_data_bucket, raw_data_dir)
reslist = []
with Pool(num_cores) as pool:
for course in courses:
holdout_session = fetch_sessions(job_config, raw_data_bucket, raw_data_dir, course,
fetch_holdout_session_only=True)[0] # only use holdout run; unlisted
poolres = pool.apply_async(run_image, [job_config, raw_data_bucket, course, holdout_session, level])
reslist.append(poolres)
pool.close()
pool.join()
for res in reslist:
logger.info(res.get())
if not labels: # normal feature extraction job; collects features across all buckets and upload to proc_data_bucket
result_file = collect_session_results(job_config, holdout=True)
upload_key = "{}/{}/{}/{}".format(job_config.user_id, job_config.job_id, job_config.mode, result_file)
upload_file_to_s3(result_file, bucket=job_config.proc_data_bucket, key=upload_key)
else: # label extraction job; copy file into raw course data dir instead of proc_data_bucket, creating separate label files for each bucket
for raw_data_bucket in job_config.raw_data_buckets:
result_file = collect_session_results(job_config, raw_data_buckets=[raw_data_bucket], holdout = True)
upload_key = raw_data_dir + "{}-test.csv".format(label_type)
upload_file_to_s3(result_file, bucket=raw_data_bucket, key=upload_key)
os.remove(result_file)
send_email_alert(job_config)
return
def fork_features(job_id_to_fork, raw_data_dir = "morf-data/"):
"""
Copies features from job_id_to_fork into current job_id.
:param job_id_to_fork: string, name of job_id (must be from same user).
:return: None.
"""
job_config = MorfJobConfig(CONFIG_FILENAME)
#todo: multithread this
for mode in ["extract", "extract-holdout"]:
job_config.update_mode(mode)
clear_s3_subdirectory(job_config)
for raw_data_bucket in job_config.raw_data_buckets:
print("[INFO] forking features from bucket {} mode {}".format(raw_data_bucket, mode))
courses = fetch_courses(job_config, raw_data_bucket, raw_data_dir)
for course in courses:
for session in fetch_sessions(job_config, raw_data_bucket, raw_data_dir, course,
fetch_holdout_session_only = mode == "extract-holdout"):
# get current location of file, with old jobid name
prev_job_archive_filename = generate_archive_filename(job_config, course = course, session = session, mode = mode, job_id = job_id_to_fork)
# get location of prev archive file in s3
prev_job_key = make_s3_key_path(job_config, filename=prev_job_archive_filename, course=course, session=session, mode=mode, job_id=job_id_to_fork)
prev_job_s3_url = "s3://{}/{}".format(job_config.proc_data_bucket, prev_job_key)
# make new location of file, with new jobid name
current_job_archive_filename = generate_archive_filename(job_config, course=course, session=session,
mode=mode)
# copy from the current location to the new location
current_job_key = make_s3_key_path(job_config, filename=current_job_archive_filename, course=course,
session=session, mode=mode)
current_job_s3_url = "s3://{}/{}".format(job_config.proc_data_bucket, current_job_key)
copy_s3_file(job_config, sourceloc = prev_job_s3_url, destloc = current_job_s3_url)
# after copying individual extraction results, copy collected feature file
result_file = collect_session_results(job_config, holdout = mode == "extract-holdout")
upload_key = "{}/{}/{}/{}".format(job_config.user_id, job_config.job_id, job_config.mode, result_file)
upload_file_to_s3(result_file, bucket=job_config.proc_data_bucket, key=upload_key)
return
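# Example workflow (a sketch, not from the original module; assumes a valid
# config.properties in the working directory, and the job id is hypothetical):
# extract_session()                   # per-session training features
# extract_holdout_session()           # features for the held-out sessions
# fork_features("previous_job_id")    # or reuse features extracted by an earlier job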
| 50.590278
| 172
| 0.704118
| 2,016
| 14,570
| 4.815476
| 0.130456
| 0.087145
| 0.040173
| 0.039555
| 0.746807
| 0.735579
| 0.730326
| 0.730326
| 0.717141
| 0.690255
| 0
| 0.00387
| 0.219698
| 14,570
| 287
| 173
| 50.766551
| 0.850031
| 0.297117
| 0
| 0.780105
| 0
| 0
| 0.04477
| 0
| 0
| 0
| 0
| 0.003484
| 0
| 1
| 0.036649
| false
| 0
| 0.031414
| 0
| 0.104712
| 0.005236
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
22ccae36cfa8b58a84dc31a2ae406b9e7a0c57a5
| 18,119
|
py
|
Python
|
base/target/docker-startup/10-initial.startup/gp_startup/gp_log.py
|
GriffinPlus/docker-base
|
9444072146a43abba971b55e8744b1619814ad32
|
[
"MIT"
] | null | null | null |
base/target/docker-startup/10-initial.startup/gp_startup/gp_log.py
|
GriffinPlus/docker-base
|
9444072146a43abba971b55e8744b1619814ad32
|
[
"MIT"
] | null | null | null |
base/target/docker-startup/10-initial.startup/gp_startup/gp_log.py
|
GriffinPlus/docker-base
|
9444072146a43abba971b55e8744b1619814ad32
|
[
"MIT"
] | 1
|
2021-07-23T12:00:08.000Z
|
2021-07-23T12:00:08.000Z
|
"""
This module contains logging functions.
Author: Sascha Falk <sascha@falk-online.eu>
License: MIT License
"""
import abc
import datetime
import os
import socket
import sys
from syslog import syslog, openlog, closelog, \
LOG_EMERG, LOG_ALERT, LOG_CRIT, LOG_ERR, LOG_WARNING, LOG_NOTICE, LOG_INFO, LOG_DEBUG, \
LOG_KERN, LOG_USER, LOG_MAIL, LOG_DAEMON, LOG_AUTH, LOG_LPR, LOG_NEWS, LOG_UUCP, LOG_CRON, LOG_SYSLOG, \
LOG_LOCAL0, LOG_LOCAL1, LOG_LOCAL2, LOG_LOCAL3, LOG_LOCAL4, LOG_LOCAL5, LOG_LOCAL6, LOG_LOCAL7
from .gp_extensions import classproperty
# ---------------------------------------------------------------------------------------------------------------------
class LoggerBase(object):
"""
Base class for custom loggers.
"""
__metaclass__ = abc.ABCMeta
_debug_level_enabled = False
_info_level_enabled = False
_note_level_enabled = False
_warning_level_enabled = False
_error_level_enabled = False
def __init__(self):
"""
Initializes the object.
"""
self.set_verbosity(4) # all levels except 'debug'
@abc.abstractmethod
def write_debug(self, text, *args):
"""
Writes a debug message to the log.
Args:
text (str) : Text to write to the log.
args (tuple) : Arguments to use when formatting the text.
"""
raise NotImplementedError("The method is abstract.")
@abc.abstractmethod
def write_info(self, text, *args):
"""
Writes an informational message to the log.
Args:
text (str) : Text to write to the log.
args (tuple) : Arguments to use when formatting the text.
"""
raise NotImplementedError("The method is abstract.")
@abc.abstractmethod
def write_note(self, text, *args):
"""
Writes a note to the log.
Args:
text (str) : Text to write to the log.
args (tuple) : Arguments to use when formatting the text.
"""
raise NotImplementedError("The method is abstract.")
@abc.abstractmethod
def write_warning(self, text, *args):
"""
Writes a warning to the log.
Args:
text (str) : Text to write to the log.
args (tuple) : Arguments to use when formatting the text.
"""
raise NotImplementedError("The method is abstract.")
@abc.abstractmethod
def write_error(self, text, *args):
"""
Writes an error to the log.
Args:
text (str) : Text to write to the log.
args (tuple) : Arguments to use when formatting the text.
"""
raise NotImplementedError("The method is abstract.")
@property
def uses_stdio(self):
"""
Gets a value indicating whether the log writes to stdout/stderr.
Returns:
Always False.
"""
return False
def set_verbosity(self, level):
"""
Sets the verbosity of startup system.
Args:
level (int): The minimum severity level of log messages to show:
0 = logging disabled
1 = error only
2 = error and warning
3 = error, warning and note
4 = error, warning, note and info
5 = all messages (error, warning, note, info, debug)
"""
self._error_level_enabled = level > 0
self._warning_level_enabled = level > 1
self._note_level_enabled = level > 2
self._info_level_enabled = level > 3
self._debug_level_enabled = level > 4
# ---------------------------------------------------------------------------------------------------------------------
class StdioLogger(LoggerBase):
"""
A logger that writes messages to stdio/stderr.
"""
def __init__(self):
"""
Initializes the object.
"""
super().__init__()
def write_debug(self, text, *args):
"""
Writes a debug message to the log.
Args:
text (str) : Text to write to the log.
args (tuple) : Arguments to use when formatting the text.
"""
if not self._debug_level_enabled: return
message = str(datetime.datetime.now()) + ' [debug] ' + text.format(*args) + '\n'
sys.stdout.write(message)
def write_info(self, text, *args):
"""
Writes an informational message to the log.
Args:
text (str) : Text to write to the log.
args (tuple) : Arguments to use when formatting the text.
"""
if not self._info_level_enabled: return
message = str(datetime.datetime.now()) + ' [info] ' + text.format(*args) + '\n'
sys.stdout.write(message)
def write_note(self, text, *args):
"""
Writes a note to the log.
Args:
text (str) : Text to write to the log.
args (tuple) : Arguments to use when formatting the text.
"""
if not self._note_level_enabled: return
message = str(datetime.datetime.now()) + ' [note] ' + text.format(*args) + '\n'
sys.stdout.write(message)
def write_warning(self, text, *args):
"""
Writes a warning to the log.
Args:
text (str) : Text to write to the log.
args (tuple) : Arguments to use when formatting the text.
"""
if not self._warning_level_enabled: return
message = str(datetime.datetime.now()) + ' [warning] ' + text.format(*args) + '\n'
sys.stdout.write(message)
def write_error(self, text, *args):
"""
Writes an error to the log.
Args:
text (str): Text to write to the log.
args (tuple) : Arguments to use when formatting the text.
"""
if not self._error_level_enabled: return
message = str(datetime.datetime.now()) + ' [error] ' + text.format(*args) + '\n'
sys.stderr.write(message)
@property
def uses_stdio(self):
"""
Gets a value indicating whether the log writes to stdout/stderr.
Returns:
Always True.
"""
return True
# ---------------------------------------------------------------------------------------------------------------------
class FileLogger(LoggerBase):
"""
A logger that writes messages to a file.
"""
__path = None
def __init__(self, path):
"""
Initializes the object.
Args:
path (str) : Path of the log file to write to.
"""
super().__init__()
self.__path = path
def write_debug(self, text, *args):
"""
Writes a debug message to the log.
Args:
text (str) : Text to write to the log.
args (tuple) : Arguments to use when formatting the text.
"""
if not self._debug_level_enabled: return
message = str(datetime.datetime.now()) + ' [debug] ' + text.format(*args) + '\n'
with open(self.__path, "a+", encoding="utf-8") as text_file:
text_file.write(message)
def write_info(self, text, *args):
"""
Writes an informational message to the log.
Args:
text (str) : Text to write to the log.
args (tuple) : Arguments to use when formatting the text.
"""
if not self._info_level_enabled: return
message = str(datetime.datetime.now()) + ' [info] ' + text.format(*args) + '\n'
with open(self.__path, "a+", encoding="utf-8") as text_file:
text_file.write(message)
def write_note(self, text, *args):
"""
Writes a note to the log.
Args:
text (str) : Text to write to the log.
args (tuple) : Arguments to use when formatting the text.
"""
if not self._note_level_enabled: return
message = str(datetime.datetime.now()) + ' [note] ' + text.format(*args) + '\n'
with open(self.__path, "a+", encoding="utf-8") as text_file:
text_file.write(message)
def write_warning(self, text, *args):
"""
Writes a warning to the log.
Args:
text (str) : Text to write to the log.
args (tuple) : Arguments to use when formatting the text.
"""
if not self._warning_level_enabled: return
message = str(datetime.datetime.now()) + ' [warning] ' + text.format(*args) + '\n'
with open(self.__path, "a+", encoding="utf-8") as text_file:
text_file.write(message)
def write_error(self, text, *args):
"""
Writes an error to the log.
Args:
text (str): Text to write to the log.
args (tuple) : Arguments to use when formatting the text.
"""
if not self._error_level_enabled: return
message = str(datetime.datetime.now()) + ' [error] ' + text.format(*args) + '\n'
with open(self.__path, "a+", encoding="utf-8") as text_file:
text_file.write(message)
# ---------------------------------------------------------------------------------------------------------------------
class SyslogLogger(LoggerBase):
"""
A logger that writes messages to syslog.
"""
__ident = None
__facility = LOG_LOCAL5
def __init__(self):
"""
Initializes the logger.
"""
super().__init__()
# use the container name as ident
self.__ident = "Docker ({0})".format(socket.gethostname())
def write_debug(self, text, *args):
"""
Writes a debug message to the log.
Args:
text (str) : Text to write to the log.
args (tuple) : Arguments to use when formatting the text.
"""
if not self._debug_level_enabled: return
message = text.format(*args)
openlog(ident = self.__ident, facility = self.__facility)
syslog(LOG_DEBUG, message)
closelog()
def write_info(self, text, *args):
"""
Writes an informational message to the log.
Args:
text (str) : Text to write to the log.
args (tuple) : Arguments to use when formatting the text.
"""
if not self._info_level_enabled: return
message = text.format(*args)
openlog(ident = self.__ident, facility = self.__facility)
syslog(LOG_INFO, message)
closelog()
def write_note(self, text, *args):
"""
Writes a note to the log.
Args:
text (str) : Text to write to the log.
args (tuple) : Arguments to use when formatting the text.
"""
if not self._note_level_enabled: return
message = text.format(*args)
openlog(ident = self.__ident, facility = self.__facility)
syslog(LOG_NOTICE, message)
closelog()
def write_warning(self, text, *args):
"""
Writes a warning to the log.
Args:
text (str) : Text to write to the log.
args (tuple) : Arguments to use when formatting the text.
"""
if not self._warning_level_enabled: return
message = text.format(*args)
openlog(ident = self.__ident, facility = self.__facility)
syslog(LOG_WARNING, message)
closelog()
def write_error(self, text, *args):
"""
Writes an error to the log.
Args:
text (str): Text to write to the log.
args (tuple) : Arguments to use when formatting the text.
"""
if not self._error_level_enabled: return
message = text.format(*args)
openlog(ident = self.__ident, facility = self.__facility)
syslog(LOG_ERR, message)
closelog()
# ---------------------------------------------------------------------------------------------------------------------
class CombinedLogger(LoggerBase):
"""
A logger that combines multiple other loggers.
"""
__loggers = []
def __init__(self, *loggers):
"""
Initializes the combined logger.
Args:
loggers (LoggerBase) : Loggers to combine.
"""
super().__init__()
self.__loggers = list(loggers)  # per-instance list, so loggers are not shared between CombinedLogger instances
def write_debug(self, text, *args):
"""
Writes a debug message to the log.
Args:
text (str) : Text to write to the log.
args (tuple) : Arguments to use when formatting the text.
"""
for logger in self.__loggers:
logger.write_debug(text, *args)
def write_info(self, text, *args):
"""
Writes an informational message to the log.
Args:
text (str) : Text to write to the log.
args (tuple) : Arguments to use when formatting the text.
"""
for logger in self.__loggers:
logger.write_info(text, *args)
def write_note(self, text, *args):
"""
Writes a note to the log.
Args:
text (str) : Text to write to the log.
args (tuple) : Arguments to use when formatting the text.
"""
for logger in self.__loggers:
logger.write_note(text, *args)
def write_warning(self, text, *args):
"""
Writes a warning to the log.
Args:
text (str) : Text to write to the log.
args (tuple) : Arguments to use when formatting the text.
"""
for logger in self.__loggers:
logger.write_warning(text, *args)
def write_error(self, text, *args):
"""
Writes an error to the log.
Args:
text (str): Text to write to the log.
args (tuple) : Arguments to use when formatting the text.
"""
for logger in self.__loggers:
logger.write_error(text, *args)
def set_verbosity(self, level):
"""
Sets the verbosity of startup system.
Args:
level (int): The minimum severity level of log messages to show:
0 = logging disabled
1 = error only
2 = error and warning
3 = error, warning and note
4 = error, warning, note and info
5 = all messages (error, warning, note, info, debug)
"""
for logger in self.__loggers:
logger.set_verbosity(level)
@property
def uses_stdio(self):
"""
Gets a value indicating whether the log writes to stdout/stderr.
Returns:
True, if the log writes to stdout/stderr; otherwise False.
"""
for logger in self.__loggers:
use = logger.uses_stdio
if use: return True
return False
def add(self, logger):
"""
Adds a logger to the combined logger.
Args:
logger (LoggerBase) : Logger to add.
"""
if not isinstance(logger, LoggerBase):
raise ValueError("The specified logger does not derive from 'LoggerBase'.")
self.__loggers.append(logger)
# ---------------------------------------------------------------------------------------------------------------------
class Log(object):
"""
The application's log.
"""
__instance = None
@classproperty
def instance(cls):
"""
Gets the singleton instance of the Log.
"""
if not Log.__instance:
Log.__instance = StdioLogger()
return Log.__instance
@instance.setter
def instance(cls, value):
"""
Sets the singleton instance of the Log.
"""
Log.__instance = value
@staticmethod
def write_debug(text, *args):
"""
Writes a debug message to the log.
Args:
text (str) : Text to write to the log.
args (list) : Arguments to use when formatting the text.
"""
Log.instance.write_debug(text, *args)
@staticmethod
def write_info(text, *args):
"""
Writes an informational message to the log.
Args:
text (str) : Text to write to the log.
args (tuple) : Arguments to use when formatting the text.
"""
Log.instance.write_info(text, *args)
@staticmethod
def write_note(text, *args):
"""
Writes a note to the log.
Args:
text (str) : Text to write to the log.
args (list) : Arguments to use when formatting the text.
"""
Log.instance.write_note(text, *args)
@staticmethod
def write_warning(text, *args):
"""
Writes a warning to the log.
Args:
text (str) : Text to write to the log.
args (list) : Arguments to use when formatting the text.
"""
Log.instance.write_warning(text, *args)
@staticmethod
def write_error(text, *args):
"""
Writes an error to the log.
Args:
text (str) : Text to write to the log.
args (list) : Arguments to use when formatting the text.
"""
Log.instance.write_error(text, *args)
@classproperty
def uses_stdio(cls):
"""
Gets a value indicating whether the log writes to stdout/stderr.
Returns:
True, if the log writes to stdout/stderr; otherwise False.
"""
return Log.instance.uses_stdio
@staticmethod
def set_verbosity(level):
"""
Sets the verbosity of startup system.
Args:
level (int): The minimum severity level of log messages to show:
0 = logging disabled
1 = error only
2 = error and warning
3 = error, warning and note
4 = error, warning, note and info
5 = all messages (error, warning, note, info, debug)
"""
Log.instance.set_verbosity(level)
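# Example usage (a sketch, not from the original module; the log file path is
# hypothetical):
# Log.instance = CombinedLogger(StdioLogger(), FileLogger('/var/log/startup.log'))
# Log.set_verbosity(4)
# Log.write_info("starting container on {0}", socket.gethostname())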
| 25.773826
| 123
| 0.535405
| 2,055
| 18,119
| 4.584428
| 0.087105
| 0.043944
| 0.05095
| 0.076425
| 0.772848
| 0.752786
| 0.737289
| 0.725507
| 0.725507
| 0.725507
| 0
| 0.003065
| 0.333793
| 18,119
| 702
| 124
| 25.810541
| 0.7774
| 0.401291
| 0
| 0.60804
| 0
| 0
| 0.037808
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.226131
| false
| 0
| 0.035176
| 0
| 0.371859
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
22e77d185f40f9849cff21518c1293c15710dcfa
| 428
|
py
|
Python
|
project_management/project_management/web_form/tasks/tasks.py
|
ashish-greycube/project_management
|
b77e5c2c737c8b62d2e9a2a4d928c062b9a06e70
|
[
"MIT"
] | null | null | null |
project_management/project_management/web_form/tasks/tasks.py
|
ashish-greycube/project_management
|
b77e5c2c737c8b62d2e9a2a4d928c062b9a06e70
|
[
"MIT"
] | null | null | null |
project_management/project_management/web_form/tasks/tasks.py
|
ashish-greycube/project_management
|
b77e5c2c737c8b62d2e9a2a4d928c062b9a06e70
|
[
"MIT"
] | null | null | null |
from __future__ import unicode_literals
import frappe
def get_context(context):
    pass
# do your magic here
# print('---------',context)
# context.doc=''
# context.doc.task_document_pm_cf = ''
# print(context.doc.name)
# context.update(context.doc.task_document_pm_cf.as_dict())
#
# context.update({'doc.task_document_pm_cf':None})
# context.doc=None
# print('-----------------------',context.doc.task_document_pm_cf)
| 25.176471
| 67
| 0.686916
| 58
| 428
| 4.741379
| 0.431034
| 0.218182
| 0.218182
| 0.247273
| 0.352727
| 0.283636
| 0
| 0
| 0
| 0
| 0
| 0
| 0.10514
| 428
| 16
| 68
| 26.75
| 0.718016
| 0.724299
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0.25
| 0.5
| 0
| 0.75
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
|
0
| 6
|
22f2432762ae19960b0b68c1c3894667c3a07dc7
| 184
|
py
|
Python
|
Backend/tests/util/test_crypto_hash.py
|
zarif007/Block-Chain-Web-App
|
40bd4d8d8ce1f6de2840792290bf022d7dfacbb4
|
[
"MIT"
] | 1
|
2020-12-30T09:30:23.000Z
|
2020-12-30T09:30:23.000Z
|
Backend/tests/util/test_crypto_hash.py
|
zarif007/Block-Chain-Web-App
|
40bd4d8d8ce1f6de2840792290bf022d7dfacbb4
|
[
"MIT"
] | null | null | null |
Backend/tests/util/test_crypto_hash.py
|
zarif007/Block-Chain-Web-App
|
40bd4d8d8ce1f6de2840792290bf022d7dfacbb4
|
[
"MIT"
] | null | null | null |
from backend.util.crypto_hash import crypto_hash
def test_crypto_hash():
    """should return a hashed value"""
    assert crypto_hash(1, [2], 'three') == crypto_hash('three', 1, [2])
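# The assertion above checks order-insensitivity: crypto_hash is expected to produce
# the same digest regardless of the order in which its arguments are passed.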
| 36.8
| 71
| 0.695652
| 28
| 184
| 4.357143
| 0.607143
| 0.409836
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.025478
| 0.146739
| 184
| 5
| 71
| 36.8
| 0.751592
| 0.152174
| 0
| 0
| 0
| 0
| 0.066225
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
fe0dea9b1ede297cdf2969922990f485c1357371
| 47
|
py
|
Python
|
app/hello.py
|
gitx-io/ActionServerless-template
|
1824e7ebb993ed50b71dd0233330729a6f1fe9d2
|
[
"Apache-2.0"
] | 2
|
2021-03-23T11:06:28.000Z
|
2021-11-08T12:01:29.000Z
|
app/hello.py
|
gitx-io/ActionServerless-template
|
1824e7ebb993ed50b71dd0233330729a6f1fe9d2
|
[
"Apache-2.0"
] | null | null | null |
app/hello.py
|
gitx-io/ActionServerless-template
|
1824e7ebb993ed50b71dd0233330729a6f1fe9d2
|
[
"Apache-2.0"
] | null | null | null |
# GET /app/hello_world
print("hello world!")
| 9.4
| 22
| 0.680851
| 7
| 47
| 4.428571
| 0.714286
| 0.645161
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148936
| 47
| 4
| 23
| 11.75
| 0.775
| 0.425532
| 0
| 0
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
fe17959388dd5b51f93a9159f71292328f037d0d
| 9,825
|
py
|
Python
|
test/test_inelasticity.py
|
ajey091/neml
|
23dd2cdb83057fdd17a37fa19f4592c54f821dbf
|
[
"MIT"
] | 6
|
2020-05-06T17:04:29.000Z
|
2021-08-03T20:02:22.000Z
|
test/test_inelasticity.py
|
ajey091/neml
|
23dd2cdb83057fdd17a37fa19f4592c54f821dbf
|
[
"MIT"
] | 66
|
2018-10-26T01:32:43.000Z
|
2022-02-01T03:02:18.000Z
|
test/test_inelasticity.py
|
ajey091/neml
|
23dd2cdb83057fdd17a37fa19f4592c54f821dbf
|
[
"MIT"
] | 14
|
2018-11-28T17:07:24.000Z
|
2022-01-06T16:57:15.000Z
|
#!/usr/bin/env python3
from neml import history, interpolate
from neml.math import tensors, rotations
from neml.cp import crystallography, slipharden, sliprules, inelasticity
from common import differentiate
from nicediff import *
import unittest
import numpy as np
import numpy.linalg as la
class CommonInelastic(object):
def test_d_p_d_stress(self):
nd = diff_symmetric_symmetric(lambda s: self.model.d_p(s, self.Q, self.H, self.L, self.T, self.fixed),
self.S)
d = self.model.d_d_p_d_stress(self.S, self.Q, self.H, self.L, self.T, self.fixed)
self.assertEqual(nd, d)
def test_d_p_d_history(self):
nd = diff_symmetric_history(lambda h: self.model.d_p(self.S, self.Q, h, self.L, self.T, self.fixed),
self.H)
d = np.array(self.model.d_d_p_d_history(self.S, self.Q, self.H, self.L, self.T, self.fixed))
self.assertTrue(np.allclose(d.T.reshape(nd.shape, order = 'F'), nd))
def test_w_p_d_stress(self):
nd = diff_skew_symmetric(lambda s: self.model.w_p(s, self.Q, self.H, self.L, self.T, self.fixed),
self.S)
d = self.model.d_w_p_d_stress(self.S, self.Q, self.H, self.L, self.T, self.fixed)
self.assertEqual(nd, d)
def test_w_p_d_history(self):
nd = diff_skew_history(lambda h: self.model.w_p(self.S, self.Q, h, self.L, self.T, self.fixed),
self.H)
d = np.array(self.model.d_w_p_d_history(self.S, self.Q, self.H, self.L, self.T, self.fixed))
self.assertTrue(np.allclose(d.T.reshape(nd.shape, order = 'F'), nd))
def test_d_hist_rate_d_stress(self):
nd = diff_history_symmetric(lambda s: self.model.history_rate(s, self.Q, self.H, self.L, self.T, self.fixed),
self.S)
d = np.array(self.model.d_history_rate_d_stress(self.S, self.Q, self.H, self.L, self.T, self.fixed))
self.assertTrue(np.allclose(nd.reshape(d.shape), d))
def test_d_hist_rate_d_hist(self):
nd = diff_history_history(lambda h: self.model.history_rate(self.S, self.Q, h, self.L, self.T, self.fixed),
self.H)
d = np.array(self.model.d_history_rate_d_history(self.S, self.Q, self.H, self.L, self.T, self.fixed))
self.assertTrue(np.allclose(nd.reshape(d.shape), d))
class TestNoInelasticity(unittest.TestCase, CommonInelastic):
def setUp(self):
self.model = inelasticity.NoInelasticity()
self.L = crystallography.CubicLattice(1.0)
self.L.add_slip_system([1,1,0],[1,1,1])
self.Q = rotations.Orientation(35.0,17.0,14.0, angle_type = "degrees")
self.S = tensors.Symmetric(np.array([
[100.0,-25.0,10.0],
[-25.0,-17.0,15.0],
[10.0, 15.0,35.0]]))
self.T = 300.0
self.H = history.History()
self.fixed = history.History()
def test_d_p(self):
self.assertEqual(tensors.Symmetric(np.zeros((3,3))),
self.model.d_p(self.S, self.Q, self.H, self.L, self.T,self.fixed))
def test_w_p(self):
self.assertEqual(tensors.Skew(np.zeros((3,3))),
self.model.w_p(self.S, self.Q, self.H, self.L, self.T,self.fixed))
def test_hist_rate(self):
h1 = history.History()
h2 = self.model.history_rate(self.S, self.Q, self.H, self.L, self.T, self.fixed)
self.assertTrue(np.allclose(np.array(h1), np.array(h2)))
class TestAsaroInelasticity(unittest.TestCase, CommonInelastic):
def setUp(self):
self.strength = 35.0
self.H = history.History()
self.H.add_scalar("strength")
self.H.set_scalar("strength", self.strength)
self.tau0 = 10.0
self.tau_sat = 50.0
self.b = 2.5
self.strengthmodel = slipharden.VoceSlipHardening(self.tau_sat, self.b, self.tau0)
self.g0 = 1.0
self.n = 3.0
self.slipmodel = sliprules.PowerLawSlipRule(self.strengthmodel, self.g0, self.n)
self.model = inelasticity.AsaroInelasticity(self.slipmodel)
self.L = crystallography.CubicLattice(1.0)
self.L.add_slip_system([1,1,0],[1,1,1])
self.Q = rotations.Orientation(35.0,17.0,14.0, angle_type = "degrees")
self.S = tensors.Symmetric(np.array([
[100.0,-25.0,10.0],
[-25.0,-17.0,15.0],
[10.0, 15.0,35.0]]))
self.T = 300.0
self.fixed = history.History()
def test_d_p(self):
d = tensors.Symmetric(np.zeros((3,3)))
for g in range(self.L.ngroup):
for i in range(self.L.nslip(g)):
d += self.slipmodel.slip(g, i, self.S, self.Q, self.H, self.L, self.T,
self.fixed) * self.L.M(g, i, self.Q)
self.assertEqual(d,
self.model.d_p(self.S, self.Q, self.H, self.L, self.T, self.fixed))
def test_w_p(self):
w = tensors.Skew(np.zeros((3,3)))
for g in range(self.L.ngroup):
for i in range(self.L.nslip(g)):
w += self.slipmodel.slip(g, i, self.S, self.Q, self.H, self.L, self.T,
self.fixed) * self.L.N(g, i, self.Q)
self.assertEqual(w,
self.model.w_p(self.S, self.Q, self.H, self.L, self.T, self.fixed))
def test_hist_rate(self):
h1 = self.slipmodel.hist_rate(self.S, self.Q, self.H, self.L, self.T, self.fixed)
h2 = self.model.history_rate(self.S, self.Q, self.H, self.L, self.T, self.fixed)
self.assertTrue(np.allclose(np.array(h1), np.array(h2)))
class TestPowerLawInelasticity(unittest.TestCase, CommonInelastic):
def setUp(self):
self.A = 1.0e-2
self.n = 3.1
self.model = inelasticity.PowerLawInelasticity(self.A, self.n)
self.L = crystallography.CubicLattice(1.0)
self.L.add_slip_system([1,1,0],[1,1,1])
self.Q = rotations.Orientation(35.0,17.0,14.0, angle_type = "degrees")
self.S = tensors.Symmetric(np.array([
[100.0,-25.0,10.0],
[-25.0,-17.0,15.0],
[10.0, 15.0,35.0]]))
self.T = 300.0
self.H = history.History()
self.fixed = history.History()
def test_seq(self):
seq1 = np.sqrt(3.0/2.0) * self.S.dev().norm()
seq2 = np.sqrt(3.0/2.0 * self.S.dev().contract(self.S.dev()))
self.assertTrue(np.isclose(seq1, seq2))
def test_d_p(self):
seq = np.sqrt(3.0/2.0) * self.S.dev().norm()
Dp1 = self.A*seq**self.n * self.S.dev() / seq
Dp2 = self.model.d_p(self.S, self.Q, self.H, self.L, self.T, self.fixed)
self.assertEqual(Dp1, Dp2)
def test_w_p(self):
self.assertEqual(tensors.Skew(np.zeros((3,3))),
self.model.w_p(self.S, self.Q, self.H, self.L, self.T, self.fixed))
def test_hist_rate(self):
h1 = history.History()
h2 = self.model.history_rate(self.S, self.Q, self.H, self.L, self.T, self.fixed)
self.assertTrue(np.allclose(np.array(h1), np.array(h2)))
class TestCombinedInelasticity(unittest.TestCase, CommonInelastic):
def setUp(self):
self.A = 1.0e-5
self.n = 3.1
self.model1 = inelasticity.PowerLawInelasticity(self.A, self.n)
self.L = crystallography.CubicLattice(1.0)
self.L.add_slip_system([1,1,0],[1,1,1])
self.Q = rotations.Orientation(35.0,17.0,14.0, angle_type = "degrees")
self.S = tensors.Symmetric(np.array([
[100.0,-25.0,10.0],
[-25.0,-17.0,15.0],
[10.0, 15.0,35.0]]))
self.T = 300.0
self.strength = 35.0
self.H = history.History()
self.H.add_scalar("strength")
self.H.set_scalar("strength", self.strength)
self.tau0 = 10.0
self.tau_sat = 50.0
self.b = 2.5
self.strengthmodel = slipharden.VoceSlipHardening(self.tau_sat, self.b, self.tau0)
self.g0 = 1.0
self.n = 3.0
self.slipmodel = sliprules.PowerLawSlipRule(self.strengthmodel, self.g0, self.n)
self.model2 = inelasticity.AsaroInelasticity(self.slipmodel)
self.model = inelasticity.CombinedInelasticity([self.model1, self.model2])
self.fixed = history.History()
def test_d_p(self):
dp1 = self.model1.d_p(self.S, self.Q, self.H, self.L, self.T, self.fixed)
dp2 = self.model2.d_p(self.S, self.Q, self.H, self.L, self.T, self.fixed)
dp = self.model.d_p(self.S, self.Q, self.H, self.L, self.T, self.fixed)
self.assertEqual(dp1+dp2,dp)
def test_w_p(self):
wp1 = self.model1.w_p(self.S, self.Q, self.H, self.L, self.T, self.fixed)
wp2 = self.model2.w_p(self.S, self.Q, self.H, self.L, self.T, self.fixed)
wp = self.model.w_p(self.S, self.Q, self.H, self.L, self.T, self.fixed)
self.assertEqual(wp1+wp2,wp)
def test_hist_rate(self):
h1 = self.model1.history_rate(self.S, self.Q, self.H, self.L, self.T, self.fixed)
h2 = self.model2.history_rate(self.S, self.Q, self.H, self.L, self.T, self.fixed)
h = self.model.history_rate(self.S, self.Q, self.H, self.L, self.T, self.fixed)
h3 = history.History()
h3.add_union(h1)
h3.add_union(h2)
self.assertTrue(np.allclose(np.array(h), np.array(h3)))
class TestComplexInelasticity(unittest.TestCase, CommonInelastic):
def setUp(self):
self.strength_0 = 35.0
self.H = history.History()
self.H.add_scalar("strength0")
self.H.set_scalar("strength0", self.strength_0)
self.strength_1 = 25.0
self.H.add_scalar("strength1")
self.H.set_scalar("strength1", self.strength_1)
self.tau0_0 = 10.0
self.tau_sat_0 = 50.0
self.b_0 = 2.5
self.tau0_1 = 5.0
self.tau_sat_1 = 25.0
self.b_1 = 1.0
self.strengthmodel = slipharden.SumSlipSingleStrengthHardening(
[
slipharden.VoceSlipHardening(self.tau_sat_0, self.b_0, self.tau0_0),
slipharden.VoceSlipHardening(self.tau_sat_1, self.b_1, self.tau0_1)
])
self.g0 = 1.0
self.n = 3.0
self.slipmodel = sliprules.PowerLawSlipRule(self.strengthmodel, self.g0, self.n)
self.model = inelasticity.AsaroInelasticity(self.slipmodel)
self.L = crystallography.CubicLattice(1.0)
self.L.add_slip_system([1,1,0],[1,1,1])
self.Q = rotations.Orientation(35.0,17.0,14.0, angle_type = "degrees")
self.S = tensors.Symmetric(np.array([
[100.0,-25.0,10.0],
[-25.0,-17.0,15.0],
[10.0, 15.0,35.0]]))
self.T = 300.0
self.fixed = history.History()
| 33.192568
| 113
| 0.650687
| 1,697
| 9,825
| 3.672363
| 0.08132
| 0.039313
| 0.031772
| 0.052953
| 0.828466
| 0.774069
| 0.736842
| 0.724647
| 0.705712
| 0.684211
| 0
| 0.051228
| 0.17944
| 9,825
| 295
| 114
| 33.305085
| 0.721781
| 0.002137
| 0
| 0.584112
| 0
| 0
| 0.010711
| 0
| 0
| 0
| 0
| 0
| 0.088785
| 1
| 0.11215
| false
| 0
| 0.037383
| 0
| 0.17757
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a3e69312873ac300ec24d176f3669d83bfaa4666
| 49,643
|
py
|
Python
|
pytests/subdoc/subdoc_error_handling.py
|
sumedhpb/testrunner
|
9ff887231c75571624abc31a3fb5248110e01203
|
[
"Apache-2.0"
] | 14
|
2015-02-06T02:47:57.000Z
|
2020-03-14T15:06:05.000Z
|
pytests/subdoc/subdoc_error_handling.py
|
sumedhpb/testrunner
|
9ff887231c75571624abc31a3fb5248110e01203
|
[
"Apache-2.0"
] | 3
|
2019-02-27T19:29:11.000Z
|
2021-06-02T02:14:27.000Z
|
pytests/subdoc/subdoc_error_handling.py
|
sumedhpb/testrunner
|
9ff887231c75571624abc31a3fb5248110e01203
|
[
"Apache-2.0"
] | 108
|
2015-03-26T08:58:49.000Z
|
2022-03-21T05:21:39.000Z
|
from lib.mc_bin_client import MemcachedClient, MemcachedError
from lib.memcacheConstants import *
from .subdoc_base import SubdocBaseTest
import copy, json
import sys
import random
class SubdocErrorHandling(SubdocBaseTest):
def setUp(self):
super(SubdocErrorHandling, self).setUp()
self.nesting_level = self.input.param("nesting_level", 0)
self.client = self.direct_client(self.master, self.buckets[0])
def tearDown(self):
super(SubdocErrorHandling, self).tearDown()
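# Each test below seeds a document with self.client.set(...), issues sub-document
# operations along paths that are expected to fail, and collects any mismatch
# between the actual and expected memcached error in the `result` dict; the final
# assertTrue(len(result) == 0, result) fails the test if anything was recorded.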
def test_error_get_simple_data(self):
result = {}
simple_data = {
"field":"simple",
"array":[{"field":"exists"}, 1, 2]
}
# Add Simple Data
jsonDump = json.dumps(simple_data)
self.client.set("simple_data", 0, 0, jsonDump)
#self.client.get_sd("simple_data","crap")
self.log.info("simple_data :: path does not exist")
self.error_gets("simple_data", "does_not_exist", error = "Memcached error #192 'Path not exists'", field = "simple_data : path does not exist - dictionary", result = result)
self.log.info("simple_data :: malformed path")
self.error_gets("simple_data", "{][]}", error = "Memcached error #194 'Invalid path'", field = "simple_data : malformed path", result = result)
self.log.info("simple_data :: path does not exist - array, out of bounds index")
self.error_gets("simple_data", "array[200]", error = "Memcached error #192 'Path not exists'", field = "simple_data : path does not exist - array, out of bounds index", result = result)
self.log.info("simple_data :: document does not exist")
self.error_gets("does_not_exist", "does_not_exist", error = "Memcached error #1 'Not found'", field = "simple_data : document does not exist", result = result)
self.assertTrue(len(result) == 0, result)
def test_error_get_nested_data(self):
result = {}
simple_data = {
"field":"simple",
"array":[{"field":"exists"}, 1, 2]
}
nested_simple = {
"field":"simple",
"array":[{"field":"exists"}, 1, 2]
}
# Add Simple Data
jsonDump = json.dumps(simple_data)
self.client.set("simple_data", 0, 0, jsonDump)
# Add deeply nested data (40 levels) as "nested_data" for the long-path tests
base_json = self.generate_json_for_nesting()
nested_json = self.generate_nested(base_json, nested_simple, 40)
jsonDump = json.dumps(nested_json)
self.client.set("nested_data", 0, 0, jsonDump)
# Add normally nested data (20 levels) as "normal_nested_data"
base_json = self.generate_json_for_nesting()
nested_json = self.generate_nested(base_json, nested_simple, 20)
jsonDump = json.dumps(nested_json)
self.client.set("normal_nested_data", 0, 0, jsonDump)
# Tests for Nested Data
self.log.info("nested_data :: path does not exist")
new_path = self.generate_path(20, "does_not_exist")
self.error_gets("normal_nested_data", new_path, error = "Memcached error #192 'Path not exists'", field = "nested_data : path does not exist - dictionary", result = result)
self.log.info("nested_data ::path does not exist - array, out of bounds index")
new_path = self.generate_path(20, "array[200]")
self.error_gets("normal_nested_data", new_path, error = "Memcached error #192 'Path not exists'", field = "nested_data : path does not exist - array, out of bounds index", result = result)
self.log.info("nested_data ::malformed path")
new_path = self.generate_path(20, "{[]}")
self.error_gets("normal_nested_data", new_path, error = "Memcached error #194 'Invalid path'", field = "nested_data : malformed path", result = result)
# Tests for Nested Data with long path
self.log.info("long_nested_data ::nested_data : path does not exist - too big path")
new_path = self.generate_path(40, "field")
self.error_gets("nested_data", new_path, error = "Memcached error #195 'Path too big'", field = "nested_data : path does not exist - too big path", result = result)
self.assertTrue(len(result) == 0, result)
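# generate_nested/generate_path come from SubdocBaseTest (not shown here); they
# presumably wrap the payload N dictionary levels deep and build the matching
# dotted path, so a 40-level path exceeds the server limit and yields 'Path too big'.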
def test_error_exists_nested_data(self):
result = {}
nested_simple = {
"field":"simple",
"array":[{"field":"exists"}, 1, 2]
}
# Add deeply nested data (40 levels) as "nested_data" for the long-path tests
base_json = self.generate_json_for_nesting()
nested_json = self.generate_nested(base_json, nested_simple, 40)
jsonDump = json.dumps(nested_json)
self.client.set("nested_data", 0, 0, jsonDump)
# Add normally nested data (20 levels) as "normal_nested_data"
base_json = self.generate_json_for_nesting()
nested_json = self.generate_nested(base_json, nested_simple, 20)
jsonDump = json.dumps(nested_json)
self.client.set("normal_nested_data", 0, 0, jsonDump)
# Tests for Nested Data Set
self.log.info("nested_data :: malformed path")
new_path = self.generate_path(20, "{][]}")
self.error_exists("normal_nested_data", new_path, error = "Memcached error #194 'Invalid path'", field = "nested_data : malformed path", result = result)
self.log.info("nested_data :: path does not exist")
new_path = self.generate_path(20, "does_not_exist")
self.error_exists("normal_nested_data", new_path, error = "Memcached error #192 'Path not exists'", field = "nested_data : path does not exist malformed path", result = result)
self.log.info("nested_data ::path does not exist - array, out of bounds index")
new_path = self.generate_path(20, "array[200]")
self.error_exists("normal_nested_data", new_path, error = "Memcached error #192 'Path not exists'", field = "nested_data : path does not exist - array, out of bounds index", result = result)
# Tests for Nested Data with long path
self.log.info("long_nested_data ::nested_data : path does not exist - too big path")
new_path = self.generate_path(40, "field")
self.error_exists("nested_data", new_path, error = "Memcached error #195 'Path too big'", field = "nested_data : path does not exist - too big path", result = result)
self.assertTrue(len(result) == 0, result)
def test_error_exists_simple_data(self):
result = {}
simple_data = {
"field":"simple",
"array":[{"field":"exists"}, 1, 2]
}
# Add Simple Data
jsonDump = json.dumps(simple_data)
self.client.set("simple_data", 0, 0, jsonDump)
# Tests for Simple Data Set
self.log.info("simple_data :: path does not exist")
self.error_exists("simple_data", "does_not_exist", error = "Memcached error #192 'Path not exists'", field = "simple_data : path does not exist ", result = result)
self.log.info("simple_data :: path does not exist - array, out of bounds index")
self.error_exists("simple_data", "array[200]", error = "Memcached error #192 'Path not exists'", field = "simple_data : path does not exist - array, out of bounds index", result = result)
self.log.info("simple_data :: document does not exist")
self.error_exists("does_not_exist", "does_not_exist", error = "Memcached error #1 'Not found'", field = "simple_data : document does not exist", result = result)
self.log.info("simple_data :: malformed path")
self.error_exists("simple_data", "[]{}]", error = "Memcached error #194 'Invalid path'", field = "simple_data : malformed path", result = result)
self.assertTrue(len(result) == 0, result)
def test_error_add_dict_simple_data(self):
result = {}
simple_data = {
"field":"simple",
"array":[{"field":"exists"}, 1, 2]
}
# Add Simple Data
jsonDump = json.dumps(simple_data)
self.client.set("simple_data", 0, 0, jsonDump)
# Tests for Simple Data Set
self.log.info("simple_data :: path exists")
self.error_add_dict("simple_data", "field", value = "value_value", error = "Memcached error #197 'Cant insert'", field = "simple_data :: path exists", result = result)
self.log.info("simple_data :: inserting into an array")
self.error_add_dict("simple_data", "array[0]", value = "value_value", error = "Memcached error #197 'Cant insert'", field = "simple_data :: inserting into an array", result = result)
self.log.info("simple_data :: empty path does not exist")
self.error_add_dict("simple_data", "{][]}", value = "value_value", error = "Memcached error #194 'Invalid path'", field = "simple_data : malformed path", result = result)
self.assertTrue(len(result) == 0, result)
self.error_add_dict("simple_data", "", value = "value_value", error = "Memcached error #4 'Invalid'", field = "simple_data : empty path does not exist - dictionary", result = result)
self.log.info("simple_data :: malformed path")
def test_error_add_dict_nested_data(self):
result = {}
simple_data = {
"field":"simple",
"array":[{"field":"exists"}, 1, 2]
}
nested_simple = {
"field":"simple",
"array":[{"field":"exists"}, 1, 2]
}
# Add Simple Data
jsonDump = json.dumps(simple_data)
self.client.set("simple_data", 0, 0, jsonDump)
# Add deeply nested data (40 levels) as "nested_data" for the long-path tests
base_json = self.generate_json_for_nesting()
nested_json = self.generate_nested(base_json, nested_simple, 40)
jsonDump = json.dumps(nested_json)
self.client.set("nested_data", 0, 0, jsonDump)
# Add normally nested data (20 levels) as "normal_nested_data"
base_json = self.generate_json_for_nesting()
nested_json = self.generate_nested(base_json, nested_simple, 20)
jsonDump = json.dumps(nested_json)
self.client.set("normal_nested_data", 0, 0, jsonDump)
# Tests for Nested Data Set
self.log.info("nested_data :: malformed json")
new_path = self.generate_path(20, "field_1")
self.error_add_dict("normal_nested_data", new_path, value = {"data"}, error = "Memcached error #197 'Cant insert'", field = "nested_data : malformed json", result = result)
self.log.info("nested_data :: path exists")
new_path = self.generate_path(20, "field")
self.error_add_dict("normal_nested_data", new_path, value = "value_value", error = "Memcached error #197 'Cant insert'", field = "nested_data : path exists", result = result)
self.log.info("nested_data :: inserting into an array")
new_path = self.generate_path(20, "array[0]")
self.error_add_dict("normal_nested_data", new_path, value = "value_value", error = "Memcached error #197 'Cant insert'", field = "nested_data : inserting into an array", result = result)
self.log.info("nested_data :: empty path does not exist")
new_path = self.generate_path(20, "")
self.error_add_dict("normal_nested_data", new_path, value = "value_value", error = "Memcached error #197 'Cant insert'", field = "nested_data : empty path does not exist - dictionary", result = result)
self.log.info("nested_data :: malformed path")
new_path = self.generate_path(20, "{}][")
self.error_add_dict("normal_nested_data", new_path, value = "value_value", error = "Memcached error #194 'Invalid path'", field = "nested_data : malformed path", result = result)
# Tests for Nested Data with long path
self.log.info("long_nested_data ::nested_data : path does not exist - too big path")
new_path = self.generate_path(40, "field")
self.error_add_dict("nested_data", new_path, value = "value_value", error = "Memcached error #195 'Path too big'", field = "nested_data : path does not exist - too big path", result = result)
self.assertTrue(len(result) == 0, result)
def test_error_upsert_dict_simple_data(self):
result = {}
simple_data = {
"field":"simple",
"array":[{"field":"exists"}, 1, 2]
}
nested_simple = {
"field":"simple",
"array":[{"field":"exists"}, 1, 2]
}
# Add Simple Data
jsonDump = json.dumps(simple_data)
self.client.set("simple_data", 0, 0, jsonDump)
# Tests for Simple Data Set
self.log.info("simple_data :: insertion into array")
self.error_upsert_dict("simple_data", "array[0]", value = "value_value", error = "Memcached error #197 'Cant insert'", field = "simple_data : insertion into array", result = result)
self.log.info("simple_data :: empty path does not exist")
self.error_upsert_dict("simple_data", "", value = "value_value", error = "Memcached error #4 'Invalid'", field = "simple_data : empty path does not exist - dictionary", result = result)
self.assertTrue(len(result) == 0, result)
def test_error_upsert_dict_nested_data(self):
result = {}
nested_simple = {
"field":"simple",
"array":[{"field":"exists"}, 1, 2]
}
# Add deeply nested data (40 levels) as "nested_data" for the long-path tests
base_json = self.generate_json_for_nesting()
nested_json = self.generate_nested(base_json, nested_simple, 40)
jsonDump = json.dumps(nested_json)
self.client.set("nested_data", 0, 0, jsonDump)
# Add normally nested data (20 levels) as "normal_nested_data"
base_json = self.generate_json_for_nesting()
nested_json = self.generate_nested(base_json, nested_simple, 20)
jsonDump = json.dumps(nested_json)
self.client.set("normal_nested_data", 0, 0, jsonDump)
# Tests for Nested Data Set
self.log.info("nested_data :: malformed json")
new_path = self.generate_path(20, "field_1")
self.error_upsert_dict("normal_nested_data", new_path, value = {10}, error = "Memcached error #197 'Cant insert'", field = "nested_data : malformed json", result = result)
self.log.info("nested_data :: empty path does not exist")
new_path = self.generate_path(20, "")
self.error_upsert_dict("normal_nested_data", new_path, value = "value_value", error = "Memcached error #197 'Cant insert'", field = "nested_data : empty path does not exist - dictionary", result = result)
self.log.info("nested_data :: inserting into array")
new_path = self.generate_path(20, "array[0]")
self.error_upsert_dict("normal_nested_data", new_path, value = "value_value", error = "Memcached error #197 'Cant insert'", field = "nested_data : inserting into array", result = result)
self.log.info("nested_data :: malformed path")
new_path = self.generate_path(20, "{}}[0]")
self.error_upsert_dict("normal_nested_data", new_path, value = "value_value", error = "Memcached error #197 'Cant insert'", field = "nested_data : malformed path", result = result)
# Tests for Nested Data with long path
self.log.info("long_nested_data ::nested_data : path does not exist - too big path")
new_path = self.generate_path(40, "field")
self.error_upsert_dict("nested_data", new_path, value = "value_value", error = "Memcached error #195 'Path too big'", field = "nested_data : path does not exist - too big path", result = result)
self.assertTrue(len(result) == 0, result)
def test_error_replace_simple_data(self):
result = {}
simple_data = {
"field":"simple",
"array":[{"field":"exists"}, 1, 2]
}
nested_simple = {
"field":"simple",
"array":[{"field":"exists"}, 1, 2]
}
# Add Simple Data
jsonDump = json.dumps(simple_data)
self.client.set("simple_data", 0, 0, jsonDump)
# Tests for Simple Data Set
self.log.info("simple_data :: document does not exist")
self.error_replace("does_not_exist", "does_not_exist", value = "value_value", error = "Memcached error #1 'Not found'", field = "simple_data : document does not exist", result = result)
self.log.info("simple_data :: path does not exist - array, negavtie index")
self.error_replace("simple_data", "array[-1]", value = "value_value", error = "Memcached error #197 'Cant insert'", field = "simple_data : path does not exist - array, negavtie index", result = result)
self.log.info("simple_data :: path does not exist - array, out of bounds index")
self.error_replace("simple_data", "array[200]", value = "value_value", error = "Memcached error #197 'Cant insert'", field = "simple_data : path does not exist - array, out of bounds index", result = result)
self.log.info("simple_data :: empty path does not exist")
self.error_replace("simple_data", "", value = "value_value", error = "Memcached error #4 'Invalid'", field = "simple_data : empty path does not exist - dictionary", result = result)
self.assertTrue(len(result) == 0, result)
def test_error_replace_nested_data(self):
result = {}
simple_data = {
"field":"simple",
"array":[{"field":"exists"}, 1, 2]
}
nested_simple = {
"field":"simple",
"array":[{"field":"exists"}, 1, 2]
}
# Add Simple Data
jsonDump = json.dumps(simple_data)
self.client.set("simple_data", 0, 0, jsonDump)
# Add deeply nested data (40 levels) as "nested_data" for the long-path tests
base_json = self.generate_json_for_nesting()
nested_json = self.generate_nested(base_json, nested_simple, 40)
jsonDump = json.dumps(nested_json)
self.client.set("nested_data", 0, 0, jsonDump)
# Add normally nested data (20 levels) as "normal_nested_data"
base_json = self.generate_json_for_nesting()
nested_json = self.generate_nested(base_json, nested_simple, 20)
jsonDump = json.dumps(nested_json)
self.client.set("normal_nested_data", 0, 0, jsonDump)
# Tests for Nested Data Set
self.log.info("nested_data :: empty path does not exist")
new_path = self.generate_path(20, "")
self.error_replace("normal_nested_data", new_path, value = "value_value", error = "Memcached error #197 'Cant insert'", field = "nested_data : empty path does not exist - dictionary", result = result)
self.log.info("nested_data :: path does not exist - array, negavtie index")
new_path = self.generate_path(20, "array[-1]")
self.error_replace("normal_nested_data", new_path, value = "value_value", error = "Memcached error #197 'Cant insert'", field = "nested_data : path does not exist - array, negavtie index", result = result)
self.log.info("nested_data :: path does not exist - array, out of bounds index")
new_path = self.generate_path(20, "array[200]")
self.error_replace("normal_nested_data", new_path, value = "value_value", error = "Memcached error #197 'Cant insert'", field = "nested_data : path does not exist - array, out of bounds index", result = result)
self.log.info("nested_data :: malformed path")
new_path = self.generate_path(20, "{][]}")
self.error_replace("normal_nested_data", new_path, value = "value_value", error = "Memcached error #194 'Invalid path'", field = "nested_data : malformed path", result = result)
self.log.info("nested_data :: malformed json")
new_path = self.generate_path(20, "field")
self.error_replace("normal_nested_data", new_path, value = {10}, error = "Memcached error #197 'Cant insert'", field = "nested_data : malformed json", result = result)
# Tests for Nested Data with long path
self.log.info("long_nested_data ::nested_data : path does not exist - too big path")
new_path = self.generate_path(40, "field")
self.error_replace("nested_data", new_path, value = "value_value", error = "Memcached error #195 'Path too big'", field = "nested_data : path does not exist - too big path", result = result)
self.assertTrue(len(result) == 0, result)
def test_error_delete_simple_data(self):
result = {}
simple_data = {
"field":"simple",
"array":[{"field":"exists"}, 1, 2]
}
nested_simple = {
"field":"simple",
"array":[{"field":"exists"}, 1, 2]
}
# Add Simple Data
jsonDump = json.dumps(simple_data)
self.client.set("simple_data", 0, 0, jsonDump)
# Tests for Simple Data Set
self.log.info("simple_data :: empty path does not exist")
self.error_delete("simple_data", "", value = "value_value", error = "Memcached error #4 'Invalid'", field = "simple_data : empty path does not exist - dictionary", result = result)
self.log.info("simple_data :: document does not exist")
self.error_delete("does_not_exist", "does_not_exist", value = "value_value", error = "Memcached error #1 'Not found'", field = "simple_data : document does not exist", result = result)
self.log.info("simple_data :: path does not exist - array, negavtie index")
self.error_delete("simple_data", "array[-1]", value = "value_value", error = "Memcached error #197 'Cant insert'", field = "simple_data : path does not exist - array, negavtie index", result = result)
self.log.info("simple_data :: path does not exist - array, out of bounds index")
self.error_delete("simple_data", "array[200]", value = "value_value", error = "Memcached error #197 'Cant insert'", field = "simple_data : path does not exist - array, out of bounds index", result = result)
self.assertTrue(len(result) == 0, result)
def test_error_delete_nested_data(self):
result = {}
simple_data = {
"field":"simple",
"array":[{"field":"exists"}, 1, 2]
}
nested_simple = {
"field":"simple",
"array":[{"field":"exists"}, 1, 2]
}
# Add Simple Data
jsonDump = json.dumps(simple_data)
self.client.set("simple_data", 0, 0, jsonDump)
# Add deeply nested data (40 levels) as "nested_data" for the long-path tests
base_json = self.generate_json_for_nesting()
nested_json = self.generate_nested(base_json, nested_simple, 40)
jsonDump = json.dumps(nested_json)
self.client.set("nested_data", 0, 0, jsonDump)
# Add normally nested data (20 levels) as "normal_nested_data"
base_json = self.generate_json_for_nesting()
nested_json = self.generate_nested(base_json, nested_simple, 20)
jsonDump = json.dumps(nested_json)
self.client.set("normal_nested_data", 0, 0, jsonDump)
# Tests for Nested Data Set
self.log.info("nested_data :: empty path does not exist")
new_path = self.generate_path(20, "")
self.error_delete("normal_nested_data", new_path, value = "value_value", error = "Memcached error #197 'Cant insert'", field = "nested_data : empty path does not exist - dictionary", result = result)
self.log.info("nested_data :: path does not exist - array, negavtie index")
new_path = self.generate_path(20, "array[-1]")
self.error_delete("normal_nested_data", new_path, value = "value_value", error = "Memcached error #197 'Cant insert'", field = "nested_data : path does not exist - array, negavtie index", result = result)
self.log.info("nested_data :: path does not exist - array, out of bounds index")
new_path = self.generate_path(20, "array[200]")
self.error_delete("normal_nested_data", new_path, value = "value_value", error = "Memcached error #197 'Cant insert'", field = "nested_data : path does not exist - array, out of bounds index", result = result)
self.log.info("nested_data :: malformed path")
new_path = self.generate_path(20, "{][]}")
self.error_delete("normal_nested_data", new_path, value = "value_value", error = "Memcached error #194 'Invalid path'", field = "nested_data : malformed path", result = result)
# Tests for Nested Data with long path
self.log.info("long_nested_data ::nested_data : path does not exist - too big path")
new_path = self.generate_path(40, "array")
self.error_delete("nested_data", new_path, value = "value_value", error = "Memcached error #197 'Cant insert'", field = "nested_data : path does not exist - too big path", result = result)
self.assertTrue(len(result) == 0, result)
def test_error_array_push_last_simple_data(self):
result = {}
simple_data = {
"field":"simple",
"array":[{"field":"exists"}, 1, 2]
}
nested_simple = {
"field":"simple",
"array":[{"field":"exists"}, 1, 2]
}
# Add Simple Data
jsonDump = json.dumps(simple_data)
self.client.set("simple_data", 0, 0, jsonDump)
# Tests for Simple Data Set
self.log.info("simple_data :: empty path does not exist")
self.error_array_push_last("simple_data", "", error = "Memcached error #193 'Path mismatch'", field = "simple_data : empty path does not exist - dictionary", result = result)
self.log.info("simple_data :: not an array path does not exist")
self.error_array_push_last("simple_data", "field", error = "Memcached error #193 'Path mismatch'", field = "simple_data : not an array path does not exist - dictionary", result = result)
self.log.info("simple_data :: document does not exist")
self.error_array_push_last("does_not_exist", "does_not_exist", error = "Memcached error #1 'Not found'", field = "simple_data : document does not exist", result = result)
# Tests for Nested Data with long path
self.log.info("long_nested_data ::nested_data : path does not exist - too big path")
new_path = self.generate_path(40, "array")
self.error_array_push_last("nested_data", new_path, error = "Memcached error #1 'Not found'", field = "nested_data : path does not exist - too big path", result = result)
self.assertTrue(len(result) == 0, result)
def test_error_array_push_last_nested_data(self):
result = {}
nested_simple = {
"field":"simple",
"array":[{"field":"exists"}, 1, 2]
}
# Add deeply nested data (40 levels) as "nested_data" for the long-path tests
base_json = self.generate_json_for_nesting()
nested_json = self.generate_nested(base_json, nested_simple, 40)
jsonDump = json.dumps(nested_json)
self.client.set("nested_data", 0, 0, jsonDump)
# Add normally nested data (20 levels) as "normal_nested_data"
base_json = self.generate_json_for_nesting()
nested_json = self.generate_nested(base_json, nested_simple, 20)
jsonDump = json.dumps(nested_json)
self.client.set("normal_nested_data", 0, 0, jsonDump)
# Tests for Nested Data Set
self.log.info("nested_data :: empty path does not exist")
new_path = self.generate_path(20, "")
self.error_array_push_last("normal_nested_data", new_path, value = 10, error = "Memcached error #193 'Path mismatch'", field = "nested_data : empty path does not exist - dictionary", result = result)
self.log.info("nested_data :: malformed path")
new_path = self.generate_path(20, "[][\|}{")
self.error_array_push_last("normal_nested_data", new_path, value = 10, error = "Memcached error #194 'Invalid path'", field = "nested_data : malformed path", result = result)
self.log.info("nested_data :: malformed json")
new_path = self.generate_path(20, "array")
self.error_array_push_last("normal_nested_data", new_path, value = {10}, error = "Memcached error #197 'Cant insert'", field = "nested_data : malformed json", result = result)
# Tests for Nested Data with long path
self.log.info("long_nested_data ::nested_data : path does not exist - too big path")
new_path = self.generate_path(40, "array")
self.error_array_push_last("nested_data", new_path, error = "Memcached error #195 'Path too big'", field = "nested_data : path does not exist - too big path", result = result)
self.assertTrue(len(result) == 0, result)
def test_error_array_push_first_simple_data(self):
result = {}
simple_data = {
"field":"simple",
"array":[{"field":"exists"}, 1, 2]
}
# Add Simple Data
jsonDump = json.dumps(simple_data)
self.client.set("simple_data", 0, 0, jsonDump)
# Tests for Simple Data Set
self.log.info("simple_data :: empty path does not exist")
self.error_array_push_first("simple_data", "", value =1, error = "Memcached error #193 'Path mismatch'", field = "simple_data : empty path does not exist - dictionary", result = result)
self.log.info("simple_data :: not an array path does not exist")
self.error_array_push_first("simple_data", "field", value =1, error = "Memcached error #193 'Path mismatch'", field = "simple_data : not an array path does not exist - dictionary", result = result)
self.log.info("simple_data :: document does not exist")
self.error_array_push_first("does_not_exist", "does_not_exist", value =1, error = "Memcached error #1 'Not found'", field = "simple_data : document does not exist", result = result)
self.assertTrue(len(result) == 0, result)
def test_error_array_push_first_nested_data(self):
result = {}
nested_simple = {
"field":"simple",
"array":[{"field":"exists"}, 1, 2]
}
# Add deeply nested data (40 levels) as "nested_data" for the long-path tests
base_json = self.generate_json_for_nesting()
nested_json = self.generate_nested(base_json, nested_simple, 40)
jsonDump = json.dumps(nested_json)
self.client.set("nested_data", 0, 0, jsonDump)
# Add normally nested data (20 levels) as "normal_nested_data"
base_json = self.generate_json_for_nesting()
nested_json = self.generate_nested(base_json, nested_simple, 20)
jsonDump = json.dumps(nested_json)
self.client.set("normal_nested_data", 0, 0, jsonDump)
# Tests for Nested Data Set
self.log.info("nested_data :: empty path does not exist")
new_path = self.generate_path(20, "")
self.error_array_push_first("normal_nested_data", new_path, value = 10, error = "Memcached error #193 'Path mismatch'", field = "nested_data : empty path does not exist - dictionary", result = result)
self.log.info("nested_data :: malformed path")
new_path = self.generate_path(20, "{{]\{}[")
self.error_array_push_first("normal_nested_data", new_path, value =10, error = "Memcached error #194 'Invalid path'", field = "nested_data : malformed path", result = result)
self.log.info("nested_data :: malformed json")
new_path = self.generate_path(20, "array")
self.error_array_push_first("normal_nested_data", new_path, value = {10}, error = "Memcached error #197 'Cant insert'", field = "nested_data : malformed json", result = result)
# Tests for Nested Data with long path
self.log.info("long_nested_data ::nested_data : path does not exist - too big path")
new_path = self.generate_path(40, "array")
self.error_array_push_first("nested_data", new_path, error = "Memcached error #195 'Path too big'", field = "nested_data : path does not exist - too big path", result = result)
self.assertTrue(len(result) == 0, result)
def test_error_array_push_unique_simple_data(self):
result = {}
simple_data = {
"field":"simple",
"array":[{"field":"exists"}, 1, 2, {}]
}
# Add Simple Data
jsonDump = json.dumps(simple_data)
self.client.set("simple_data", 0, 0, jsonDump)
self.client.set("normal_nested_data", 0, 0, jsonDump)
# Tests for Simple Data Set
self.log.info("simple_data :: empty path does not exist")
self.error_array_add_unique("simple_data", "", value=2, error = "Memcached error #193 'Path mismatch'", field = "simple_data : empty path does not exist - dictionary", result = result)
self.log.info("simple_data :: not an array path does not exist")
self.error_array_add_unique("simple_data", "field", value=2, error = "Memcached error #193 'Path mismatch'", field = "simple_data : not an array path does not exist - dictionary", result = result)
self.log.info("simple_data :: unique value exists")
self.error_array_add_unique("simple_data", "array", value=2, error = "Memcached error #193 'Path mismatch'", field = "simple_data : unique value exists - dictionary", result = result)
self.log.info("simple_data :: document does not exist")
self.error_array_add_unique("does_not_exist", "does_not_exist", error = "Memcached error #1 'Not found'", field = "simple_data : document does not exist", result = result)
self.assertTrue(len(result) == 0, result)
def test_error_array_push_unique_nested_data(self):
result = {}
nested_simple = {
"field":"simple",
"array":[{"field":"exists"}, 1, 2]
}
# Add deeply nested data (40 levels) as "nested_data" for the long-path tests
base_json = self.generate_json_for_nesting()
nested_json = self.generate_nested(base_json, nested_simple, 40)
jsonDump = json.dumps(nested_json)
self.client.set("nested_data", 0, 0, jsonDump)
# Add normally nested data (20 levels) as "normal_nested_data"
base_json = self.generate_json_for_nesting()
nested_json = self.generate_nested(base_json, nested_simple, 20)
jsonDump = json.dumps(nested_json)
self.client.set("normal_nested_data", 0, 0, jsonDump)
# Tests for Nested Data Set
self.log.info("nested_data :: unique value exists")
new_path = self.generate_path(20, "array")
self.error_array_add_unique("normal_nested_data", new_path, value=2, error = "Memcached error #193 'Path mismatch'", field = "simple_data : unique value exists - dictionary", result = result)
self.log.info("nested_data :: empty path does not exist")
new_path = self.generate_path(20, "")
self.error_array_add_unique("normal_nested_data", new_path, value=2, error = "Memcached error #193 'Path mismatch'", field = "nested_data : empty path does not exist - dictionary", result = result)
self.log.info("nested_data :: malformed path")
new_path = self.generate_path(20, "{}][\P")
self.error_array_add_unique("normal_nested_data", new_path, value=2, error = "Memcached error #194 'Invalid path'", field = "nested_data : malformed path", result = result)
self.log.info("nested_data :: malformed json")
new_path = self.generate_path(20, "array")
self.error_array_add_unique("normal_nested_data", new_path, value= {10}, error = "Memcached error #197 'Cant insert'", field = "nested_data : malformed json", result = result)
self.log.info("nested_data :: collision - already present json structure")
new_path = self.generate_path(20, "array")
self.error_array_add_unique("normal_nested_data", new_path, value= {}, error = "Memcached error #197 'Cant insert'", field = "nested_data : collision - already present json structure", result = result)
# Tests for Nested Data with long path
self.log.info("long_nested_data ::nested_data : path does not exist - too big path")
new_path = self.generate_path(40, "field")
self.error_array_add_unique("nested_data", new_path, error = "Memcached error #195 'Path too big'", field = "nested_data : path does not exist - too big path", result = result)
self.assertTrue(len(result) == 0, result)
def test_error_array_add_insert_simple_data(self):
result = {}
simple_data = {
"field":"simple",
"array":[{"field":"exists"}, 1, 2]
}
# Add Simple Data
jsonDump = json.dumps(simple_data)
self.client.set("simple_data", 0, 0, jsonDump)
# Tests for Simple Data Set
self.log.info("simple_data :: not an array path does not exist")
self.error_array_add_insert("simple_data", "field", value=2, error = "Memcached error #194 'Invalid path'", field = "simple_data : not an array path does not exist - dictionary", result = result)
self.log.info("simple_data :: negative index value")
self.error_array_add_insert("simple_data", "array[-1]", value=2, error = "Memcached error #194 'Invalid path'", field = "simple_data : negative value - dictionary", result = result)
self.log.info("simple_data :: out of bounds index value")
self.error_array_add_insert("simple_data", "array[200]", value=2, error = "Memcached error #192 'Path not exists'", field = "simple_data : out of bounds index value - dictionary", result = result)
self.log.info("simple_data :: document does not exist")
self.error_array_add_insert("does_not_exist", "does_not_exist", error = "Memcached error #1 'Not found'", field = "simple_data : document does not exist", result = result)
self.log.info("simple_data :: empty path does not exist")
self.error_array_add_insert("simple_data", "", value=2, error = "Memcached error #4 'Invalid'", field = "simple_data : empty path does not exist - dictionary", result = result)
self.assertTrue(len(result) == 0, result)
def test_error_array_add_insert_nested_data(self):
result = {}
nested_simple = {
"field":"simple",
"array":[{"field":"exists"}, 1, 2]
}
# Add deeply nested data (40 levels) as "nested_data" for the long-path tests
base_json = self.generate_json_for_nesting()
nested_json = self.generate_nested(base_json, nested_simple, 40)
jsonDump = json.dumps(nested_json)
self.client.set("nested_data", 0, 0, jsonDump)
# Add normally nested data (20 levels) as "normal_nested_data"
base_json = self.generate_json_for_nesting()
nested_json = self.generate_nested(base_json, nested_simple, 20)
jsonDump = json.dumps(nested_json)
self.client.set("normal_nested_data", 0, 0, jsonDump)
# Tests for Nested Data Set
self.log.info("nested_data :: empty path does not exist")
new_path = self.generate_path(20, "")
self.error_array_add_insert("normal_nested_data", new_path, value=2, error = "Memcached error #194 'Invalid path'", field = "nested_data : empty path does not exist - dictionary", result = result)
self.log.info("simple_data :: out of bounds index value")
new_path = self.generate_path(20, "array[200]")
self.error_array_add_insert("normal_nested_data", new_path, value=2, error = "Memcached error #192 'Path not exists'", field = "simple_data : out of bounds index value - dictionary", result = result)
self.log.info("simple_data :: malformed path")
new_path = self.generate_path(20, "{][[e]]}")
self.error_array_add_insert("normal_nested_data", new_path, value=2, error = "Memcached error #194 'Invalid path'", field = "simple_data : malformed path", result = result)
self.log.info("simple_data :: malformed json")
new_path = self.generate_path(20, "array[0]")
self.error_array_add_insert("normal_nested_data", new_path, value={10}, error = "Memcached error #197 'Cant insert'", field = "simple_data : malformed json", result = result)
# Tests for Nested Data with long path
self.log.info("long_nested_data ::nested_data : path does not exist - too big path")
new_path = self.generate_path(40, "field")
self.error_array_add_insert("nested_data", new_path, error = "Memcached error #195 'Path too big'", field = "nested_data : path does not exist - too big path", result = result)
self.assertTrue(len(result) == 0, result)
def test_error_counter_simple_data(self):
result = {}
simple_data = {
"integer":1,
"double":1.0,
"array":[1, 2]
}
# Add Simple Data
jsonDump = json.dumps(simple_data)
self.client.set("simple_data", 0, 0, jsonDump)
# Tests for Simple Data Set
self.log.info("simple_data :: document does not exist")
self.error_counter("does_not_exist", "does_not_exist", value = 1, error = "Memcached error #1 'Not found'", field = "simple_data : document does not exist", result = result)
self.log.info("simple_data :: empty path does not exist")
self.error_counter("simple_data", "", value = 1, error = "Memcached error #4 'Invalid'", field = "simple_data : empty path does not exist - dictionary", result = result)
self.assertTrue(len(result) == 0, result)
def test_error_counter_nested_data(self):
result = {}
nested_simple = {
"integer":1,
"double":1.0,
"array":[{"field":"exists"}, 1, 2]
}
# Add deeply nested data (40 levels) as "nested_data" for the long-path tests
base_json = self.generate_json_for_nesting()
nested_json = self.generate_nested(base_json, nested_simple, 40)
jsonDump = json.dumps(nested_json)
self.client.set("nested_data", 0, 0, jsonDump)
# Add normally nested data (20 levels) as "normal_nested_data"
base_json = self.generate_json_for_nesting()
nested_json = self.generate_nested(base_json, nested_simple, 20)
jsonDump = json.dumps(nested_json)
self.client.set("normal_nested_data", 0, 0, jsonDump)
# Tests for Nested Data Set
self.log.info("nested_data :: counter to a double")
new_path = self.generate_path(20, "double")
self.error_counter("normal_nested_data", new_path, 1.0, error = "Memcached error #200 'Delta out of range'", field = "nested_data : counter to a double - dictionary", result = result)
self.log.info("nested_data :: integer overflow")
new_path = self.generate_path(20, "integer")
self.error_counter("normal_nested_data", new_path, sys.maxsize, error = "Memcached error #197 'Cant insert'", field = "nested_data : integer overflow - dictionary", result = result)
self.log.info("nested_data :: empty path does not exist")
new_path = self.generate_path(20, "")
self.error_counter("normal_nested_data", new_path, error = "Memcached error #193 'Path mismatch'", field = "nested_data : empty path does not exist - dictionary", result = result)
self.log.info("nested_data :: malformed path")
new_path = self.generate_path(20, "[]{}\][")
self.error_counter("normal_nested_data", new_path, error = "Memcached error #194 'Invalid path'", field = "nested_data : malformed path", result = result)
# Tests for Nested Data with long path
self.log.info("long_nested_data ::nested_data : path does not exist - too big path")
new_path = self.generate_path(40, "field")
self.error_counter("nested_data", new_path, error = "Memcached error #195 'Path too big'", field = "nested_data : path does not exist - too big path", result = result)
self.assertTrue(len(result) == 0, result)
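# The error_* helpers below share one pattern: run a single sub-document call,
# expect it to raise, and record a failure in `result` if no exception occurs or
# if the message does not contain the expected error string. The client is
# re-created afterwards, presumably because a failed call can leave the
# connection in an unusable state.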
def error_exists(self, in_key, path, error = "error", field = "field", result = {}):
try:
self.client.exists_sd(in_key, path)
result[field] = "There were no errors. Error expected: %s" % error
except Exception as ex:
if (str(ex).find(error) == -1):
self.log.info(str(ex))
result[field] = "Error is incorrect.Actual %s.Expected: %s." %(str(ex), error)
self.client = self.direct_client(self.master, self.buckets[0])
def error_gets(self, in_key, path, error = "error", field = "field", result = {}):
try:
self.client.get_sd(in_key, path)
result[field] = "There were no errors. Error expected: %s" % error
except Exception as ex:
if (str(ex).find(error) == -1):
self.log.info(str(ex))
result[field] = "Error is incorrect.Actual %s.Expected: %s." %(str(ex), error)
self.client = self.direct_client(self.master, self.buckets[0])
def error_add_dict(self, in_key, path, value = 10, error = "error", field = "field", result = {}):
try:
opaque, cas, data = self.client.dict_add_sd(in_key, path, value)
result[field] = "There were no errors. Error expected: %s" % error
except Exception as ex:
if (str(ex).find(error) == -1):
self.log.info(str(ex))
result[field] = "Error is incorrect.Actual %s.Expected: %s." %(str(ex), error)
self.client = self.direct_client(self.master, self.buckets[0])
def error_upsert_dict(self, in_key, path, value = 10, error = "error", field = "field", result = {}):
try:
opaque, cas, data = self.client.dict_upsert_sd(in_key, path, value)
result[field] = "There were no errors. Error expected: %s" % error
except Exception as ex:
if (str(ex).find(error) == -1):
self.log.info(str(ex))
result[field] = "Error is incorrect.Actual %s.Expected: %s." %(str(ex), error)
self.client = self.direct_client(self.master, self.buckets[0])
def error_array_push_last(self, in_key, path, value = 10, error = "error", field = "field", result = {}):
try:
opaque, cas, data = self.client.array_push_last_sd(in_key, path, value)
result[field] = "There were no errors. Error expected: %s" % error
except Exception as ex:
if (str(ex).find(error) == -1):
self.log.info(str(ex))
result[field] = "Error is incorrect.Actual %s.Expected: %s." %(str(ex), error)
self.client = self.direct_client(self.master, self.buckets[0])
def error_array_push_first(self, in_key, path, value = 10, error = "error", field = "field", result = {}):
try:
opaque, cas, data = self.client.array_push_first_sd(in_key, path, value)
result[field] = "There were no errors. Error expected: %s" % error
except Exception as ex:
if (str(ex).find(error) == -1):
self.log.info(str(ex))
result[field] = "Error is incorrect.Actual %s.Expected: %s." %(str(ex), error)
self.client = self.direct_client(self.master, self.buckets[0])
def error_array_add_unique(self, in_key, path, value = 10, error = "error", field = "field", result = {}):
try:
opaque, cas, data = self.client.array_add_unique_sd(in_key, path, value)
result[field] = "There were no errors. Error expected: %s" % error
except Exception as ex:
if (str(ex).find(error) == -1):
self.log.info(str(ex))
result[field] = "Error is incorrect.Actual %s.Expected: %s." %(str(ex), error)
self.client = self.direct_client(self.master, self.buckets[0])
def error_array_add_insert(self, in_key, path, value = 10, error = "error", field = "field", result = {}):
try:
opaque, cas, data = self.client.array_add_insert_sd(in_key, path, value)
result[field] = "There were no errors. Error expected: %s" % error
except Exception as ex:
if (str(ex).find(error) == -1):
self.log.info(str(ex))
result[field] = "Error is incorrect.Actual %s.Expected: %s." %(str(ex), error)
self.client = self.direct_client(self.master, self.buckets[0])
def error_replace(self, in_key, path, value = 10, error = "error", field = "field", result = {}):
try:
opaque, cas, data = self.client.replace_sd(in_key, path, value)
result[field] = "There were no errors. Error expected: %s" % error
except Exception as ex:
if (str(ex).find(error) == -1):
self.log.info(str(ex))
result[field] = "Error is incorrect.Actual %s.Expected: %s." %(str(ex), error)
self.client = self.direct_client(self.master, self.buckets[0])
def error_delete(self, in_key, path, error = "error", field = "field", result = {}):
try:
opaque, cas, data = self.client.delete_sd(in_key, path)
result[field] = "There were no errors. Error expected: %s" % error
except Exception as ex:
if (str(ex).find(error) == -1):
self.log.info(str(ex))
result[field] = "Error is incorrect.Actual %s.Expected: %s." %(str(ex), error)
self.client = self.direct_client(self.master, self.buckets[0])
def error_counter(self, in_key, path, value = 10, error = "error", field = "field", result = {}):
try:
opaque, cas, data = self.client.counter_sd(in_key, path, value)
result[field] = "There were no errors. Error expected: %s" % error
except Exception as ex:
if (str(ex).find(error) == -1):
self.log.info(str(ex))
result[field] = "Error is incorrect.Actual %s.Expected: %s." %(str(ex), error)
self.client = self.direct_client(self.master, self.buckets[0])
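# Illustrative sketch only (not part of the original suite): the helpers above all
# follow the same shape and could be collapsed into one generic checker. `op` is a
# hypothetical zero-argument callable wrapping a client call, e.g.
# lambda: client.get_sd(key, path).
def expect_subdoc_error(op, error, field, result):
    """Run `op`, expecting an exception whose text contains `error`; record mismatches in `result`."""
    try:
        op()
        result[field] = "There were no errors. Error expected: %s" % error
    except Exception as ex:
        if str(ex).find(error) == -1:
            result[field] = "Error is incorrect. Actual: %s. Expected: %s." % (str(ex), error)
# Example (illustrative): expect_subdoc_error(lambda: client.get_sd("simple_data", "missing"),
#     "Path not exists", "simple_data : missing path", result)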
| 64.138243
| 218
| 0.630119
| 6,473
| 49,643
| 4.63958
| 0.022092
| 0.081247
| 0.05594
| 0.053277
| 0.977591
| 0.974094
| 0.962673
| 0.956413
| 0.949887
| 0.943627
| 0
| 0.018396
| 0.243337
| 49,643
| 774
| 219
| 64.138243
| 0.781114
| 0.036118
| 0
| 0.690332
| 0
| 0
| 0.336005
| 0
| 0
| 0
| 0
| 0
| 0.033233
| 1
| 0.05287
| false
| 0
| 0.009063
| 0
| 0.063444
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a3fbc6fd6a1aee511f215b53f994fc125e89fac9
| 472
|
py
|
Python
|
ddf/__init__.py
|
timgates42/django-dynamic-fixture
|
f64521fdd81110c26a5ed78bf79891c7af4cf2ff
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
ddf/__init__.py
|
timgates42/django-dynamic-fixture
|
f64521fdd81110c26a5ed78bf79891c7af4cf2ff
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
ddf/__init__.py
|
timgates42/django-dynamic-fixture
|
f64521fdd81110c26a5ed78bf79891c7af4cf2ff
|
[
"Apache-2.0",
"MIT"
] | 1
|
2020-04-22T16:59:11.000Z
|
2020-04-22T16:59:11.000Z
|
# Short alias to use: # `from ddf import *` instead of `from django_dynamic_fixture import *`
from django_dynamic_fixture import N, G, F, C, P, PRE_SAVE, POST_SAVE, __version__
from django_dynamic_fixture import new, get, fixture, teach, look_up_alias
from django_dynamic_fixture.decorators import skip_for_database, only_for_database
from django_dynamic_fixture.fdf import FileSystemDjangoTestCase
from django_dynamic_fixture.script_ddf_checkings import ddf_check_models
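# Typical usage (illustrative, not part of this module): `from ddf import G` and then
# `instance = G(SomeModel, some_field='value')` creates and saves an instance with
# required fields auto-filled, while `N(...)` builds one without saving.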
| 67.428571
| 93
| 0.847458
| 71
| 472
| 5.239437
| 0.521127
| 0.16129
| 0.274194
| 0.387097
| 0.241935
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.101695
| 472
| 6
| 94
| 78.666667
| 0.877358
| 0.190678
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4323d39664750359deff55a3bb2dfc14d0655edd
| 35
|
py
|
Python
|
solaris/vector/__init__.py
|
rbavery/solaris
|
0d7bd1439a96c243d7810fcddf776b7e635a05ea
|
[
"Apache-2.0"
] | 367
|
2019-05-05T22:09:39.000Z
|
2022-03-27T10:05:16.000Z
|
3-SatShipAI/solaris/vector/__init__.py
|
Z-Zheng/SpaceNet_SAR_Buildings_Solutions
|
6a9c3962d987d985384d0d41a187f5fbfadac82c
|
[
"Apache-2.0"
] | 396
|
2019-04-30T21:51:12.000Z
|
2022-03-31T09:21:09.000Z
|
3-SatShipAI/solaris/vector/__init__.py
|
Z-Zheng/SpaceNet_SAR_Buildings_Solutions
|
6a9c3962d987d985384d0d41a187f5fbfadac82c
|
[
"Apache-2.0"
] | 120
|
2019-06-29T20:20:08.000Z
|
2022-03-10T07:37:57.000Z
|
from . import graph, mask, polygon
| 17.5
| 34
| 0.742857
| 5
| 35
| 5.2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.171429
| 35
| 1
| 35
| 35
| 0.896552
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4348ecd2ff8b57ebfb9159bb4f9d1ae9e93757e7
| 47
|
py
|
Python
|
backend/tests/locust/locustfile.py
|
didi/MeetDot
|
a57009d30c1347a9b85950c2e02b77685ce63952
|
[
"Apache-2.0"
] | 6
|
2021-09-23T14:53:58.000Z
|
2022-02-18T10:14:17.000Z
|
backend/tests/locust/locustfile.py
|
didi/MeetDot
|
a57009d30c1347a9b85950c2e02b77685ce63952
|
[
"Apache-2.0"
] | null | null | null |
backend/tests/locust/locustfile.py
|
didi/MeetDot
|
a57009d30c1347a9b85950c2e02b77685ce63952
|
[
"Apache-2.0"
] | 1
|
2021-09-24T02:48:50.000Z
|
2021-09-24T02:48:50.000Z
|
from users import CreateRoomUser, JoinRoomUser
| 23.5
| 46
| 0.87234
| 5
| 47
| 8.2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.106383
| 47
| 1
| 47
| 47
| 0.97619
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4a36949d4b60e99abfc9f9e298e4d79de17f00d2
| 173
|
py
|
Python
|
IndexHome/admin.py
|
Developer-R-7/CaffeineCode
|
1a489ef0da669dd6f7e5b1d80a3c6046e2e7b2fe
|
[
"MIT"
] | 1
|
2022-02-03T18:42:52.000Z
|
2022-02-03T18:42:52.000Z
|
IndexHome/admin.py
|
Developer-R-7/CaffeineCode
|
1a489ef0da669dd6f7e5b1d80a3c6046e2e7b2fe
|
[
"MIT"
] | null | null | null |
IndexHome/admin.py
|
Developer-R-7/CaffeineCode
|
1a489ef0da669dd6f7e5b1d80a3c6046e2e7b2fe
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Contact, Newsletter, Profile
admin.site.register(Profile)
admin.site.register(Newsletter)
admin.site.register(Contact)
| 24.714286
| 48
| 0.82659
| 23
| 173
| 6.217391
| 0.478261
| 0.188811
| 0.356643
| 0.335664
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.080925
| 173
| 6
| 49
| 28.833333
| 0.899371
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
4a931c6077bd207610cd8d541bd69f07aa1a01d8
| 70
|
py
|
Python
|
data_to_model/models/__init__.py
|
dmitriiweb/data2model
|
42331176792f6fe606f45f54c8ed55afb376b193
|
[
"MIT"
] | null | null | null |
data_to_model/models/__init__.py
|
dmitriiweb/data2model
|
42331176792f6fe606f45f54c8ed55afb376b193
|
[
"MIT"
] | null | null | null |
data_to_model/models/__init__.py
|
dmitriiweb/data2model
|
42331176792f6fe606f45f54c8ed55afb376b193
|
[
"MIT"
] | null | null | null |
from .class_data import ClassData
from .class_field import ClassField
| 23.333333
| 35
| 0.857143
| 10
| 70
| 5.8
| 0.7
| 0.310345
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114286
| 70
| 2
| 36
| 35
| 0.935484
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4ab7c4f14f96ba85326b3289d42a1ac40c50f039
| 10,256
|
py
|
Python
|
python/paddle/fluid/tests/unittests/test_rot90_op.py
|
wanghuancoder/Paddle
|
8f2b0860ebe4bd5998c97dfaf2a29702ffd2b52a
|
[
"Apache-2.0"
] | 1
|
2021-12-27T02:39:31.000Z
|
2021-12-27T02:39:31.000Z
|
python/paddle/fluid/tests/unittests/test_rot90_op.py
|
wanghuancoder/Paddle
|
8f2b0860ebe4bd5998c97dfaf2a29702ffd2b52a
|
[
"Apache-2.0"
] | null | null | null |
python/paddle/fluid/tests/unittests/test_rot90_op.py
|
wanghuancoder/Paddle
|
8f2b0860ebe4bd5998c97dfaf2a29702ffd2b52a
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid import Program, program_guard
class TestRot90_API(unittest.TestCase):
"""Test rot90 api."""
def test_static_graph(self):
paddle.enable_static()
startup_program = fluid.Program()
train_program = fluid.Program()
with fluid.program_guard(train_program, startup_program):
input = fluid.data(name='input', dtype='float32', shape=[2, 3])
output = paddle.rot90(input, k=1, axes=[0, 1])
output = paddle.rot90(output, k=1, axes=[0, 1])
output = output.rot90(k=1, axes=[0, 1])
place = fluid.CPUPlace()
if fluid.core.is_compiled_with_cuda():
place = fluid.CUDAPlace(0)
exe = fluid.Executor(place)
exe.run(startup_program)
img = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32)
res = exe.run(train_program,
feed={'input': img},
fetch_list=[output])
out_np = np.array(res[0])
out_ref = np.array([[4, 1], [5, 2], [6, 3]]).astype(np.float32)
self.assertTrue(
(out_np == out_ref).all(),
msg='rot90 output is wrong, out =' + str(out_np))
def test_static_k_0(self):
paddle.enable_static()
input = fluid.data(name='input', dtype='float32', shape=[2, 3])
startup_program = fluid.Program()
train_program = fluid.Program()
with fluid.program_guard(train_program, startup_program):
input = fluid.data(name='input', dtype='float32', shape=[2, 3])
output = paddle.rot90(input, k=0, axes=[0, 1])
place = fluid.CPUPlace()
if fluid.core.is_compiled_with_cuda():
place = fluid.CUDAPlace(0)
exe = fluid.Executor(place)
exe.run(startup_program)
img = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32)
res = exe.run(train_program,
feed={'input': img},
fetch_list=[output])
out_np = np.array(res[0])
out_ref = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32)
self.assertTrue(
(out_np == out_ref).all(),
msg='rot90 output is wrong, out =' + str(out_np))
def test_static_k_2(self):
paddle.enable_static()
input = fluid.data(name='input', dtype='float32', shape=[2, 3])
startup_program = fluid.Program()
train_program = fluid.Program()
with fluid.program_guard(train_program, startup_program):
input = fluid.data(name='input', dtype='float32', shape=[2, 3])
output = paddle.rot90(input, k=2, axes=[0, 1])
place = fluid.CPUPlace()
if fluid.core.is_compiled_with_cuda():
place = fluid.CUDAPlace(0)
exe = fluid.Executor(place)
exe.run(startup_program)
img = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32)
res = exe.run(train_program,
feed={'input': img},
fetch_list=[output])
out_np = np.array(res[0])
out_ref = np.array([[6, 5, 4], [3, 2, 1]]).astype(np.float32)
self.assertTrue(
(out_np == out_ref).all(),
msg='rot90 output is wrong, out =' + str(out_np))
def test_static_k_3(self):
paddle.enable_static()
input = fluid.data(name='input', dtype='float32', shape=[2, 3])
startup_program = fluid.Program()
train_program = fluid.Program()
with fluid.program_guard(train_program, startup_program):
input = fluid.data(name='input', dtype='float32', shape=[2, 3])
output = paddle.rot90(input, k=3, axes=[0, 1])
place = fluid.CPUPlace()
if fluid.core.is_compiled_with_cuda():
place = fluid.CUDAPlace(0)
exe = fluid.Executor(place)
exe.run(startup_program)
img = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32)
res = exe.run(train_program,
feed={'input': img},
fetch_list=[output])
out_np = np.array(res[0])
out_ref = np.array([[4, 1], [5, 2], [6, 3]]).astype(np.float32)
self.assertTrue(
(out_np == out_ref).all(),
msg='rot90 output is wrong, out =' + str(out_np))
def test_static_neg_k_1(self):
paddle.enable_static()
input = fluid.data(name='input', dtype='float32', shape=[2, 3])
startup_program = fluid.Program()
train_program = fluid.Program()
with fluid.program_guard(train_program, startup_program):
input = fluid.data(name='input', dtype='float32', shape=[2, 3])
output = paddle.rot90(input, k=-1, axes=[0, 1])
place = fluid.CPUPlace()
if fluid.core.is_compiled_with_cuda():
place = fluid.CUDAPlace(0)
exe = fluid.Executor(place)
exe.run(startup_program)
img = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32)
res = exe.run(train_program,
feed={'input': img},
fetch_list=[output])
out_np = np.array(res[0])
out_ref = np.array([[4, 1], [5, 2], [6, 3]]).astype(np.float32)
self.assertTrue(
(out_np == out_ref).all(),
msg='rot90 output is wrong, out =' + str(out_np))
def test_static_neg_k_2(self):
paddle.enable_static()
input = fluid.data(name='input', dtype='float32', shape=[2, 3])
startup_program = fluid.Program()
train_program = fluid.Program()
with fluid.program_guard(train_program, startup_program):
input = fluid.data(name='input', dtype='float32', shape=[2, 3])
output = paddle.rot90(input, k=-2, axes=[0, 1])
place = fluid.CPUPlace()
if fluid.core.is_compiled_with_cuda():
place = fluid.CUDAPlace(0)
exe = fluid.Executor(place)
exe.run(startup_program)
img = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32)
res = exe.run(train_program,
feed={'input': img},
fetch_list=[output])
out_np = np.array(res[0])
out_ref = np.array([[6, 5, 4], [3, 2, 1]]).astype(np.float32)
self.assertTrue(
(out_np == out_ref).all(),
msg='rot90 output is wrong, out =' + str(out_np))
def test_static_neg_k_3(self):
paddle.enable_static()
input = fluid.data(name='input', dtype='float32', shape=[2, 3])
startup_program = fluid.Program()
train_program = fluid.Program()
with fluid.program_guard(train_program, startup_program):
input = fluid.data(name='input', dtype='float32', shape=[2, 3])
output = paddle.rot90(input, k=-3, axes=[0, 1])
place = fluid.CPUPlace()
if fluid.core.is_compiled_with_cuda():
place = fluid.CUDAPlace(0)
exe = fluid.Executor(place)
exe.run(startup_program)
img = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32)
res = exe.run(train_program,
feed={'input': img},
fetch_list=[output])
out_np = np.array(res[0])
out_ref = np.array([[3, 6], [2, 5], [1, 4]]).astype(np.float32)
self.assertTrue(
(out_np == out_ref).all(),
msg='rot90 output is wrong, out =' + str(out_np))
def test_error_api(self):
paddle.enable_static()
## dims error
def run1():
input = fluid.data(name='input', dtype='float32', shape=[2, 3])
output = paddle.rot90(input, k=1, axes=[0])
self.assertRaises(ValueError, run1)
## input dims error
def run2():
input = fluid.data(name='input', dtype='float32', shape=[2])
output = paddle.rot90(input, k=1, axes=[0, 1])
self.assertRaises(ValueError, run2)
def run3():
input = fluid.data(name='input', dtype='float32', shape=[2, 3])
output = paddle.rot90(input, k=1, axes=[0, 0])
self.assertRaises(ValueError, run3)
def run4():
input = fluid.data(name='input', dtype='float32', shape=[2, 3])
output = paddle.rot90(input, k=1, axes=[3, 1])
self.assertRaises(ValueError, run4)
def run5():
input = fluid.data(name='input', dtype='float32', shape=[2, 3])
output = paddle.rot90(input, k=1, axes=[0, 3])
self.assertRaises(ValueError, run5)
def test_dygraph(self):
img = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32)
with fluid.dygraph.guard():
inputs = fluid.dygraph.to_variable(img)
ret = paddle.rot90(inputs, k=1, axes=[0, 1])
ret = ret.rot90(1, axes=[0, 1])
ret = paddle.rot90(ret, k=1, axes=[0, 1])
out_ref = np.array([[4, 1], [5, 2], [6, 3]]).astype(np.float32)
self.assertTrue(
(ret.numpy() == out_ref).all(),
msg='rot90 output is wrong, out =' + str(ret.numpy()))
if __name__ == "__main__":
unittest.main()
4abbb7862c6a23604811cccd157e0b4f77b88c4b | 71,089 | py | Python
| eran/ELINA/python_interface/fppoly.py | pauls658/ReluDiff-ICSE2020-Artifact | 212854fe04f482183c239e5dfec70106a9a83df8 | ["Apache-2.0"] | 7 | 2020-01-27T21:25:49.000Z | 2022-01-07T04:37:37.000Z
| eran/ELINA/python_interface/fppoly.py | yqtianust/ReluDiff-ICSE2020-Artifact | 149f6efe4799602db749faa576980c36921a07c7 | ["Apache-2.0"] | 1 | 2022-01-25T17:41:54.000Z | 2022-01-26T02:27:51.000Z
| eran/ELINA/python_interface/fppoly.py | yqtianust/ReluDiff-ICSE2020-Artifact | 149f6efe4799602db749faa576980c36921a07c7 | ["Apache-2.0"] | 3 | 2020-03-14T17:12:17.000Z | 2022-03-16T09:50:46.000Z |
#
#
# This source file is part of ELINA (ETH LIbrary for Numerical Analysis).
# ELINA is Copyright © 2019 Department of Computer Science, ETH Zurich
# This software is distributed under GNU Lesser General Public License Version 3.0.
# For more information, see the ELINA project website at:
# http://elina.ethz.ch
#
# THE SOFTWARE IS PROVIDED "AS-IS" WITHOUT ANY WARRANTY OF ANY KIND, EITHER
# EXPRESS, IMPLIED OR STATUTORY, INCLUDING BUT NOT LIMITED TO ANY WARRANTY
# THAT THE SOFTWARE WILL CONFORM TO SPECIFICATIONS OR BE ERROR-FREE AND ANY
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE,
# TITLE, OR NON-INFRINGEMENT. IN NO EVENT SHALL ETH ZURICH BE LIABLE FOR ANY
# DAMAGES, INCLUDING BUT NOT LIMITED TO DIRECT, INDIRECT,
# SPECIAL OR CONSEQUENTIAL DAMAGES, ARISING OUT OF, RESULTING FROM, OR IN
# ANY WAY CONNECTED WITH THIS SOFTWARE (WHETHER OR NOT BASED UPON WARRANTY,
# CONTRACT, TORT OR OTHERWISE).
#
#
from fppoly_imports import *
from elina_manager_h import *
from elina_abstract0_h import *
from elina_interval_h import *
from elina_linexpr0_h import *
import numpy as np
from numpy.ctypeslib import ndpointer
import ctypes
_doublepp = ndpointer(dtype=np.uintp, ndim=1, flags='C')
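# The `_doublepp` argtype above describes a C `double**`: a 1-D array of row
# pointers into a 2-D matrix. The helper below is a hypothetical sketch (not
# part of the original ELINA API) showing one common way to build such a
# pointer array from a C-contiguous float64 numpy matrix; the caller must keep
# the matrix alive while the pointer array is in use.
def _as_doublepp(mat):
    assert mat.dtype == np.float64 and mat.flags['C_CONTIGUOUS']
    # One pointer per row: base address plus the byte offset of each row.
    return (mat.__array_interface__['data'][0]
            + np.arange(mat.shape[0]) * mat.strides[0]).astype(np.uintp)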
# ====================================================================== #
# Basics
# ====================================================================== #
def fppoly_manager_alloc():
"""
Allocates an ElinaManager.
Returns
-------
man : ElinaManagerPtr
Pointer to the newly allocated ElinaManager.
"""
man = None
try:
fppoly_manager_alloc_c = fppoly_api.fppoly_manager_alloc
fppoly_manager_alloc_c.restype = ElinaManagerPtr
fppoly_manager_alloc_c.argtypes = None
man = fppoly_manager_alloc_c()
except:
print('Problem with loading/calling "fppoly_manager_alloc" from "libfppoly.so"')
return man
def fppoly_from_network_input(man, intdim, realdim, inf_array, sup_array):
"""
Create an abstract element from perturbed input
Parameters
----------
man : ElinaManagerPtr
Pointer to the ElinaManager.
intdim : c_size_t
Number of integer variables.
realdim: c_size_t
Number of real variables
inf_array: POINTER(double)
lower bound array
sup_array: POINTER(double)
upper bound array
Returns
-------
res: ElinaAbstract0Ptr
Pointer to the new abstract object
"""
res = None
try:
fppoly_from_network_input_c = fppoly_api.fppoly_from_network_input
fppoly_from_network_input_c.restype = ElinaAbstract0Ptr
fppoly_from_network_input_c.argtypes = [ElinaManagerPtr, c_size_t, c_size_t,ndpointer(ctypes.c_double),ndpointer(ctypes.c_double)]
res = fppoly_from_network_input_c(man,intdim, realdim, inf_array,sup_array)
except Exception as inst:
print('Problem with loading/calling "fppoly_from_network_input" from "libfppoly.so"')
print(inst)
return res
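# Usage sketch (illustrative only; assumes libfppoly.so loads correctly and the
# returned element is later freed through the usual ElinaAbstract0 API):
#
#   man = fppoly_manager_alloc()
#   inf = np.ascontiguousarray([0.0, 0.0], dtype=np.float64)  # lower input bounds
#   sup = np.ascontiguousarray([1.0, 1.0], dtype=np.float64)  # upper input bounds
#   element = fppoly_from_network_input(man, 0, len(inf), inf, sup)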
def fppoly_set_network_input_box(man, element, intdim, realdim, inf_array, sup_array):
"""
Create an abstract element from perturbed input
Parameters
----------
man : ElinaManagerPtr
Pointer to the ElinaManager.
element: ElinaAbstract0Ptr
Pointer to the abstract object
intdim : c_size_t
Number of integer variables.
realdim: c_size_t
Number of real variables
inf_array: POINTER(double)
lower bound array
sup_array: POINTER(double)
upper bound array
Returns
-------
res: ElinaAbstract0Ptr
Pointer to the new abstract object
"""
res = None
try:
fppoly_set_network_input_box_c = fppoly_api.fppoly_set_network_input_box
fppoly_set_network_input_box_c.restype = None
fppoly_set_network_input_box_c.argtypes = [ElinaManagerPtr, ElinaAbstract0Ptr, c_size_t, c_size_t,ndpointer(ctypes.c_double),ndpointer(ctypes.c_double)]
res = fppoly_set_network_input_box_c(man,element, intdim, realdim, inf_array,sup_array)
except Exception as inst:
print('Problem with loading/calling "fppoly_set_network_input_box" from "libfppoly.so"')
print(inst)
return res
def fppoly_from_network_input_poly(man, intdim, realdim, inf_array, sup_array, lexpr_weights, lexpr_cst, lexpr_dim, uexpr_weights, uexpr_cst, uexpr_dim, expr_size):
"""
Create an abstract element from perturbed input
Parameters
----------
man : ElinaManagerPtr
Pointer to the ElinaManager.
intdim : c_size_t
Number of integer variables.
realdim: c_size_t
Number of real variables
inf_array: POINTER(double)
lower bound array
sup_array: POINTER(double)
upper bound array
lexpr_weights: POINTER(double)
coefficients of the lower polyhedra constraints
lexpr_cst: POINTER(double)
constants of the lower polyhedra constraints
lexpr_dim: POINTER(c_size_t)
the indexes of the variables in the lower polyhedra constraints
uexpr_weights: POINTER(double)
coefficients of the upper polyhedra constraints
uexpr_cst: POINTER(double)
constants of the upper polyhedra constraints
uexpr_dim: POINTER(c_size_t)
the indexes of the variables in the upper polyhedra constraints
expr_size: c_size_t
size of the polyhedra constraints
Returns
-------
res: ElinaAbstract0Ptr
Pointer to the new abstract object
"""
res = None
try:
fppoly_from_network_input_poly_c = fppoly_api.fppoly_from_network_input_poly
fppoly_from_network_input_poly_c.restype = ElinaAbstract0Ptr
fppoly_from_network_input_poly_c.argtypes = [ElinaManagerPtr, c_size_t, c_size_t,ndpointer(ctypes.c_double),ndpointer(ctypes.c_double),ndpointer(ctypes.c_double),ndpointer(ctypes.c_double),ndpointer(ctypes.c_size_t),ndpointer(ctypes.c_double),ndpointer(ctypes.c_double),ndpointer(ctypes.c_size_t), c_size_t]
res = fppoly_from_network_input_poly_c(man,intdim, realdim, inf_array,sup_array, lexpr_weights, lexpr_cst, lexpr_dim, uexpr_weights, uexpr_cst, uexpr_dim ,expr_size)
except Exception as inst:
print('Problem with loading/calling "fppoly_from_network_input_poly" from "libfppoly.so"')
print(inst)
return res
def ffn_handle_first_relu_layer(man, element,weights, bias, size, num_pixels, predecessors):
"""
handle the first FFN ReLU layer
Parameters
----------
man : ElinaManagerPtr
Pointer to the ElinaManager.
element : ElinaAbstract0Ptr
Pointer to the ElinaAbstract0.
weights : POINTER(POINTER(c_double))
The weight matrix.
bias : POINTER(c_double)
The bias vector
size: c_size_t
Number of neurons in the first layer
num_pixels:
Number of pixels in the input
predecessors:
the layers before the current layer
Returns
-------
res : ElinaAbstract0Ptr
Pointer to the new abstract object.
"""
try:
ffn_handle_first_relu_layer_c = fppoly_api.ffn_handle_first_relu_layer
ffn_handle_first_relu_layer_c.restype = None
ffn_handle_first_relu_layer_c.argtypes = [ElinaManagerPtr, ElinaAbstract0Ptr, _doublepp, ndpointer(ctypes.c_double), c_size_t, c_size_t, POINTER(c_size_t)]
ffn_handle_first_relu_layer_c(man,element,weights, bias, size, num_pixels, predecessors)
except Exception as inst:
print('Problem with loading/calling "ffn_handle_first_relu_layer" from "libfppoly.so"')
print(inst)
return
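# Call sketch (hypothetical shapes and values; `_as_doublepp` is the
# illustrative helper defined near the top of this file, not part of the
# original API):
#
#   W = np.ascontiguousarray(np.random.rand(10, 784))   # 10 neurons, 784 pixels
#   b = np.ascontiguousarray(np.zeros(10), dtype=np.float64)
#   preds = (c_size_t * 1)(0)                            # predecessor layer ids
#   ffn_handle_first_relu_layer(man, element, _as_doublepp(W), b, 10, 784, preds)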
def ffn_handle_first_relu_layer_no_alloc(man, element,weights, bias, size, num_pixels, predecessors):
"""
handle the first FFN ReLU layer
Parameters
----------
man : ElinaManagerPtr
Pointer to the ElinaManager.
element : ElinaAbstract0Ptr
Pointer to the ElinaAbstract0.
weights : POINTER(POINTER(c_double))
The weight matrix.
bias : POINTER(c_double)
The bias vector
size: c_size_t
Number of neurons in the first layer
num_pixels:
Number of pixels in the input
predecessors:
the layers before the current layer
Returns
-------
res : ElinaAbstract0Ptr
Pointer to the new abstract object.
"""
try:
ffn_handle_first_relu_layer_no_alloc_c = fppoly_api.ffn_handle_first_relu_layer_no_alloc
ffn_handle_first_relu_layer_no_alloc_c.restype = None
ffn_handle_first_relu_layer_no_alloc_c.argtypes = [ElinaManagerPtr, ElinaAbstract0Ptr, _doublepp, ndpointer(ctypes.c_double), c_size_t, c_size_t, POINTER(c_size_t)]
ffn_handle_first_relu_layer_no_alloc_c(man,element,weights, bias, size, num_pixels, predecessors)
except Exception as inst:
print('Problem with loading/calling "ffn_handle_first_relu_layer_no_alloc" from "libfppoly.so"')
print(inst)
return
def ffn_handle_first_sigmoid_layer(man, element,weights, bias, size, num_pixels, predecessors):
"""
handle the first FFN Sigmoid layer
Parameters
----------
man : ElinaManagerPtr
Pointer to the ElinaManager.
weights : POINTER(POINTER(c_double))
The weight matrix.
bias : POINTER(c_double)
The bias vector
size: c_size_t
Number of neurons in the first layer
num_pixels:
Number of pixels in the input
predecessors:
the layers before the current layer
Returns
-------
res : ElinaAbstract0Ptr
Pointer to the new abstract object.
"""
try:
ffn_handle_first_sigmoid_layer_c = fppoly_api.ffn_handle_first_sigmoid_layer
ffn_handle_first_sigmoid_layer_c.restype = None
ffn_handle_first_sigmoid_layer_c.argtypes = [ElinaManagerPtr, ElinaAbstract0Ptr, _doublepp, ndpointer(ctypes.c_double), c_size_t, c_size_t, POINTER(c_size_t)]
ffn_handle_first_sigmoid_layer_c(man,element,weights, bias, size, num_pixels, predecessors)
except Exception as inst:
print('Problem with loading/calling "ffn_handle_first_sigmoid_layer" from "libfppoly.so"')
print(inst)
return
def ffn_handle_first_sigmoid_layer_no_alloc(man, element,weights, bias, size, num_pixels, predecessors):
"""
handle the first FFN Sigmoid layer
Parameters
----------
man : ElinaManagerPtr
Pointer to the ElinaManager.
weights : POINTER(POINTER(c_double))
The weight matrix.
bias : POINTER(c_double)
The bias vector
size: c_size_t
Number of neurons in the first layer
num_pixels:
Number of pixels in the input
predecessors:
the layers before the current layer
Returns
-------
res : ElinaAbstract0Ptr
Pointer to the new abstract object.
"""
try:
ffn_handle_first_sigmoid_layer_no_alloc_c = fppoly_api.ffn_handle_first_sigmoid_layer_no_alloc
ffn_handle_first_sigmoid_layer_no_alloc_c.restype = None
ffn_handle_first_sigmoid_layer_no_alloc_c.argtypes = [ElinaManagerPtr, ElinaAbstract0Ptr, _doublepp, ndpointer(ctypes.c_double), c_size_t, c_size_t, POINTER(c_size_t)]
ffn_handle_first_sigmoid_layer_no_alloc_c(man,element,weights, bias, size, num_pixels, predecessors)
except Exception as inst:
print('Problem with loading/calling "ffn_handle_first_sigmoid_layer_no_alloc" from "libfppoly.so"')
print(inst)
return
def ffn_handle_first_tanh_layer(man, element,weights, bias, size, num_pixels, predecessors):
"""
handle the first FFN Tanh layer
Parameters
----------
man : ElinaManagerPtr
Pointer to the ElinaManager.
weights : POINTER(POINTER(c_double))
The weight matrix
bias : POINTER(c_double)
The bias vector
size: c_size_t
Number of neurons in the first layer
num_pixels:
Number of pixels in the input
predecessors:
the layers before the current layer
Returns
-------
res : ElinaAbstract0Ptr
Pointer to the new abstract object.
"""
try:
ffn_handle_first_tanh_layer_c = fppoly_api.ffn_handle_first_tanh_layer
ffn_handle_first_tanh_layer_c.restype = None
ffn_handle_first_tanh_layer_c.argtypes = [ElinaManagerPtr, ElinaAbstract0Ptr, _doublepp, ndpointer(ctypes.c_double), c_size_t, c_size_t, POINTER(c_size_t)]
ffn_handle_first_tanh_layer_c(man,element,weights, bias, size, num_pixels, predecessors)
except Exception as inst:
print('Problem with loading/calling "ffn_handle_first_tanh_layer" from "libfppoly.so"')
print(inst)
return
def ffn_handle_first_tanh_layer_no_alloc(man, element,weights, bias, size, num_pixels, predecessors):
"""
handle the first FFN Tanh layer
Parameters
----------
man : ElinaManagerPtr
Pointer to the ElinaManager.
weights : POINTER(POINTER(c_double))
The weight matrix
bias : POINTER(c_double)
The bias vector
size: c_size_t
Number of neurons in the first layer
num_pixels:
Number of pixels in the input
predecessors:
the layers before the current layer
Returns
-------
res : ElinaAbstract0Ptr
Pointer to the new abstract object.
"""
try:
ffn_handle_first_tanh_layer_no_alloc_c = fppoly_api.ffn_handle_first_tanh_layer_no_alloc
ffn_handle_first_tanh_layer_no_alloc_c.restype = None
ffn_handle_first_tanh_layer_no_alloc_c.argtypes = [ElinaManagerPtr, ElinaAbstract0Ptr, _doublepp, ndpointer(ctypes.c_double), c_size_t, c_size_t, POINTER(c_size_t)]
ffn_handle_first_tanh_layer_no_alloc_c(man,element,weights, bias, size, num_pixels, predecessors)
except Exception as inst:
print('Problem with loading/calling "ffn_handle_first_tanh_layer_no_alloc" from "libfppoly.so"')
print(inst)
return
def ffn_handle_first_parabola_layer(man, element,weights, bias, size, num_pixels, predecessors):
"""
handle the first FFN Parabolic layer
Parameters
----------
man : ElinaManagerPtr
Pointer to the ElinaManager.
weights : POINTER(POINTER(c_double))
The weight matrix
bias : POINTER(c_double)
The bias vector
size: c_size_t
Number of neurons in the first layer
num_pixels:
Number of pixels in the input
predecessors:
the layers before the current layer
Returns
-------
res : ElinaAbstract0Ptr
Pointer to the new abstract object.
"""
try:
ffn_handle_first_parabola_layer_c = fppoly_api.ffn_handle_first_parabola_layer
ffn_handle_first_parabola_layer_c.restype = None
ffn_handle_first_parabola_layer_c.argtypes = [ElinaManagerPtr, ElinaAbstract0Ptr, _doublepp, ndpointer(ctypes.c_double), c_size_t, c_size_t, POINTER(c_size_t)]
ffn_handle_first_parabola_layer_c(man,element,weights, bias, size, num_pixels, predecessors)
except Exception as inst:
print('Problem with loading/calling "ffn_handle_first_parabola_layer" from "libfppoly.so"')
print(inst)
return
def ffn_handle_first_parabola_layer_no_alloc(man, element,weights, bias, size, num_pixels, predecessors):
"""
handle the first FFN Parabolic layer
Parameters
----------
man : ElinaManagerPtr
Pointer to the ElinaManager.
weights : POINTER(POINTER(c_double))
The weight matrix
bias : POINTER(c_double)
The bias vector
size: c_size_t
Number of neurons in the first layer
num_pixels:
Number of pixels in the input
Returns
-------
res : ElinaAbstract0Ptr
Pointer to the new abstract object.
"""
try:
ffn_handle_first_parabola_layer_no_alloc_c = fppoly_api.ffn_handle_first_parabola_layer_no_alloc
ffn_handle_first_parabola_layer_no_alloc_c.restype = None
ffn_handle_first_parabola_layer_no_alloc_c.argtypes = [ElinaManagerPtr, ElinaAbstract0Ptr, _doublepp, ndpointer(ctypes.c_double), c_size_t, c_size_t, POINTER(c_size_t)]
ffn_handle_first_parabola_layer_no_alloc_c(man,element,weights, bias, size, num_pixels, predecessors)
except Exception as inst:
print('Problem with loading/calling "ffn_handle_first_parabola_layer_no_alloc" from "libfppoly.so"')
print(inst)
return
def ffn_handle_first_log_layer(man, element,weights, bias, size, num_pixels, predecessors):
"""
handle the first FFN log layer
Parameters
----------
man : ElinaManagerPtr
Pointer to the ElinaManager.
weights : POINTER(POINTER(c_double))
The weight matrix
bias : POINTER(c_double)
The bias vector
size: c_size_t
Number of neurons in the first layer
num_pixels:
Number of pixels in the input
predecessors:
the layers before the current layer
Returns
-------
res : ElinaAbstract0Ptr
Pointer to the new abstract object.
"""
try:
ffn_handle_first_log_layer_c = fppoly_api.ffn_handle_first_log_layer
ffn_handle_first_log_layer_c.restype = None
ffn_handle_first_log_layer_c.argtypes = [ElinaManagerPtr, ElinaAbstract0Ptr, _doublepp, ndpointer(ctypes.c_double), c_size_t, c_size_t, POINTER(c_size_t)]
ffn_handle_first_log_layer_c(man,element,weights, bias, size, num_pixels, predecessors)
except Exception as inst:
print('Problem with loading/calling "ffn_handle_first_log_layer" from "libfppoly.so"')
print(inst)
return
def ffn_handle_first_log_layer_no_alloc(man, element,weights, bias, size, num_pixels, predecessors):
"""
handle the first FFN log layer
Parameters
----------
man : ElinaManagerPtr
Pointer to the ElinaManager.
weights : POINTER(POINTER(c_double))
The weight matrix
bias : POINTER(c_double)
The bias vector
size: c_size_t
Number of neurons in the first layer
num_pixels:
Number of pixels in the input
predecessors:
the layers before the current layer
Returns
-------
res : ElinaAbstract0Ptr
Pointer to the new abstract object.
"""
try:
ffn_handle_first_log_layer_no_alloc_c = fppoly_api.ffn_handle_first_log_layer_no_alloc
ffn_handle_first_log_layer_no_alloc_c.restype = None
ffn_handle_first_log_layer_no_alloc_c.argtypes = [ElinaManagerPtr, ElinaAbstract0Ptr, _doublepp, ndpointer(ctypes.c_double), c_size_t, c_size_t, POINTER(c_size_t)]
ffn_handle_first_log_layer_no_alloc_c(man,element,weights, bias, size, num_pixels, predecessors)
except Exception as inst:
print('Problem with loading/calling "ffn_handle_first_log_layer_no_alloc" from "libfppoly.so"')
print(inst)
return
def ffn_handle_intermediate_affine_layer(man, element, weights, bias, num_out_neurons, num_in_neurons, predecessors, use_area_heuristic):
"""
handle the intermediate FFN affine layer
Parameters
----------
man : ElinaManagerPtr
Pointer to the ElinaManager.
element : ElinaAbstract0Ptr
Pointer to the abstract element
weights: POINTER(POINTER(c_double))
The weight matrix.
bias: POINTER(c_size_t)
The bias vector
num_out_neurons: c_size_t
number of output neurons
num_in_neurons: c_size_t
number of input neurons
predecessors:
the layers before the current layer
use_area_heuristic: c_bool
whether to use area heuristic
Returns
-------
None
"""
try:
ffn_handle_intermediate_affine_layer_c = fppoly_api.ffn_handle_intermediate_affine_layer
ffn_handle_intermediate_affine_layer_c.restype = None
ffn_handle_intermediate_affine_layer_c.argtypes = [ElinaManagerPtr, ElinaAbstract0Ptr, _doublepp, ndpointer(ctypes.c_double), c_size_t, c_size_t, POINTER(c_size_t),c_bool]
ffn_handle_intermediate_affine_layer_c(man,element,weights,bias, num_out_neurons, num_in_neurons, predecessors, use_area_heuristic)
except Exception as inst:
print('Problem with loading/calling "ffn_handle_intermediate_affine_layer" from "libfppoly.so"')
print(inst)
def ffn_handle_intermediate_affine_layer_no_alloc(man, element, weights, bias, num_out_neurons, num_in_neurons, predecessors, use_area_heuristic):
"""
handle the intermediate FFN affine layer
Parameters
----------
man : ElinaManagerPtr
Pointer to the ElinaManager.
element : ElinaAbstract0Ptr
Pointer to the abstract element
weights: POINTER(POINTER(c_double))
The weight matrix.
bias: POINTER(c_size_t)
The bias vector
num_out_neurons: c_size_t
number of output neurons
num_in_neurons: c_size_t
number of input neurons
predecessors:
the layers before the current layer
use_area_heuristic: c_bool
whether to use area heuristic
Returns
-------
None
"""
try:
ffn_handle_intermediate_affine_layer_no_alloc_c = fppoly_api.ffn_handle_intermediate_affine_layer_no_alloc
ffn_handle_intermediate_affine_layer_no_alloc_c.restype = None
ffn_handle_intermediate_affine_layer_no_alloc_c.argtypes = [ElinaManagerPtr, ElinaAbstract0Ptr, _doublepp, ndpointer(ctypes.c_double), c_size_t, c_size_t, POINTER(c_size_t),c_bool]
ffn_handle_intermediate_affine_layer_no_alloc_c(man,element,weights,bias, num_out_neurons, num_in_neurons, predecessors, use_area_heuristic)
except Exception as inst:
print('Problem with loading/calling "ffn_handle_intermediate_affine_layer_no_alloc" from "libfppoly.so"')
print(inst)
def ffn_handle_intermediate_relu_layer(man, element, weights, bias, num_out_neurons, num_in_neurons, predecessors, use_area_heuristic):
"""
handle the intermediate FFN ReLU layer
Parameters
----------
man : ElinaManagerPtr
Pointer to the ElinaManager.
element : ElinaAbstract0Ptr
Pointer to the abstract element
weights: POINTER(POINTER(c_double))
The weight matrix.
bias: POINTER(c_size_t)
The bias vector
num_out_neurons: c_size_t
number of output neurons
num_in_neurons: c_size_t
number of input neurons
predecessors:
the layers before the current layer
use_area_heuristic: c_bool
whether to use area heuristic
Returns
-------
None
"""
try:
ffn_handle_intermediate_relu_layer_c = fppoly_api.ffn_handle_intermediate_relu_layer
ffn_handle_intermediate_relu_layer_c.restype = None
ffn_handle_intermediate_relu_layer_c.argtypes = [ElinaManagerPtr, ElinaAbstract0Ptr, _doublepp, ndpointer(ctypes.c_double), c_size_t, c_size_t, POINTER(c_size_t),c_bool]
ffn_handle_intermediate_relu_layer_c(man,element,weights,bias, num_out_neurons, num_in_neurons, predecessors, use_area_heuristic)
except Exception as inst:
print('Problem with loading/calling "ffn_handle_intermediate_relu_layer" from "libfppoly.so"')
print(inst)
def ffn_handle_intermediate_relu_layer_no_alloc(man, element, weights, bias, num_out_neurons, num_in_neurons, predecessors, use_area_heuristic):
"""
handle the intermediate FFN ReLU layer
Parameters
----------
man : ElinaManagerPtr
Pointer to the ElinaManager.
element : ElinaAbstract0Ptr
Pointer to the abstract element
weights: POINTER(POINTER(c_double))
The weight matrix.
bias: POINTER(c_size_t)
The bias vector
num_out_neurons: c_size_t
number of output neurons
num_in_neurons: c_size_t
number of input neurons
predecessors:
the layers before the current layer
use_area_heuristic: c_bool
whether to use area heuristic
Returns
-------
None
"""
try:
ffn_handle_intermediate_relu_layer_no_alloc_c = fppoly_api.ffn_handle_intermediate_relu_layer_no_alloc
ffn_handle_intermediate_relu_layer_no_alloc_c.restype = None
ffn_handle_intermediate_relu_layer_no_alloc_c.argtypes = [ElinaManagerPtr, ElinaAbstract0Ptr, _doublepp, ndpointer(ctypes.c_double), c_size_t, c_size_t, POINTER(c_size_t),c_bool]
ffn_handle_intermediate_relu_layer_no_alloc_c(man,element,weights,bias, num_out_neurons, num_in_neurons, predecessors, use_area_heuristic)
except Exception as inst:
print('Problem with loading/calling "ffn_handle_intermediate_relu_layer_no_alloc" from "libfppoly.so"')
print(inst)
def ffn_handle_intermediate_sigmoid_layer(man, element, weights, bias, num_out_neurons, num_in_neurons, predecessors, use_area_heuristic):
"""
handle the intermediate FFN Sigmoid layer
Parameters
----------
man : ElinaManagerPtr
Pointer to the ElinaManager.
element : ElinaAbstract0Ptr
Pointer to the abstract element
weights: POINTER(POINTER(c_double))
The weight matrix.
bias: POINTER(c_size_t)
The bias vector
num_out_neurons: c_size_t
number of output neurons
num_in_neurons: c_size_t
number of input neurons
predecessors:
the layers before the current layer
use_area_heuristic: c_bool
whether to use area heuristic
Returns
-------
None
"""
try:
ffn_handle_intermediate_sigmoid_layer_c = fppoly_api.ffn_handle_intermediate_sigmoid_layer
ffn_handle_intermediate_sigmoid_layer_c.restype = None
ffn_handle_intermediate_sigmoid_layer_c.argtypes = [ElinaManagerPtr, ElinaAbstract0Ptr, _doublepp, ndpointer(ctypes.c_double), c_size_t, c_size_t, POINTER(c_size_t), c_bool]
ffn_handle_intermediate_sigmoid_layer_c(man,element,weights,bias, num_out_neurons, num_in_neurons, predecessors, use_area_heuristic)
except Exception as inst:
print('Problem with loading/calling "ffn_handle_intermediate_sigmoid_layer" from "libfppoly.so"')
print(inst)
def ffn_handle_intermediate_sigmoid_layer_no_alloc(man, element, weights, bias, num_out_neurons, num_in_neurons, predecessors, use_area_heuristic):
"""
handle the intermediate FFN Sigmoid layer
Parameters
----------
man : ElinaManagerPtr
Pointer to the ElinaManager.
element : ElinaAbstract0Ptr
Pointer to the abstract element
weights: POINTER(POINTER(c_double))
The weight matrix.
bias: POINTER(c_size_t)
The bias vector
num_out_neurons: c_size_t
number of output neurons
num_in_neurons: c_size_t
number of input neurons
predecessors:
the layers before the current layer
use_area_heuristic: c_bool
whether to use area heuristic
Returns
-------
None
"""
try:
ffn_handle_intermediate_sigmoid_layer_no_alloc_c = fppoly_api.ffn_handle_intermediate_sigmoid_layer_no_alloc
ffn_handle_intermediate_sigmoid_layer_no_alloc_c.restype = None
ffn_handle_intermediate_sigmoid_layer_no_alloc_c.argtypes = [ElinaManagerPtr, ElinaAbstract0Ptr, _doublepp, ndpointer(ctypes.c_double), c_size_t, c_size_t, POINTER(c_size_t),c_bool]
ffn_handle_intermediate_sigmoid_layer_no_alloc_c(man,element,weights,bias, num_out_neurons, num_in_neurons, predecessors, use_area_heuristic)
except Exception as inst:
print('Problem with loading/calling "ffn_handle_intermediate_sigmoid_layer_no_alloc" from "libfppoly.so"')
print(inst)
def ffn_handle_intermediate_tanh_layer(man, element, weights, bias, num_out_neurons, num_in_neurons, predecessors, use_area_heuristic):
"""
handle the intermediate FFN Tanh layer
Parameters
----------
man : ElinaManagerPtr
Pointer to the ElinaManager.
element : ElinaAbstract0Ptr
Pointer to the abstract element
weights: POINTER(POINTER(c_double))
The weight matrix.
bias: POINTER(c_size_t)
The bias vector
num_out_neurons: c_size_t
number of output neurons
num_in_neurons: c_size_t
number of input neurons
predecessors:
the layers before the current layer
use_area_heuristic: c_bool
whether to use area heuristic
Returns
-------
None
"""
try:
ffn_handle_intermediate_tanh_layer_c = fppoly_api.ffn_handle_intermediate_tanh_layer
ffn_handle_intermediate_tanh_layer_c.restype = None
ffn_handle_intermediate_tanh_layer_c.argtypes = [ElinaManagerPtr, ElinaAbstract0Ptr, _doublepp, ndpointer(ctypes.c_double), c_size_t, c_size_t, POINTER(c_size_t),c_bool]
ffn_handle_intermediate_tanh_layer_c(man,element,weights,bias, num_out_neurons, num_in_neurons, predecessors, use_area_heuristic)
except Exception as inst:
print('Problem with loading/calling "ffn_handle_intermediate_tanh_layer" from "libfppoly.so"')
print(inst)
def ffn_handle_intermediate_tanh_layer_no_alloc(man, element, weights, bias, num_out_neurons, num_in_neurons, predecessors, use_area_heuristic):
"""
handle the intermediate FFN Tanh layer
Parameters
----------
man : ElinaManagerPtr
Pointer to the ElinaManager.
element : ElinaAbstract0Ptr
Pointer to the abstract element
weights: POINTER(POINTER(c_double))
The weight matrix.
bias: POINTER(c_size_t)
The bias vector
num_out_neurons: c_size_t
number of output neurons
num_in_neurons: c_size_t
number of input neurons
predecessors:
the layers before the current layer
use_area_heuristic: c_bool
whether to use area heuristic
Returns
-------
None
"""
try:
ffn_handle_intermediate_tanh_layer_no_alloc_c = fppoly_api.ffn_handle_intermediate_tanh_layer_no_alloc
ffn_handle_intermediate_tanh_layer_no_alloc_c.restype = None
ffn_handle_intermediate_tanh_layer_no_alloc_c.argtypes = [ElinaManagerPtr, ElinaAbstract0Ptr, _doublepp, ndpointer(ctypes.c_double), c_size_t, c_size_t, POINTER(c_size_t), c_bool]
ffn_handle_intermediate_tanh_layer_no_alloc_c(man,element,weights,bias, num_out_neurons, num_in_neurons, predecessors, use_area_heuristic)
except Exception as inst:
print('Problem with loading/calling "ffn_handle_intermediate_tanh_layer_no_alloc" from "libfppoly.so"')
print(inst)
def ffn_handle_intermediate_parabola_layer(man, element, weights, bias, num_out_neurons, num_in_neurons, predecessors, use_area_heuristic):
"""
handle the intermediate FFN Parabolic layer
Parameters
----------
man : ElinaManagerPtr
Pointer to the ElinaManager.
element : ElinaAbstract0Ptr
Pointer to the abstract element
weights: POINTER(POINTER(c_double))
The weight matrix.
bias: POINTER(c_size_t)
The bias vector
num_out_neurons: c_size_t
number of output neurons
num_in_neurons: c_size_t
number of input neurons
predecessors:
the layers before the current layer
use_area_heuristic: c_bool
whether to use area heuristic
Returns
-------
None
"""
try:
ffn_handle_intermediate_parabola_layer_c = fppoly_api.ffn_handle_intermediate_parabola_layer
ffn_handle_intermediate_parabola_layer_c.restype = None
ffn_handle_intermediate_parabola_layer_c.argtypes = [ElinaManagerPtr, ElinaAbstract0Ptr, _doublepp, ndpointer(ctypes.c_double), c_size_t, c_size_t, POINTER(c_size_t), c_bool]
ffn_handle_intermediate_parabola_layer_c(man,element,weights,bias, num_out_neurons, num_in_neurons, predecessors, use_area_heuristic)
except Exception as inst:
print('Problem with loading/calling "ffn_handle_intermediate_parabola_layer" from "libfppoly.so"')
print(inst)
def ffn_handle_intermediate_parabola_layer_no_alloc(man, element, weights, bias, num_out_neurons, num_in_neurons, predecessors, use_area_heuristic):
"""
handle the intermediate FFN Parabolic layer
Parameters
----------
man : ElinaManagerPtr
Pointer to the ElinaManager.
element : ElinaAbstract0Ptr
Pointer to the abstract element
weights: POINTER(POINTER(c_double))
The weight matrix.
bias: POINTER(c_size_t)
The bias vector
num_out_neurons: c_size_t
number of output neurons
num_in_neurons: c_size_t
number of input neurons
predecessors:
the layers before the current layer
use_area_heuristic: c_bool
whether to use area heuristic
Returns
-------
None
"""
try:
ffn_handle_intermediate_parabola_layer_no_alloc_c = fppoly_api.ffn_handle_intermediate_parabola_layer_no_alloc
ffn_handle_intermediate_parabola_layer_no_alloc_c.restype = None
ffn_handle_intermediate_parabola_layer_no_alloc_c.argtypes = [ElinaManagerPtr, ElinaAbstract0Ptr, _doublepp, ndpointer(ctypes.c_double), c_size_t, c_size_t, POINTER(c_size_t), c_bool]
ffn_handle_intermediate_parabola_layer_no_alloc_c(man,element,weights,bias, num_out_neurons, num_in_neurons, predecessors, use_area_heuristic)
except Exception as inst:
print('Problem with loading/calling "ffn_handle_intermediate_parabola_layer_no_alloc" from "libfppoly.so"')
print(inst)
def ffn_handle_intermediate_log_layer(man, element, weights, bias, num_out_neurons, num_in_neurons, predecessors, use_area_heuristic):
"""
handle the intermediate FFN Log layer
Parameters
----------
man : ElinaManagerPtr
Pointer to the ElinaManager.
element : ElinaAbstract0Ptr
Pointer to the abstract element
weights: POINTER(POINTER(c_double))
The weight matrix.
bias: POINTER(c_size_t)
The bias vector
num_out_neurons: c_size_t
number of output neurons
num_in_neurons: c_size_t
number of input neurons
predecessors:
the layers before the current layer
use_area_heuristic: c_bool
whether to use area heuristic
Returns
-------
None
"""
try:
ffn_handle_intermediate_log_layer_c = fppoly_api.ffn_handle_intermediate_log_layer
ffn_handle_intermediate_log_layer_c.restype = None
ffn_handle_intermediate_log_layer_c.argtypes = [ElinaManagerPtr, ElinaAbstract0Ptr, _doublepp, ndpointer(ctypes.c_double), c_size_t, c_size_t, POINTER(c_size_t),c_bool]
ffn_handle_intermediate_log_layer_c(man,element,weights,bias, num_out_neurons, num_in_neurons, predecessors, use_area_heuristic)
except Exception as inst:
print('Problem with loading/calling "ffn_handle_intermediate_log_layer" from "libfppoly.so"')
print(inst)
def ffn_handle_intermediate_log_layer_no_alloc(man, element, weights, bias, num_out_neurons, num_in_neurons, predecessors, use_area_heuristic):
"""
handle the intermediate FFN Log layer
Parameters
----------
man : ElinaManagerPtr
Pointer to the ElinaManager.
element : ElinaAbstract0Ptr
Pointer to the abstract element
weights: POINTER(POINTER(c_double))
The weight matrix.
bias: POINTER(c_size_t)
The bias vector
num_out_neurons: c_size_t
number of output neurons
num_in_neurons: c_size_t
number of input neurons
predecessors:
the layers before the current layer
use_area_heuristic: c_bool
whether to use area heuristic
Returns
-------
None
"""
try:
ffn_handle_intermediate_log_layer_no_alloc_c = fppoly_api.ffn_handle_intermediate_log_layer_no_alloc
ffn_handle_intermediate_log_layer_no_alloc_c.restype = None
ffn_handle_intermediate_log_layer_no_alloc_c.argtypes = [ElinaManagerPtr, ElinaAbstract0Ptr, _doublepp, ndpointer(ctypes.c_double), c_size_t, c_size_t, POINTER(c_size_t), c_bool]
ffn_handle_intermediate_log_layer_no_alloc_c(man,element,weights,bias, num_out_neurons, num_in_neurons, predecessors, use_area_heuristic)
except Exception as inst:
print('Problem with loading/calling "ffn_handle_intermediate_log_layer_no_alloc" from "libfppoly.so"')
print(inst)
def ffn_handle_last_relu_layer(man, element, weights, bias, num_out_neurons, num_in_neurons, predecessors, has_relu, use_area_heuristic):
"""
handle the last FFN ReLU layer
Parameters
----------
man : ElinaManagerPtr
Pointer to the ElinaManager.
element : ElinaAbstract0Ptr
Pointer to the ElinaAbstract0 abstract element
weights: POINTER(POINTER(c_double))
The weight matrix
bias : POINTER(c_size_t)
The bias vector
num_out_neurons: c_size_t
The number of output neurons
num_in_neurons: c_size_t
The number of input_neurons
predecessors:
the layers before the current layer
has_relu: c_bool
if the last layer has a ReLU activation
use_area_heuristic: c_bool
whether to use area heuristic
Returns
-------
None
"""
try:
ffn_handle_last_relu_layer_c = fppoly_api.ffn_handle_last_relu_layer
ffn_handle_last_relu_layer_c.restype = None
ffn_handle_last_relu_layer_c.argtypes = [ElinaManagerPtr, ElinaAbstract0Ptr, _doublepp, ndpointer(ctypes.c_double), c_size_t, c_size_t, POINTER(c_size_t),c_bool, c_bool]
ffn_handle_last_relu_layer_c(man,element,weights,bias, num_out_neurons, num_in_neurons, predecessors, has_relu, use_area_heuristic)
except Exception as inst:
print('Problem with loading/calling "ffn_handle_last_relu_layer" from "libfppoly.so"')
print(inst)
def ffn_handle_last_relu_layer_no_alloc(man, element, weights, bias, num_out_neurons, num_in_neurons, predecessors, has_relu, use_area_heuristic):
"""
handle the last FFN ReLU layer
Parameters
----------
man : ElinaManagerPtr
Pointer to the ElinaManager.
element : ElinaAbstract0Ptr
Pointer to the ElinaAbstract0 abstract element
weights: POINTER(POINTER(c_double))
The weight matrix
bias : POINTER(c_size_t)
The bias vector
num_out_neurons: c_size_t
The number of output neurons
num_in_neurons: c_size_t
The number of input_neurons
has_relu: c_bool
if the last layer has a ReLU activation
predecessors:
the layers before the current layer
use_area_heuristic: c_bool
whether to use area heuristic
Returns
-------
None
"""
try:
ffn_handle_last_relu_layer_no_alloc_c = fppoly_api.ffn_handle_last_relu_layer_no_alloc
ffn_handle_last_relu_layer_no_alloc_c.restype = None
ffn_handle_last_relu_layer_no_alloc_c.argtypes = [ElinaManagerPtr, ElinaAbstract0Ptr, _doublepp, ndpointer(ctypes.c_double), c_size_t, c_size_t, POINTER(c_size_t), c_bool, c_bool]
ffn_handle_last_relu_layer_no_alloc_c(man,element,weights,bias, num_out_neurons, num_in_neurons, predecessors, has_relu, use_area_heuristic)
except Exception as inst:
print('Problem with loading/calling "ffn_handle_last_relu_layer_no_alloc" from "libfppoly.so"')
print(inst)
def ffn_handle_last_sigmoid_layer(man, element, weights, bias, num_out_neurons, num_in_neurons, predecessors, has_sigmoid, use_area_heuristic):
"""
handle the last FFN Sigmoid layer
Parameters
----------
man : ElinaManagerPtr
Pointer to the ElinaManager.
element : ElinaAbstract0Ptr
Pointer to the ElinaAbstract0 abstract element
weights: POINTER(POINTER(c_double))
The weight matrix
bias : POINTER(c_size_t)
The bias vector
num_out_neurons: c_size_t
The number of output neurons
num_in_neurons: c_size_t
The number of input_neurons
predecessors:
the layers before the current layer
has_sigmoid: c_bool
if the last layer has a Sigmoid activation
use_area_heuristic: c_bool
whether to use area heuristic
Returns
-------
None
"""
try:
ffn_handle_last_sigmoid_layer_c = fppoly_api.ffn_handle_last_sigmoid_layer
ffn_handle_last_sigmoid_layer_c.restype = None
ffn_handle_last_sigmoid_layer_c.argtypes = [ElinaManagerPtr, ElinaAbstract0Ptr, _doublepp, ndpointer(ctypes.c_double), c_size_t, c_size_t, POINTER(c_size_t), c_bool, c_bool]
ffn_handle_last_sigmoid_layer_c(man,element,weights,bias, num_out_neurons, num_in_neurons, predecessors, has_sigmoid, use_area_heuristic)
except Exception as inst:
print('Problem with loading/calling "ffn_handle_last_sigmoid_layer" from "libfppoly.so"')
print(inst)
def ffn_handle_last_sigmoid_layer_no_alloc(man, element, weights, bias, num_out_neurons, num_in_neurons, predecessors, has_sigmoid, use_area_heuristic):
"""
handle the last FFN Sigmoid layer
Parameters
----------
man : ElinaManagerPtr
Pointer to the ElinaManager.
element : ElinaAbstract0Ptr
Pointer to the ElinaAbstract0 abstract element
weights: POINTER(POINTER(c_double))
The weight matrix
bias : POINTER(c_size_t)
The bias vector
num_out_neurons: c_size_t
The number of output neurons
num_in_neurons: c_size_t
The number of input_neurons
predecessors:
the layers before the current layer
has_sigmoid: c_bool
if the last layer has a Sigmoid activation
use_area_heuristic: c_bool
whether to use area heuristic
Returns
-------
None
"""
try:
ffn_handle_last_sigmoid_layer_no_alloc_c = fppoly_api.ffn_handle_last_sigmoid_layer_no_alloc
ffn_handle_last_sigmoid_layer_no_alloc_c.restype = None
ffn_handle_last_sigmoid_layer_no_alloc_c.argtypes = [ElinaManagerPtr, ElinaAbstract0Ptr, _doublepp, ndpointer(ctypes.c_double), c_size_t, c_size_t, POINTER(c_size_t), c_bool, c_bool]
ffn_handle_last_sigmoid_layer_no_alloc_c(man,element,weights,bias, num_out_neurons, num_in_neurons, predecessors, has_sigmoid, use_area_heuristic)
except Exception as inst:
print('Problem with loading/calling "ffn_handle_last_sigmoid_layer_no_alloc" from "libfppoly.so"')
print(inst)
def ffn_handle_last_tanh_layer(man, element, weights, bias, num_out_neurons, num_in_neurons, predecessors, has_tanh, use_area_heuristic):
"""
handle the last FFN Tanh layer
Parameters
----------
man : ElinaManagerPtr
Pointer to the ElinaManager.
element : ElinaAbstract0Ptr
Pointer to the ElinaAbstract0 abstract element
weights: POINTER(POINTER(c_double))
The weight matrix
bias : POINTER(c_size_t)
The bias vector
num_out_neurons: c_size_t
The number of output neurons
num_in_neurons: c_size_t
The number of input_neurons
predecessors:
the layers before the current layer
has_tanh: c_bool
if the last layer has a Tanh activation
use_area_heuristic: c_bool
whether to use area heuristic
Returns
-------
None
"""
try:
ffn_handle_last_tanh_layer_c = fppoly_api.ffn_handle_last_tanh_layer
ffn_handle_last_tanh_layer_c.restype = None
ffn_handle_last_tanh_layer_c.argtypes = [ElinaManagerPtr, ElinaAbstract0Ptr, _doublepp, ndpointer(ctypes.c_double), c_size_t, c_size_t, POINTER(c_size_t), c_bool, c_bool]
ffn_handle_last_tanh_layer_c(man,element,weights,bias, num_out_neurons, num_in_neurons, predecessors, has_tanh, use_area_heuristic)
except Exception as inst:
print('Problem with loading/calling "ffn_handle_last_tanh_layer" from "libfppoly.so"')
print(inst)
def ffn_handle_last_tanh_layer_no_alloc(man, element, weights, bias, num_out_neurons, num_in_neurons, predecessors, has_tanh, use_area_heuristic):
"""
handle the last FFN Tanh layer
Parameters
----------
man : ElinaManagerPtr
Pointer to the ElinaManager.
element : ElinaAbstract0Ptr
Pointer to the ElinaAbstract0 abstract element
weights: POINTER(POINTER(c_double))
The weight matrix
bias : POINTER(c_size_t)
The bias vector
num_out_neurons: c_size_t
The number of output neurons
num_in_neurons: c_size_t
The number of input_neurons
predecessors:
the layers before the current layer
has_tanh: c_bool
if the last layer has a Tanh activation
use_area_heuristic: c_bool
whether to use area heuristic
Returns
-------
None
"""
try:
ffn_handle_last_tanh_layer_no_alloc_c = fppoly_api.ffn_handle_last_tanh_layer_no_alloc
ffn_handle_last_tanh_layer_no_alloc_c.restype = None
ffn_handle_last_tanh_layer_no_alloc_c.argtypes = [ElinaManagerPtr, ElinaAbstract0Ptr, _doublepp, ndpointer(ctypes.c_double), c_size_t, c_size_t, POINTER(c_size_t), c_bool, c_bool]
ffn_handle_last_tanh_layer_no_alloc_c(man,element,weights,bias, num_out_neurons, num_in_neurons, predecessors, has_tanh, use_area_heuristic)
except Exception as inst:
print('Problem with loading/calling "ffn_handle_last_tanh_layer_no_alloc" from "libfppoly.so"')
print(inst)
def ffn_handle_last_parabola_layer(man, element, weights, bias, num_out_neurons, num_in_neurons, predecessors, has_parabola, use_area_heuristic):
"""
handle the last FFN Parabolic layer
Parameters
----------
man : ElinaManagerPtr
Pointer to the ElinaManager.
element : ElinaAbstract0Ptr
Pointer to the ElinaAbstract0 abstract element
weights: POINTER(POINTER(c_double))
The weight matrix
bias : POINTER(c_size_t)
The bias vector
num_out_neurons: c_size_t
The number of output neurons
num_in_neurons: c_size_t
The number of input_neurons
predecessors:
the layers before the current layer
has_parabola: c_bool
if the last layer has a Parabola activation
use_area_heuristic: c_bool
whether to use area heuristic
Returns
-------
None
"""
try:
ffn_handle_last_parabola_layer_c = fppoly_api.ffn_handle_last_parabola_layer
ffn_handle_last_parabola_layer_c.restype = None
ffn_handle_last_parabola_layer_c.argtypes = [ElinaManagerPtr, ElinaAbstract0Ptr, _doublepp, ndpointer(ctypes.c_double), c_size_t, c_size_t, POINTER(c_size_t), c_bool, c_bool]
ffn_handle_last_parabola_layer_c(man,element,weights,bias, num_out_neurons, num_in_neurons, predecessors, has_parabola, use_area_heuristic)
except Exception as inst:
print('Problem with loading/calling "ffn_handle_last_parabola_layer" from "libfppoly.so"')
print(inst)
def ffn_handle_last_parabola_layer_no_alloc(man, element, weights, bias, num_out_neurons, num_in_neurons, predecessors, has_parabola, use_area_heuristic):
"""
handle the last FFN Parabolic layer
Parameters
----------
man : ElinaManagerPtr
Pointer to the ElinaManager.
element : ElinaAbstract0Ptr
Pointer to the ElinaAbstract0 abstract element
weights: POINTER(POINTER(c_double))
The weight matrix
bias : POINTER(c_size_t)
The bias vector
num_out_neurons: c_size_t
The number of output neurons
num_in_neurons: c_size_t
The number of input_neurons
predecessors:
the layers before the current layer
has_parabola: c_bool
if the last layer has a Parabola activation
use_area_heuristic: c_bool
whether to use area heuristic
Returns
-------
None
"""
try:
ffn_handle_last_parabola_layer_no_alloc_c = fppoly_api.ffn_handle_last_parabola_layer_no_alloc
ffn_handle_last_parabola_layer_no_alloc_c.restype = None
ffn_handle_last_parabola_layer_no_alloc_c.argtypes = [ElinaManagerPtr, ElinaAbstract0Ptr, _doublepp, ndpointer(ctypes.c_double), c_size_t, c_size_t, POINTER(c_size_t), c_bool, c_bool]
ffn_handle_last_parabola_layer_no_alloc_c(man,element,weights,bias, num_out_neurons, num_in_neurons, predecessors, has_parabola, use_area_heuristic)
except Exception as inst:
print('Problem with loading/calling "ffn_handle_last_parabola_layer_no_alloc" from "libfppoly.so"')
print(inst)
def ffn_handle_last_log_layer(man, element, weights, bias, num_out_neurons, num_in_neurons, predecessors, has_log, use_area_heuristic):
"""
handle the last FFN Log layer
Parameters
----------
man : ElinaManagerPtr
Pointer to the ElinaManager.
element : ElinaAbstract0Ptr
Pointer to the ElinaAbstract0 abstract element
weights: POINTER(POINTER(c_double))
The weight matrix
bias : POINTER(c_size_t)
The bias vector
num_out_neurons: c_size_t
The number of output neurons
num_in_neurons: c_size_t
The number of input_neurons
predecessors:
the layers before the current layer
has_log: c_bool
if the last layer has a Log activation
use_area_heuristic: c_bool
whether to use area heuristic
Returns
-------
None
"""
try:
ffn_handle_last_log_layer_c = fppoly_api.ffn_handle_last_log_layer
ffn_handle_last_log_layer_c.restype = None
ffn_handle_last_log_layer_c.argtypes = [ElinaManagerPtr, ElinaAbstract0Ptr, _doublepp, ndpointer(ctypes.c_double), c_size_t, c_size_t, POINTER(c_size_t),c_bool, c_bool]
ffn_handle_last_log_layer_c(man,element,weights,bias, num_out_neurons, num_in_neurons, predecessors, has_log, use_area_heuristic)
except Exception as inst:
print('Problem with loading/calling "ffn_handle_last_log_layer" from "libfppoly.so"')
print(inst)
def ffn_handle_last_log_layer_no_alloc(man, element, weights, bias, num_out_neurons, num_in_neurons, predecessors, has_log, use_area_heuristic):
"""
handle the last FFN Log layer
Parameters
----------
man : ElinaManagerPtr
Pointer to the ElinaManager.
element : ElinaAbstract0Ptr
Pointer to the ElinaAbstract0 abstract element
weights: POINTER(POINTER(c_double))
The weight matrix
bias : POINTER(c_size_t)
The bias vector
num_out_neurons: c_size_t
The number of output neurons
num_in_neurons: c_size_t
The number of input_neurons
has_log: c_bool
if the last layer has a Log activation
use_area_heuristic: c_bool
whether to use area heuristic
Returns
-------
None
"""
try:
ffn_handle_last_log_layer_no_alloc_c = fppoly_api.ffn_handle_last_log_layer_no_alloc
ffn_handle_last_log_layer_no_alloc_c.restype = None
ffn_handle_last_log_layer_no_alloc_c.argtypes = [ElinaManagerPtr, ElinaAbstract0Ptr, _doublepp, ndpointer(ctypes.c_double), c_size_t, c_size_t, POINTER(c_size_t), c_bool, c_bool]
ffn_handle_last_log_layer_no_alloc_c(man,element,weights,bias, num_out_neurons, num_in_neurons, predecessors, has_log, use_area_heuristic)
except Exception as inst:
print('Problem with loading/calling "ffn_handle_last_log_layer_no_alloc" from "libfppoly.so"')
print(inst)
def subtract_output_neurons(man, element, y, x, use_area_heuristic):
"""
Computes bounds on y - x
Parameters
----------
man : ElinaManagerPtr
Pointer to the ElinaManager.
element : ElinaAbstract0Ptr
Pointer to the ElinaAbstract0 abstract element.
y : ElinaDim
The dimension y in the constraint y-x>0.
x: ElinaDim
The dimension x in the constraint y-x>0.
use_area_heuristic: c_bool
whether to use area heuristic
Returns
-------
res : ElinaIntervalPtr
Pointer to the interval bounding y - x
"""
res = None
try:
subtract_output_neurons_c = fppoly_api.subtract_output_neurons
subtract_output_neurons_c.restype = ElinaIntervalPtr
subtract_output_neurons_c.argtypes = [ElinaManagerPtr, ElinaAbstract0Ptr, ElinaDim, ElinaDim, c_bool]
res = subtract_output_neurons_c(man, element, y, x, use_area_heuristic)
except Exception as inst:
print('Problem with loading/calling "subtract_output_neurons" from "libfppoly.so"')
print(inst)
return res
def is_greater(man, element, y, x, use_area_heuristic):
"""
Check if y is strictly greater than x in the abstract element
Parameters
----------
man : ElinaManagerPtr
Pointer to the ElinaManager.
element : ElinaAbstract0Ptr
Pointer to the ElinaAbstract0 abstract element.
y : ElinaDim
The dimension y in the constraint y-x>0.
x: ElinaDim
The dimension x in the constraint y-x>0.
use_area_heuristic: c_bool
whether to use area heuristic
Returns
-------
res : c_bool
True if y is strictly greater than x in the abstract element
"""
res= None
try:
is_greater_c = fppoly_api.is_greater
is_greater_c.restype = c_bool
is_greater_c.argtypes = [ElinaManagerPtr, ElinaAbstract0Ptr, ElinaDim, ElinaDim, c_bool]
res = is_greater_c(man,element,y, x, use_area_heuristic)
except Exception as inst:
print('Problem with loading/calling "is_greater" from "libfppoly.so"')
print(inst)
return res
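# Example verification loop (a sketch, not library code): output label `y` is
# certified if it provably exceeds every other output dimension.
#
#   def label_is_robust(man, element, y, num_outputs, use_area_heuristic=True):
#       return all(is_greater(man, element, y, x, use_area_heuristic)
#                  for x in range(num_outputs) if x != y)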
def conv_handle_first_layer(man, element, filter_weights, filter_bias, input_size, filter_size, num_filters, strides, is_valid_padding, has_bias, predecessors):
"""
Convolutional Matrix multiplication in the first layer
Parameters
----------
man : ElinaManagerPtr
Pointer to the ElinaManager.
element : ElinaAbstract0Ptr
Pointer to the ElinaAbstract0 abstract element.
filter_weights: POINTER(double)
filter weights
filter_bias: POINTER(double)
filter biases
input_size: POINTER(c_size_t)
size of the input
filter_size: POINTER(c_size_t)
size of the filters
num_filters: c_size_t
number of filters
strides: POINTER(c_size_t)
size of the strides
is_valid_padding: c_bool
if the padding is valid
has_bias: c_bool
if the filter has bias
predecessors:
the layers before the current layer
Returns
-------
None
"""
try:
conv_handle_first_layer_c = fppoly_api.conv_handle_first_layer
conv_handle_first_layer_c.restype = None
conv_handle_first_layer_c.argtypes = [ElinaManagerPtr, ElinaAbstract0Ptr, ndpointer(ctypes.c_double), ndpointer(ctypes.c_double), ndpointer(ctypes.c_size_t), POINTER(c_size_t), c_size_t, POINTER(c_size_t), c_bool, c_bool, POINTER(c_size_t)]
conv_handle_first_layer_c(man,element, filter_weights, filter_bias, input_size, filter_size, num_filters, strides, is_valid_padding, has_bias, predecessors)
except Exception as inst:
print('Problem with loading/calling "conv_handle_first_layer" from "libfppoly.so"')
print(inst)
return
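# Call sketch (all shapes and the filter data layout are assumptions made only
# for illustration; consult the ELINA sources for the exact ordering expected
# by the C side):
#
#   filter_w = np.ascontiguousarray(W.reshape(-1), dtype=np.float64)
#   filter_b = np.ascontiguousarray(b, dtype=np.float64)
#   in_size  = np.array([28, 28, 1], dtype=np.uintp)   # height, width, channels
#   f_size   = (c_size_t * 2)(3, 3)
#   strides  = (c_size_t * 2)(1, 1)
#   conv_handle_first_layer(man, element, filter_w, filter_b, in_size,
#                           f_size, 16, strides, True, True, preds)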
def conv_handle_intermediate_relu_layer(man, element, filter_weights, filter_bias, input_size, filter_size, num_filters, strides, is_valid_padding, has_bias, predecessors, use_area_heuristic):
"""
Convolutional Matrix multiplication in an Intermediate layer
Parameters
----------
man : ElinaManagerPtr
Pointer to the ElinaManager.
element : ElinaAbstract0Ptr
Pointer to the ElinaAbstract0 abstract element.
filter_weights: POINTER(double)
filter weights
filter_bias: POINTER(double)
filter biases
input_size: POINTER(c_size_t)
size of the input
filter_size: POINTER(c_size_t)
size of the filters
num_filters: c_size_t
number of filters
strides: POINTER(c_size_t)
size of the strides
is_valid_padding: c_bool
if the padding is valid
has_bias: c_bool
if the filter has bias
predecessors:
the layers before the current layer
use_area_heuristic: c_bool
whether to use area heuristic
Returns
-------
None
"""
try:
conv_handle_intermediate_relu_layer_c = fppoly_api.conv_handle_intermediate_relu_layer
conv_handle_intermediate_relu_layer_c.restype = None
conv_handle_intermediate_relu_layer_c.argtypes = [ElinaManagerPtr, ElinaAbstract0Ptr, ndpointer(ctypes.c_double), ndpointer(ctypes.c_double), ndpointer(ctypes.c_size_t), POINTER(c_size_t), c_size_t, POINTER(c_size_t), c_bool, c_bool, POINTER(c_size_t), c_bool]
conv_handle_intermediate_relu_layer_c(man, element, filter_weights, filter_bias, input_size, filter_size, num_filters, strides, is_valid_padding, has_bias, predecessors, use_area_heuristic)
except Exception as inst:
print('Problem with loading/calling "conv_handle_intermediate_relu_layer" from "libfppoly.so"')
print(inst)
def handle_maxpool_layer(man, element, pool_size, input_size, predecessors):
"""
handle the Maxpool layer
Parameters
----------
man : ElinaManagerPtr
Pointer to the ElinaManager.
element : ElinaAbstract0Ptr
Pointer to the ElinaAbstract0 abstract element.
pool_size: POINTER(c_size_t)
The size of the Maxpool filter
input_size : POINTER(c_size_t)
The number of variables on which Maxpool will be applied.
predecessors:
the layers before the current layer
Returns
-------
res : c_size_t
Number of output neurons produced by the maxpool layer
"""
res=None
try:
handle_maxpool_layer_c = fppoly_api.handle_maxpool_layer
handle_maxpool_layer_c.restype = c_size_t
handle_maxpool_layer_c.argtypes = [ElinaManagerPtr, ElinaAbstract0Ptr, ndpointer(ctypes.c_size_t), ndpointer(ctypes.c_size_t), POINTER(c_size_t)]
res = handle_maxpool_layer_c(man, element, pool_size, input_size, predecessors)
except Exception as inst:
print('Problem with loading/calling "handle_maxpool_layer" from "libfppoly.so"')
print(inst)
return res
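# Call sketch (pool and input shapes below are illustrative assumptions only):
#
#   pool_size  = np.array([2, 2, 1], dtype=np.uintp)
#   input_size = np.array([28, 28, 8], dtype=np.uintp)
#   out_neurons = handle_maxpool_layer(man, element, pool_size, input_size, preds)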
def handle_residual_layer(man, element, num_neurons, predecessors):
"""
handle the Residual layer
Parameters
----------
man : ElinaManagerPtr
Pointer to the ElinaManager.
element : ElinaAbstract0Ptr
Pointer to the ElinaAbstract0 abstract element.
num_neurons: c_size_t
The number of neurons in the residual layer
predecessors:
the layers before the current layer
Returns
-------
None
"""
try:
handle_residual_layer_c = fppoly_api.handle_residual_layer
handle_residual_layer_c.restype = None
handle_residual_layer_c.argtypes = [ElinaManagerPtr, ElinaAbstract0Ptr, c_size_t, POINTER(c_size_t)]
handle_residual_layer_c(man, element, num_neurons, predecessors)
except Exception as inst:
print('Problem with loading/calling "handle_residual_layer" from "libfppoly.so"')
print(inst)
def box_for_neuron(man, element,layerno, neuron_no):
"""
returns bounds for a neuron in a layer
Parameters
----------
man : ElinaManagerPtr
Pointer to the ElinaManager.
element : ElinaAbstract0Ptr
Pointer to the ElinaAbstract0.
layerno: c_size_t
the layer number
neuron_no: c_size_t
the neuron number in the layer
Returns
-------
interval_array : ElinaIntervalPtr
ElinaIntervalArray representing the hypercube.
"""
interval = None
try:
box_for_neuron_c = fppoly_api.box_for_neuron
box_for_neuron_c.restype = ElinaIntervalPtr
box_for_neuron_c.argtypes = [ElinaManagerPtr, ElinaAbstract0Ptr, c_size_t, c_size_t]
interval = box_for_neuron_c(man, element,layerno, neuron_no)
except:
print('Problem with loading/calling "box_for_neuron" from "fppoly.so"')
print('Make sure you are passing ElinaManagerPtr, ElinaAbstract0Ptr, c_size_t, c_size_t to the function')
return interval
def box_for_layer(man, element,layerno):
"""
returns bounds for all neurons in a layer
Parameters
----------
man : ElinaManagerPtr
Pointer to the ElinaManager.
element : ElinaAbstract0Ptr
Pointer to the ElinaAbstract0.
layerno: c_size_t
the layer number
Returns
-------
interval_array : ElinaIntervalArray
ElinaIntervalArray representing the hypercube.
"""
interval_array = None
try:
box_for_layer_c = fppoly_api.box_for_layer
box_for_layer_c.restype = ElinaIntervalArray
box_for_layer_c.argtypes = [ElinaManagerPtr, ElinaAbstract0Ptr, c_size_t]
interval_array = box_for_layer_c(man, element,layerno)
except:
print('Problem with loading/calling "box_for_layer" from "fppoly.so"')
print('Make sure you are passing ElinaManagerPtr, ElinaAbstract0Ptr, c_size_t to the function')
return interval_array
def get_num_neurons_in_layer(man, element,layerno):
"""
returns the number of neurons in a layer
Parameters
----------
man : ElinaManagerPtr
Pointer to the ElinaManager.
element : ElinaAbstract0Ptr
Pointer to the ElinaAbstract0.
layerno: c_size_t
the layer number
Returns
-------
res : c_size_t
Number of neurons in the given layer
"""
res = 0
try:
get_num_neurons_in_layer_c = fppoly_api.get_num_neurons_in_layer
get_num_neurons_in_layer_c.restype = c_size_t
get_num_neurons_in_layer_c.argtypes = [ElinaManagerPtr, ElinaAbstract0Ptr, c_size_t]
res = get_num_neurons_in_layer_c(man, element,layerno)
except:
print('Problem with loading/calling "get_num_neurons_in_layer" from "fppoly.so"')
print('Make sure you are passing ElinaManagerPtr, ElinaAbstract0Ptr, c_size_t to the function')
return res
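# Read-back sketch (illustrative; the interval contents themselves are accessed
# through the ElinaInterval API defined elsewhere in this package):
#
#   n = get_num_neurons_in_layer(man, element, layerno)
#   bounds = box_for_layer(man, element, layerno)   # n intervals, one per neuron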
def update_bounds_for_neuron(man, element,layerno, neuron_no, lb, ub):
"""
returns bounds for a neuron in a layer
Parameters
----------
man : ElinaManagerPtr
Pointer to the ElinaManager.
element : ElinaAbstract0Ptr
Pointer to the ElinaAbstract0.
layerno: c_size_t
the layer number
neuron_no: c_size_t
the neuron number in the layer
lb: c_double
the updated lower bound
ub: c_double
the updated upper bound
Returns
-------
None
"""
try:
update_bounds_for_neuron_c = fppoly_api.update_bounds_for_neuron
update_bounds_for_neuron_c.restype = None
update_bounds_for_neuron_c.argtypes = [ElinaManagerPtr, ElinaAbstract0Ptr, c_size_t, c_size_t, c_double, c_double]
update_bounds_for_neuron_c(man, element,layerno, neuron_no, lb, ub)
except:
print('Problem with loading/calling "update_bounds_for_neuron" from "fppoly.so"')
print('Make sure you are passing ElinaManagerPtr, ElinaAbstract0Ptr, c_size_t, c_size_t, c_double, c_double to the function')
def get_bounds_for_linexpr0(man,element,linexpr0,layerno):
"""
returns bounds for a linexpr0 over neurons in "layerno"
Parameters
----------
man : ElinaManagerPtr
Pointer to the ElinaManager.
element : ElinaAbstract0Ptr
Pointer to the ElinaAbstract0.
linexpr0 : ElinaLinexpr0Ptr
Pointer to the Elinalinexpr0
layerno: c_size_t
the layer number
Returns
-------
interval : ElinaIntervalPtr
Pointer to the ElinaInterval holding the bounds of the linear expression
"""
interval = None
try:
get_bounds_for_linexpr0_c = fppoly_api.get_bounds_for_linexpr0
get_bounds_for_linexpr0_c.restype = ElinaIntervalPtr
get_bounds_for_linexpr0_c.argtypes = [ElinaManagerPtr, ElinaAbstract0Ptr, ElinaLinexpr0Ptr, c_size_t]
interval = get_bounds_for_linexpr0_c(man, element, linexpr0, layerno)
except:
print('Problem with loading/calling "get_bounds_for_linexpr0" from "fppoly.so"')
print('Make sure you are passing ElinaManagerPtr, ElinaAbstract0Ptr, ElinaLinexpr0Ptr, c_size_t to the function')
return interval
def get_lexpr_for_output_neuron(man,element,i):
"""
returns lower polyhedra constraint for the i-th output neuron in terms of the input neurons
Parameters
----------
man : ElinaManagerPtr
Pointer to the ElinaManager.
element : ElinaAbstract0Ptr
Pointer to the ElinaAbstract0.
i: c_size_t
output neuron number
Returns
-------
expr : ElinaLinexpr0Ptr
The lower polyhedra expression for the output neuron in terms of input parameters and pixels
"""
linexpr0 = None
try:
get_lexpr_for_output_neuron_c = fppoly_api.get_lexpr_for_output_neuron
get_lexpr_for_output_neuron_c.restype = ElinaLinexpr0Ptr
get_lexpr_for_output_neuron_c.argtypes = [ElinaManagerPtr, ElinaAbstract0Ptr, c_size_t]
linexpr0 = get_lexpr_for_output_neuron_c(man,element,i)
except:
print('Problem with loading/calling "get_lexpr_for_output_neuron" from "fppoly.so"')
print('Make sure you are passing ElinaManagerPtr, ElinaAbstract0Ptr, c_size_t to the function')
return linexpr0
def get_uexpr_for_output_neuron(man,element,i):
"""
returns the upper polyhedra constraint for the i-th output neuron in terms of the input neurons
Parameters
----------
man : ElinaManagerPtr
Pointer to the ElinaManager.
element : ElinaAbstract0Ptr
Pointer to the ElinaAbstract0.
i: c_size_t
output neuron number
Returns
-------
expr : ElinaLinexpr0Ptr
The upper polyhedra expression for the output neuron in terms of input parameters and pixels
"""
linexpr0 = None
try:
get_uexpr_for_output_neuron_c = fppoly_api.get_uexpr_for_output_neuron
get_uexpr_for_output_neuron_c.restype = ElinaLinexpr0Ptr
get_uexpr_for_output_neuron_c.argtypes = [ElinaManagerPtr, ElinaAbstract0Ptr, c_size_t]
linexpr0 = get_uexpr_for_output_neuron_c(man,element,i)
except:
print('Problem with loading/calling "get_uexpr_for_output_neuron" from "fppoly.so"')
print('Make sure you are passing ElinaManagerPtr, ElinaAbstract0Ptr, c_size_t to the function')
return linexpr0
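# Illustrative sketch (not part of the original bindings): collecting the lower and
# upper symbolic expressions for every output neuron. "num_outputs" is a hypothetical
# parameter the caller must supply (e.g. the size of the network's final layer).
def _example_output_constraints(man, element, num_outputs):
    # Pair the lower and upper polyhedra expressions for each output neuron.
    return [(get_lexpr_for_output_neuron(man, element, i),
             get_uexpr_for_output_neuron(man, element, i))
            for i in range(num_outputs)]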
def create_lstm_layer(man, element,h, predecessors):
"""
creates an lstm layer for the neural network; this should be called only once for each lstm layer
Parameters
----------
man : ElinaManagerPtr
Pointer to the ElinaManager.
element : ElinaAbstract0Ptr
Pointer to the ElinaAbstract0.
h: c_size_t
size of h_t
predecessors:
the layers before the current layer
Returns
--------
None
"""
try:
create_lstm_layer_c = fppoly_api.create_lstm_layer
create_lstm_layer_c.restype = None
create_lstm_layer_c.argtypes = [ElinaManagerPtr, ElinaAbstract0Ptr, c_size_t, POINTER(c_size_t)]
create_lstm_layer_c(man,element,h, predecessors)
except:
print('Problem with loading/calling "create_lstm_layer" from "fppoly.so"')
print('Make sure you are passing ElinaManagerPtr, ElinaAbstract0Ptr, c_size_t, POINTER(c_size_t) to the function')
return
def handle_lstm_layer(man, element, weights, bias, d, h, predecessors, use_area_heuristic):
"""
computes the hidden states and output vectors of the lstm unit, to be called at each time step after creating an LSTM unit
Parameters
-----------
man : ElinaManagerPtr
Pointer to the ElinaManager.
element : ElinaAbstract0Ptr
Pointer to the ElinaAbstract0.
weights : POINTER(POINTER(c_double))
The weight matrix of size 4*h x (d+h), with h rows each for f_t, i_t, o_t, and c_t (in that order);
column-wise, the first d entries correspond to x_t and the remaining h entries correspond to h_t
bias : POINTER(c_double)
The bias vector of size 4*h, in the same format as weights
d: c_size_t
size of x_t
h: c_size_t
size of h_t
predecessors:
the layers before the current layer
use_area_heuristic: c_bool
whether to use area heuristic
Returns
--------
None
"""
try:
handle_lstm_layer_c = fppoly_api.handle_lstm_layer
handle_lstm_layer_c.restype = None
handle_lstm_layer_c.argtypes = [ElinaManagerPtr, ElinaAbstract0Ptr, _doublepp, ndpointer(ctypes.c_double), c_size_t, c_size_t, POINTER(c_size_t), c_bool]
handle_lstm_layer_c(man,element,weights,bias,d,h, predecessors, use_area_heuristic)
except:
print('Problem with loading/calling "handle_lstm_layer" from "fppoly.so"')
print('Make sure you are passing ElinaManagerPtr, ElinaAbstract0Ptr, _doublepp, ndpointer(ctypes.c_double), c_size_t, c_size_t, POINTER(c_size_t), c_bool to the function')
return
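# Illustrative sketch (not part of the original bindings): how the "weights" and
# "bias" arguments of handle_lstm_layer could be laid out, assuming numpy is
# available. The 4*h rows are grouped as f_t, i_t, o_t, c_t; the first d columns
# act on x_t and the remaining h columns on h_t. Converting the 2-D array into the
# _doublepp argument is omitted here, since it depends on helpers defined elsewhere
# in this module.
def _example_lstm_weight_layout(d, h):
    import numpy as np
    weights = np.zeros((4 * h, d + h), dtype=np.float64)  # rows: [f_t | i_t | o_t | c_t]
    bias = np.zeros(4 * h, dtype=np.float64)              # same gate ordering as the rows
    return weights, bias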
def free_non_lstm_layer_expr(man,element,layerno):
"""
frees the expressions stored for the non-LSTM layer "layerno"
Parameters
----------
man : ElinaManagerPtr
Pointer to the ElinaManager.
element : ElinaAbstract0Ptr
Pointer to the ElinaAbstract0.
layerno: c_size_t
the layer number
Returns
-------
None
"""
try:
free_non_lstm_layer_expr_c = fppoly_api.free_non_lstm_layer_expr
free_non_lstm_layer_expr_c.restype = None
free_non_lstm_layer_expr_c.argtypes = [ElinaManagerPtr, ElinaAbstract0Ptr, c_size_t]
free_non_lstm_layer_expr_c(man, element, layerno)
except:
print('Problem with loading/calling "free_non_lstm_layer_expr" from "fppoly.so"')
print('Make sure you are passing ElinaManagerPtr, ElinaAbstract0Ptr, c_size_t to the function')
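# Illustrative usage sketch (not part of the original bindings): iterating over the
# neurons of one layer and querying their interval bounds. "man" and "element" are
# assumed to be an ElinaManagerPtr and an ElinaAbstract0Ptr obtained from an fppoly
# analysis set up elsewhere; extracting doubles from the returned ElinaIntervalPtr
# is omitted because it relies on the interval helpers defined in other parts of
# these bindings.
def _example_query_layer_bounds(man, element, layerno):
    num_neurons = get_num_neurons_in_layer(man, element, layerno)
    # One ElinaIntervalPtr per neuron in the layer.
    return [box_for_neuron(man, element, layerno, i) for i in range(num_neurons)]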
| 36.662713
| 315
| 0.697112
| 9,198
| 71,089
| 5.047402
| 0.032942
| 0.028002
| 0.033602
| 0.020721
| 0.931396
| 0.90822
| 0.878495
| 0.856287
| 0.81142
| 0.775578
| 0
| 0.003329
| 0.235128
| 71,089
| 1,938
| 316
| 36.681631
| 0.85048
| 0.371675
| 0
| 0.359223
| 0
| 0.001942
| 0.135903
| 0.042547
| 0
| 0
| 0
| 0
| 0
| 1
| 0.100971
| false
| 0.019417
| 0.015534
| 0
| 0.16699
| 0.2
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
436e96b2dd7d99c3a213279cf4a323213ab3ebba
| 465
|
py
|
Python
|
chainer/training/updaters/__init__.py
|
LuoYuanke/PrivChainer
|
758d765c7903f6913cfd58c21db069d5f2a12203
|
[
"MIT"
] | 1
|
2021-05-31T08:59:28.000Z
|
2021-05-31T08:59:28.000Z
|
chainer/training/updaters/__init__.py
|
LuoYuanke/PrivChainer
|
758d765c7903f6913cfd58c21db069d5f2a12203
|
[
"MIT"
] | null | null | null |
chainer/training/updaters/__init__.py
|
LuoYuanke/PrivChainer
|
758d765c7903f6913cfd58c21db069d5f2a12203
|
[
"MIT"
] | 1
|
2022-02-20T10:32:59.000Z
|
2022-02-20T10:32:59.000Z
|
from chainer.training.updaters import multiprocess_parallel_updater # NOQA
from chainer.training.updaters import parallel_updater # NOQA
from chainer.training.updaters import standard_updater # NOQA
from chainer.training.updaters.multiprocess_parallel_updater import MultiprocessParallelUpdater # NOQA
from chainer.training.updaters.parallel_updater import ParallelUpdater # NOQA
from chainer.training.updaters.standard_updater import StandardUpdater # NOQA
| 58.125
| 103
| 0.862366
| 53
| 465
| 7.415094
| 0.245283
| 0.167939
| 0.290076
| 0.412214
| 0.603053
| 0.361323
| 0.264631
| 0.264631
| 0
| 0
| 0
| 0
| 0.092473
| 465
| 7
| 104
| 66.428571
| 0.93128
| 0.062366
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
43814c0b6d6ada416e824a7eec33b0b34fcfa638
| 1,868
|
py
|
Python
|
aviral_api/base.py
|
shivamhw/aviral
|
f13450b01fbd0679a8eea0ad8df4122f9ed74925
|
[
"MIT"
] | 1
|
2021-12-02T17:29:11.000Z
|
2021-12-02T17:29:11.000Z
|
aviral_api/base.py
|
shivamhw/aviral
|
f13450b01fbd0679a8eea0ad8df4122f9ed74925
|
[
"MIT"
] | null | null | null |
aviral_api/base.py
|
shivamhw/aviral
|
f13450b01fbd0679a8eea0ad8df4122f9ed74925
|
[
"MIT"
] | null | null | null |
from typing import Any
import requests
from . import exceptions
import json
class api_caller:
def _get_call(self, url : str, header_param : dict = None, timeout : Any = 10) -> dict:
try:
response = requests.get(url, headers=header_param, timeout=timeout)
response.raise_for_status()
return response.json()
except requests.exceptions.ConnectTimeout:
raise exceptions.AviralDownError("Aviral timeout, may be slow response from aviral")
except requests.exceptions.ConnectionError:
raise exceptions.AviralDownError("Could not connect to Aviral")
except requests.exceptions.HTTPError:
raise exceptions.InvalidResponseError("Server sent invalid response, There might be an issue with the data sent or expired token")
except json.decoder.JSONDecodeError:
raise exceptions.InvalidResponseError("There might be an issue with the data sent or expired token.")
def _post_call(self, url : str, datas : dict, header_param : dict = None, timeout : Any = 10) -> dict:
try:
response = requests.post(url, headers=header_param, data=json.dumps(datas), timeout=timeout)
return response.json()
except requests.exceptions.ConnectTimeout:
raise exceptions.AviralDownError("Aviral timeout, may be slow response from aviral")
except requests.exceptions.ConnectionError:
raise exceptions.AviralDownError("Could not connect to Aviral")
except requests.exceptions.HTTPError:
raise exceptions.InvalidResponseError("Server sent invalid response, There might be an issue with the data sent or expired token")
except json.decoder.JSONDecodeError:
raise exceptions.InvalidResponseError("There might be an issue with the data sent or expired token.")
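# Illustrative usage sketch (not part of the original module): a minimal client built
# on top of api_caller. The URL and header below are placeholders, not real Aviral
# endpoints.
class _example_client(api_caller):
    def ping(self):
        # Delegate to the shared GET helper, which maps transport errors to the
        # package's exception types.
        return self._get_call("https://example.com/api/ping",
                              header_param={"Accept": "application/json"})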
| 51.888889
| 142
| 0.70182
| 216
| 1,868
| 6.018519
| 0.282407
| 0.092308
| 0.110769
| 0.092308
| 0.812308
| 0.812308
| 0.812308
| 0.812308
| 0.812308
| 0.812308
| 0
| 0.002772
| 0.227516
| 1,868
| 35
| 143
| 53.371429
| 0.898129
| 0
| 0
| 0.666667
| 0
| 0
| 0.240214
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.133333
| 0
| 0.3
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
43823bfc7b7444bea874f8d5170389fe0765f5bd
| 38,664
|
py
|
Python
|
tests/test_bidsify_flywheel.py
|
PennBBL/xbids_client
|
3f7d0f880276d1f7bad271fa5df181c449ad5005
|
[
"MIT"
] | null | null | null |
tests/test_bidsify_flywheel.py
|
PennBBL/xbids_client
|
3f7d0f880276d1f7bad271fa5df181c449ad5005
|
[
"MIT"
] | null | null | null |
tests/test_bidsify_flywheel.py
|
PennBBL/xbids_client
|
3f7d0f880276d1f7bad271fa5df181c449ad5005
|
[
"MIT"
] | null | null | null |
import os
import json
import re
import shutil
import unittest
from flywheel_bids.supporting_files import utils, bidsify_flywheel
class BidsifyTestCases(unittest.TestCase):
def setUp(self):
# Define testdir
self.testdir = 'testdir'
self.maxDiff = None
def tearDown(self):
# Cleanup 'testdir', if present
if os.path.exists(self.testdir):
shutil.rmtree(self.testdir)
def test_process_string_template_required(self):
""" """
# Define project template from the templates file
auto_update_str = 'sub-<subject.code>_ses-<session.label>_bold.nii.gz'
# initialize context object
context = {
'container_type': 'file',
'parent_container_type': 'project',
'project': {u'label': u'project123'},
'subject': {u'code': u'00123'},
'session': {u'label': u'session444'},
'acquisition': {u'label': u'acq222'},
'file': None,
'ext': None
}
# Call function
updated_string = utils.process_string_template(auto_update_str, context)
self.assertEqual(updated_string,
'sub-%s_ses-%s_bold.nii.gz' % (
context['subject']['code'],
context['session']['label'],
))
def test_process_string_template_bids1(self):
""" """
# Get project template from the templates file
auto_update_str = 'sub-<subject.code>_ses-<session.label>_bold.nii.gz'
# initialize context object
context = {
'container_type': 'file',
'parent_container_type': 'project',
'project': {u'label': u'project123'},
'subject': {u'code': u'sub-01'},
'session': {u'label': u'ses-001'},
'acquisition': {u'label': u'acq222'},
'file': None,
'ext': None
}
# Call function
updated_string = utils.process_string_template(auto_update_str, context)
self.assertEqual(updated_string,
'%s_%s_bold.nii.gz' % (
context['subject']['code'],
context['session']['label']
))
def test_process_string_template_optional(self):
""" """
# Define string to auto update, subject code is optional
auto_update_str = '[sub-<subject.code>]_ses-<session.label>_acq-<acquisition.label>_bold.nii.gz'
# initialize context object
context = {
'container_type': 'file',
'parent_container_type': 'project',
'project': {u'label': u'project123'},
'subject': {u'code': None},
'session': {u'label': u'session444'},
'acquisition': {u'label': u'acq222'},
'file': None,
'ext': None
}
# Call function
updated_string = utils.process_string_template(auto_update_str, context)
# Assert function honors the optional 'sub-<subject.code>'
self.assertEqual(updated_string,
'_ses-%s_acq-%s_bold.nii.gz' % (
context['session']['label'],
context['acquisition']['label']
))
def test_process_string_template_full_optional(self):
""" """
auto_update_str = 'sub-<subject.code>[_ses-<session.label>][_acq-{file.info.BIDS.Acq}][_ce-{file.info.BIDS.Ce}][_rec-{file.info.BIDS.Rec}][_run-{file.info.BIDS.Run}][_mod-{file.info.BIDS.Mod}]'
# initialize context object
context = {
'container_type': 'file',
'parent_container_type': 'project',
'project': {u'label': u'project123'},
'subject': {u'code': u'123'},
'session': {u'label': u'456'},
'acquisition': {u'label': u'acq222'},
'file': {u'classification': {u'Measurement': u'T1', u'Intent': u'Structural'}},
'ext': '.nii.gz'
}
# Call function
updated_string = utils.process_string_template(auto_update_str, context)
# Assert function honors the optional labels
self.assertEqual(updated_string,
'sub-123_ses-456')
def test_process_string_template_func_filename1(self):
""" """
# Define string to auto update, subject code is optional
auto_update_str = 'sub-<subject.code>[_ses-<session.label>]_task-{file.info.BIDS.Task}_bold{ext}'
# initialize context object
context = {
'container_type': 'file',
'parent_container_type': 'project',
'project': {u'label': u'project123'},
'subject': {u'code': '001'},
'session': {u'label': u'session444'},
'acquisition': {u'label': u'acq222'},
'file': {'name': 'bold.nii.gz',
'info': {'BIDS': {'Task': 'test123', 'Modality': 'bold'}}},
'ext': '.nii.gz'
}
# Call function
updated_string = utils.process_string_template(auto_update_str, context)
# Assert string as expected
self.assertEqual(updated_string,
'sub-%s_ses-%s_task-%s_%s%s' % (
context['subject']['code'],
context['session']['label'],
context['file']['info']['BIDS']['Task'],
context['file']['info']['BIDS']['Modality'],
context['ext']
))
def test_process_string_template_required_notpresent(self):
""" """
# TODO: Determine the expected behavior of this...
# Define string to auto update
auto_update_str = 'sub-<subject.code>_ses-<session.label>'
# initialize context object
context = {
'container_type': 'file',
'parent_container_type': 'project',
'project': {u'label': u'project123'},
'subject': {},
'session': {u'label': u'session444'},
'acquisition': {u'label': u'acq222'},
'file': None,
'ext': None
}
# Call function
updated_string = utils.process_string_template(auto_update_str, context)
# Assert function honors the optional 'sub-<subject.code>'
self.assertEqual(updated_string,
'sub-<subject.code>_ses-%s' % (
context['session']['label']
))
def test_process_string_template_required_None(self):
""" """
# TODO: Determine the expected behavior of this...
# Define string to auto update
auto_update_str = 'sub-<subject.code>_ses-<session.label>'
# initialize context object
context = {
'container_type': 'file',
'parent_container_type': 'project',
'project': {u'label': u'project123'},
'subject': {u'code': None},
'session': {u'label': u'session444'},
'acquisition': {u'label': u'acq222'},
'file': None,
'ext': None
}
# Call function
updated_string = utils.process_string_template(auto_update_str, context)
# Assert function honors the optional 'sub-<subject.code>'
self.assertEqual(updated_string,
'sub-<subject.code>_ses-%s' % (
context['session']['label']
))
def test_add_properties_valid(self):
""" """
properties = {
"Filename": {"type": "string", "label": "Filename", "default": "",
"auto_update": 'sub-<subject.code>_ses-<session.label>[_acq-<acquisition.label>]_T1w{ext}'},
"Folder": {"type": "string", "label":"Folder", "default": "anat"},
"Ce": {"type": "string", "label": "CE Label", "default": ""},
"Rec": {"type": "string", "label": "Rec Label", "default": ""},
"Run": {"type": "string", "label": "Run Index", "default": ""},
"Mod": {"type": "string", "label": "Mod Label", "default": ""},
"Modality": {"type": "string", "label": "Modality Label", "default": "T1w",
"enum": [
"T1w","T2w","T1rho","T1map","T2map","FLAIR","FLASH","PD","PDmap",
"PDT2","inplaneT1","inplaneT2","angio","defacemask","SWImagandphase"
]
}
}
project_obj = {u'label': u'Project Name'}
# Call function
info_obj = bidsify_flywheel.add_properties(properties, project_obj, [u'anatomy_t1w'])
# Expected info object
for key in properties:
project_obj[key] = properties[key]['default']
self.assertEqual(info_obj, project_obj)
def test_update_properties_valid(self):
""" """
# Define inputs
properties = {
"Filename": {"type": "string", "label": "Filename", "default": "",
"auto_update": 'sub-<subject.code>_ses-<session.label>[_acq-<acquisition.label>]_T1w{ext}'},
"Folder": {"type": "string", "label":"Folder", "default": "anat"},
"Mod": {"type": "string", "label": "Mod Label", "default": ""},
"Modality": {"type": "string", "label": "Modality Label", "default": "T1w"}
}
context = {
'container_type': 'file', 'parent_container_type': 'acquisition',
'project': None, 'subject': {u'code': u'001'},
'session': {u'label': u'sesTEST'}, 'acquisition': {u'label': u'acqTEST'},
'file': {
u'classification': {u'Measurement': u'T1', u'Intent': u'Structural'},
u'type': u'nifti'
},
'ext': '.nii.gz'
}
project_obj = {u'test1': u'123', u'test2': u'456'}
# Call function
info_obj = bidsify_flywheel.update_properties(properties, context, project_obj)
# Update project_obj, as expected
project_obj['Filename'] = u'sub-%s_ses-%s_acq-%s_T1w%s' % (
context['subject']['code'],
context['session']['label'],
context['acquisition']['label'],
context['ext']
)
self.assertEqual(project_obj, info_obj)
def test_process_matching_templates_anat_t1w(self):
""" """
# Define context
context = {
'container_type': 'file',
'parent_container_type': 'acquisition',
'project': None,
'subject': {u'code': u'001'},
'session': {u'label': u'sesTEST', 'info': {'BIDS': {'Label': u'sesTEST', 'Subject': u'001'}}},
'acquisition': {u'label': u'acqTEST'},
'file': {u'classification': {u'Measurement': u'T1', u'Intent': u'Structural'},
u'type': u'nifti'
},
'ext': '.nii.gz'
}
# Call function
container = bidsify_flywheel.process_matching_templates(context)
# Define expected container
container_expected = {
'info': {
'BIDS': {
'template': 'anat_file',
'Filename': u'sub-001_ses-sesTEST_T1w.nii.gz',
'Path': u'sub-001/ses-sesTEST/anat', 'Folder': 'anat',
'Run': '', 'Acq': '', 'Ce': '', 'Rec': '',
'Modality': 'T1w', 'Mod': '',
'ignore': False
}
},
u'classification': {u'Measurement': u'T1', u'Intent': u'Structural'}, u'type': u'nifti'}
self.assertEqual(container, container_expected)
def test_process_matching_templates_anat_t2w(self):
""" """
# Define context
context = {
'container_type': 'file',
'parent_container_type': 'acquisition',
'project': None,
'subject': {u'code': u'001'},
'session': {u'label': u'sesTEST', 'info': {'BIDS': {'Label': u'sesTEST', 'Subject': u'001'}}},
'acquisition': {u'label': u'acqTEST'},
'file': {u'classification': {u'Measurement': u'T2', u'Intent': u'Structural'},
u'type': u'nifti'
},
'ext': '.nii.gz'
}
# Call function
container = bidsify_flywheel.process_matching_templates(context)
# Define expected container
container_expected = {
'info': {
'BIDS': {
'template': 'anat_file',
'Filename': u'sub-001_ses-sesTEST_T2w.nii.gz',
'Path': u'sub-001/ses-sesTEST/anat', 'Folder': 'anat',
'Run': '', 'Acq': '', 'Ce': '', 'Rec': '',
'Modality': 'T2w', 'Mod': '',
'ignore': False
}
},
u'classification': {u'Measurement': u'T2', u'Intent': u'Structural'}, u'type': u'nifti'}
self.assertEqual(container, container_expected)
def test_process_matching_templates_func(self):
""" """
# Define context
context = {
'container_type': 'file',
'parent_container_type': 'acquisition',
'project': None,
'subject': {u'code': u'001'},
'session': {u'label': u'sesTEST', 'info': {'BIDS': {'Label': u'sesTEST', 'Subject': u'001'}}},
'run_counters': utils.RunCounterMap(),
'acquisition': {u'label': u'acq_task-TEST_run+'},
'file': {u'classification': {u'Intent': u'Functional'},
u'type': u'nifti',
},
'ext': '.nii.gz'
}
# Call function
container = bidsify_flywheel.process_matching_templates(context)
# Define expected container
container_expected = {
'info': {
'BIDS': {
'template': 'func_file',
'Filename': u'sub-001_ses-sesTEST_task-TEST_run-1_bold.nii.gz',
'Folder': 'func', 'Path': u'sub-001/ses-sesTEST/func',
'Acq': '', 'Task': 'TEST', 'Modality': 'bold',
'Rec': '', 'Run': '1', 'Echo': '',
'ignore': False
}
},
u'classification': {u'Intent': u'Functional'}, u'type': u'nifti'}
self.assertEqual(container, container_expected)
def test_process_matching_templates_task_events(self):
""""""
# Define context
context = {
'container_type': 'file',
'parent_container_type': 'acquisition',
'project': None,
'subject': {u'code': u'001'},
'session': {u'label': u'sesTEST', 'info': {'BIDS': {'Label': u'sesTEST', 'Subject': u'001'}}},
'acquisition': {u'label': u'acqTEST'},
'file': {u'classification': {u'Intent': u'Functional'},
u'type': u'tabular data',
},
'ext': '.tsv'
}
# Call function
container = bidsify_flywheel.process_matching_templates(context)
# Define expected container
container_expected = {
'info': {
'BIDS': {
'template': 'task_events_file',
'Filename': u'sub-001_ses-sesTEST_task-{file.info.BIDS.Task}_events.tsv',
'Folder': 'func', 'Path': u'sub-001/ses-sesTEST/func',
'Acq': '', 'Task': '',
'Rec': '', 'Run': '', 'Echo': '',
'ignore': False
}
},
u'classification': {u'Intent': u'Functional'}, u'type': u'tabular data'}
self.assertEqual(container, container_expected)
def test_process_matching_beh_events_file(self):
""""""
# Define context
context = {
'container_type': 'file',
'parent_container_type': 'acquisition',
'project': None,
'subject': {u'code': u'001'},
'session': {u'label': u'sesTEST', 'info': {'BIDS': {'Label': u'sesTEST', 'Subject': u'001'}}},
'acquisition': {u'label': u'acqTEST'},
'file': {u'classification': {u'Custom': u'Behavioral'},
u'type': u'tabular data',
},
'ext': '.tsv'
}
# Call function
container = bidsify_flywheel.process_matching_templates(context)
# Define expected container
container_expected = {
'info': {
'BIDS': {
'template': 'beh_events_file',
'Filename': u'sub-001_ses-sesTEST_task-{file.info.BIDS.Task}_events.tsv',
'Folder': 'beh', 'Path': u'sub-001/ses-sesTEST/beh', 'Task': '',
'ignore': False
}
},
u'classification': {u'Custom': u'Behavioral'}, u'type': u'tabular data'}
self.assertEqual(container, container_expected)
def test_process_matching_templates_physio_task_events(self):
""""""
# Define context
context = {
'container_type': 'file',
'parent_container_type': 'acquisition',
'project': None,
'subject': {u'code': u'001'},
'session': {u'label': u'sesTEST', 'info': {'BIDS': {'Label': u'sesTEST', 'Subject': u'001'}}},
'acquisition': {u'label': u'acqTEST'},
'file': {u'classification': {u'Custom': u'Physio'},
u'type': u'tabular data',
},
'ext': '.tsv'
}
# Call function
container = bidsify_flywheel.process_matching_templates(context)
# Define expected container
container_expected = {
'info': {
'BIDS': {
'template': 'physio_task_file',
'Filename': u'sub-001_ses-sesTEST_task-{file.info.BIDS.Task}_physio.tsv',
'Folder': 'func', 'Path': u'sub-001/ses-sesTEST/func',
'Acq': '', 'Task': '',
'Modality': 'physio',
'Rec': '',
'Recording': '',
'Run': '',
'Echo': '',
'ignore': False
}
},
u'classification': {u'Custom': u'Physio'}, u'type': u'tabular data'}
self.assertEqual(container, container_expected)
def test_process_matching_templates_dwi_nifti(self):
""" """
# Define context
context = {
'container_type': 'file',
'parent_container_type': 'acquisition',
'project': None,
'subject': {u'code': u'001'},
'session': {u'label': u'sesTEST', 'info': {'BIDS': {'Label': u'sesTEST', 'Subject': u'001'}}},
'acquisition': {u'label': u'acqTEST'},
'file': {u'classification': {u'Measurement': u'Diffusion', u'Intent': u'Structural'},
u'type': u'nifti'
},
'ext': '.nii.gz'
}
# Call function
container = bidsify_flywheel.process_matching_templates(context)
# Define expected container
container_expected = {
'info': {
'BIDS': {
'template': 'diffusion_file',
'Filename': u'sub-001_ses-sesTEST_dwi.nii.gz',
'Path': u'sub-001/ses-sesTEST/dwi', 'Folder': 'dwi',
'Modality': 'dwi', 'Acq': '', 'Run': '',
'ignore': False
}
},
u'classification': {u'Measurement': u'Diffusion', u'Intent': u'Structural'}, u'type': u'nifti'}
self.assertEqual(container, container_expected)
def test_process_matching_templates_dwi_bval(self):
""" """
# Define context
context = {
'container_type': 'file',
'parent_container_type': 'acquisition',
'project': None,
'subject': {u'code': u'001'},
'session': {u'label': u'sesTEST', 'info': {'BIDS': {'Label': u'sesTEST', 'Subject': u'001'}}},
'acquisition': {u'label': u'acqTEST'},
'file': {u'classification': {u'Measurement': u'Diffusion', u'Intent': u'Structural'},
u'type': u'bval'
},
'ext': '.bval'
}
# Call function
container = bidsify_flywheel.process_matching_templates(context)
# Define expected container
container_expected = {
'info': {
'BIDS': {
'template': 'diffusion_file',
'Filename': u'sub-001_ses-sesTEST_dwi.bval',
'Path': u'sub-001/ses-sesTEST/dwi', 'Folder': 'dwi',
'Modality': 'dwi', 'Acq': '', 'Run': '',
'ignore': False
}
},
u'classification': {u'Measurement': u'Diffusion', u'Intent': u'Structural'}, u'type': u'bval'}
self.assertEqual(container, container_expected)
def test_process_matching_templates_dwi_bvec(self):
""" """
# Define context
context = {
'container_type': 'file',
'parent_container_type': 'acquisition',
'project': None,
'subject': {u'code': u'001'},
'session': {u'label': u'sesTEST', 'info': {'BIDS': {'Label': u'sesTEST', 'Subject': u'001'}}},
'acquisition': {u'label': u'acqTEST'},
'file': {u'classification': {u'Measurement': u'Diffusion', u'Intent': u'Structural'},
u'type': u'bvec'
},
'ext': '.bvec'
}
# Call function
container = bidsify_flywheel.process_matching_templates(context)
# Define expected container
container_expected = {
'info': {
'BIDS': {
'template': 'diffusion_file',
'Filename': u'sub-001_ses-sesTEST_dwi.bvec',
'Path': u'sub-001/ses-sesTEST/dwi', 'Folder': 'dwi',
'Modality': 'dwi', 'Acq': '', 'Run': '',
'ignore': False
}
},
u'classification': {u'Measurement': u'Diffusion', u'Intent': u'Structural'}, u'type': u'bvec'}
self.assertEqual(container, container_expected)
def test_process_matching_templates_fieldmap(self):
""""""
# Define context
context = {
'container_type': 'file',
'parent_container_type': 'acquisition',
'project': None,
'subject': {u'code': u'001'},
'session': {u'label': u'sesTEST', 'info': {'BIDS': {'Label': u'sesTEST', 'Subject': u'001'}}},
'acquisition': {u'label': u'acqTEST'},
'file': {u'classification': {u'Intent': u'Fieldmap'},
u'type': u'nifti',
},
'ext': '.nii.gz'
}
# Call function
container = bidsify_flywheel.process_matching_templates(context)
# Define expected container
container_expected = {
'info': {
'BIDS': {
'template': 'fieldmap_file',
'Filename': u'sub-001_ses-sesTEST_fieldmap.nii.gz',
'Folder': 'fmap', 'Path': u'sub-001/ses-sesTEST/fmap',
'Acq': '', 'Run': '', 'Dir': '', 'Modality': 'fieldmap',
'IntendedFor': [
{'Folder': 'anat'},
{'Folder': 'func'}
],
'ignore': False
}
},
u'classification': {u'Intent': u'Fieldmap'}, u'type': u'nifti'}
self.assertEqual(container, container_expected)
def test_process_matching_templates_fieldmap_phase_encoded(self):
""""""
# Define context
context = {
'container_type': 'file',
'parent_container_type': 'acquisition',
'project': None,
'subject': {u'code': u'001'},
'session': {u'label': u'sesTEST', 'info': {'BIDS': {'Label': u'sesTEST', 'Subject': u'001'}}},
'acquisition': {u'label': u'acqTEST Topup PA'}, # Acquisition label needs to contain
'file': {u'classification': {u'Intent': u'Fieldmap'},
u'type': u'nifti'
},
'ext': '.nii.gz'
}
# Call function
container = bidsify_flywheel.process_matching_templates(context)
# Define expected container
container_expected = {
'info': {
'BIDS': {
'template': 'fieldmap_phase_encoded_file',
'Filename': u'sub-001_ses-sesTEST_dir-PA_epi.nii.gz',
'Folder': 'fmap', 'Path': u'sub-001/ses-sesTEST/fmap',
'Acq': '', 'Run': '', 'Dir': 'PA', 'Modality': 'epi',
'IntendedFor': [
{'Folder': 'anat'},
{'Folder': 'func'}
],
'ignore': False
}
},
u'classification': {u'Intent': u'Fieldmap'}, u'type': u'nifti'}
self.assertEqual(container, container_expected)
def test_process_matching_templates_dicom(self):
# Define context
context = {
'container_type': 'file',
'parent_container_type': 'acquisition',
'project': {u'label': 'hello'},
'subject': {u'code': u'001'},
'session': {u'label': u'sesTEST', 'info': {'BIDS': {'Label': u'sesTEST', 'Subject': u'001'}}},
'acquisition': {u'label': u'acqTEST'},
'file': {
u'classification': {u'Measurement': u'Diffusion', u'Intent': u'Structural'},
u'type': u'dicom'
},
'ext': '.dcm.zip'
}
# Call function
container = bidsify_flywheel.process_matching_templates(context)
# Define expected container
container_expected = {'info': {'BIDS': {
'template': 'dicom_file',
'Filename': '',
'Folder': 'sourcedata',
'Path': u'sourcedata/sub-001/ses-sesTEST',
'ignore': False
}},
u'classification': {u'Measurement': u'Diffusion', u'Intent': u'Structural'},
u'type': u'dicom'}
self.assertEqual(container, container_expected)
def test_process_matching_templates_non_bids_dicom(self):
# Define context
context = {
'container_type': 'file',
'parent_container_type': 'acquisition',
'project': {u'label': 'hello'},
'subject': {u'code': u'001'},
'session': {u'label': u'sesTEST', 'info': {'BIDS': {'Label': u'sesTEST', 'Subject': u'001'}}},
'acquisition': {u'label': u'acqTEST', u'id': u'09090'},
'file': {
u'name': u'4784_1_1_localizer',
u'classification': {u'Measurement': u'T2', u'Intent': u'Localizer'},
u'type': u'dicom'
},
'ext': '.dcm.zip'
}
# Call function
container = bidsify_flywheel.process_matching_templates(context)
print(container)
# Define expected container
container_expected = {
u'name': u'4784_1_1_localizer',
u'classification': {u'Measurement': u'T2', u'Intent': u'Localizer'},
u'type': u'dicom'
}
self.assertEqual(container, container_expected)
def test_resolve_initial_dicom_field_values_from_filename(self):
# Define context
context = {
'container_type': 'file',
'parent_container_type': 'acquisition',
'project': {u'label': 'hello'},
'subject': {u'code': u'001'},
'session': {u'label': u'sesTEST', 'info': {'BIDS': {'Label': u'sesTEST', 'Subject': u'001'}}},
'acquisition': {u'label': u'acqTEST'},
'file': {
u'name': u'09 cmrr_mbepi_task-spatialfrequency_s6_2mm_66sl_PA_TR1.0.dcm.zip',
u'classification': {u'Measurement': u'Diffusion', u'Intent': u'Structural'},
u'type': u'dicom'
},
'ext': '.dcm.zip'
}
# Call function
container = bidsify_flywheel.process_matching_templates(context)
# Define expected container
container_expected = {'info': {'BIDS': {
'template': 'dicom_file',
'Filename': u'09 cmrr_mbepi_task-spatialfrequency_s6_2mm_66sl_PA_TR1.0.dcm.zip',
'Folder': 'sourcedata',
'Path': u'sourcedata/sub-001/ses-sesTEST',
'ignore': False
}},
u'name': u'09 cmrr_mbepi_task-spatialfrequency_s6_2mm_66sl_PA_TR1.0.dcm.zip',
u'classification': {u'Measurement': u'Diffusion', u'Intent': u'Structural'},
u'type': u'dicom'}
self.assertEqual(container, container_expected)
def test_process_matching_template_acquisition(self):
""" """
# Define context
context = {
'container_type': 'acquisition',
'parent_container_type': 'session',
'project': {'label': 'Project_Label_Test'},
'subject': None,
'session': {'label': 'Session_Label_Test'},
'acquisition': {'label': 'Acquisition_Label_Test'},
'file': {},
'ext': '.zip'
}
# Call function
container = bidsify_flywheel.process_matching_templates(context)
# Define expected container
container_expected = {
'info': {
'BIDS': {
'template': 'acquisition',
'ignore': False
}
},
'label': 'Acquisition_Label_Test'
}
self.assertEqual(container, container_expected)
def test_process_matching_templates_acquisition_file(self):
""" """
# Define context
context = {
'container_type': 'file',
'parent_container_type': 'acquisition',
'project': {'label': 'testproject'},
'subject': {'code': '12345'},
'session': {'label': 'haha', 'info': {'BIDS': {'Label': 'haha', 'Subject': '12345'}}},
'acquisition':{'label': 'blue', u'id': u'ID'},
'file': {u'type': u'image', u'name': u'fname'},
'ext': '.jpg'
}
# Won't match if not on upload
container = bidsify_flywheel.process_matching_templates(context)
# Define expected container
container_expected = {u'type': u'image', u'name': u'fname'}
self.assertEqual(container, container_expected)
# Call function
container = bidsify_flywheel.process_matching_templates(context, upload=True)
# Define expected container
container_expected = {
'info': {
'BIDS': {
'template': 'acquisition_file',
'Filename': '', 'Folder': 'acq-blue', 'Path': 'sub-12345/ses-haha/acq-blue',
'ignore': False
}
},
u'type': u'image',
u'name': u'fname'
}
self.assertEqual(container, container_expected)
def test_process_matching_templates_session(self):
""" """
# Define context
context = {
'container_type': 'session',
'parent_container_type': 'project',
'project': {'label': 'Project_Label_Test'},
'subject': {'code' : '12345'},
'session': {'label': 'Session_Label_Test'},
'acquisition': None,
'file': {},
'ext': '.zip'
}
# Call function
container = bidsify_flywheel.process_matching_templates(context)
# Define expected container
container_expected = {
'info': {
'BIDS': {
'Label': 'SessionLabelTest',
'Subject': '12345',
'template': 'session',
'ignore': False
}
},
'label': 'Session_Label_Test'
}
self.assertEqual(container, container_expected)
def test_process_matching_templates_session_file(self):
""" """
# Define context
context = {
'container_type': 'file',
'parent_container_type': 'session',
'project': {'label': 'testproject'},
'subject': {'code': '12345'},
'session': {'label': u'sesTEST', 'info': {'BIDS': {'Label': u'sesTEST', 'Subject': u'12345'}}},
'acquisition': None,
'file': {u'type': u'tabular'},
'ext': '.tsv'
}
# Call function
container = bidsify_flywheel.process_matching_templates(context)
# Define expected container
container_expected = {
'info': {
'BIDS': {
'template': 'session_file',
'Filename': '', 'Folder': 'ses-sesTEST', 'Path': 'sub-12345/ses-sesTEST',
'ignore': False
}
},
u'type': u'tabular'}
self.assertEqual(container, container_expected)
def test_process_matching_templates_project(self):
""" """
# Define context
context = {
'container_type': 'project',
'parent_container_type': 'group',
'project': {'label': 'Project_Label_Test'},
'subject': None,
'session': None,
'acquisition': None,
'file': {},
'ext': '.zip'
}
# Call function
container = bidsify_flywheel.process_matching_templates(context)
# Define expected container
container_expected = {
'info': {
'BIDS': {
'Acknowledgements': '',
'Authors': [],
'BIDSVersion': '1.0.2',
'DatasetDOI': '',
'Funding': '',
'HowToAcknowledge': '',
'License': '',
'Name': 'Project_Label_Test',
'ReferencesAndLinks': [],
'template': 'project'
}
},
'label': 'Project_Label_Test'
}
self.assertEqual(container, container_expected)
def test_process_matching_templates_project_file(self):
""" """
# Define context
context = {
'container_type': 'file',
'parent_container_type': 'project',
'project': None,
'subject': None,
'session': None,
'acquisition': None,
'file': {u'classification': {},
u'type': u'archive'},
'ext': '.zip'
}
# Call function
container = bidsify_flywheel.process_matching_templates(context)
# Define expected container
container_expected = {
'info': {
'BIDS': {
'template': 'project_file',
'Filename': '', 'Folder': '', 'Path': '',
'ignore': False
}
},
u'classification': {}, u'type': u'archive'}
self.assertEqual(container, container_expected)
def test_process_matching_templates_project_file_multiple_measurements(self):
""" """
# Define context
context = {
'container_type': 'file',
'parent_container_type': 'acquisition',
'project': None,
'subject': {u'code': u'001'},
'session': {u'label': u'sesTEST', 'info': {'BIDS': {'Label': u'sesTEST', 'Subject': u'001'}}},
'acquisition': {u'label': u'acqTEST'},
'file': {
u'classification': {u'Measurement': [u'T1', u'T2'], u'Intent': u'Structural'},
u'type': u'nifti'
},
'ext': '.nii.gz'
}
# Call function
container = bidsify_flywheel.process_matching_templates(context)
# Define expected container
container_expected = {
'info': {
'BIDS': {
'template': 'anat_file',
'Filename': u'sub-001_ses-sesTEST_T1w.nii.gz',
'Path': u'sub-001/ses-sesTEST/anat', 'Folder': 'anat',
'Run': '', 'Acq': '', 'Ce': '', 'Rec': '',
'Modality': 'T1w', 'Mod': '',
'ignore': False
}
},
u'classification': {u'Measurement': [u'T1', u'T2'], u'Intent': u'Structural'}, u'type': u'nifti'}
print(container)
self.assertEqual(container, container_expected)
def test_process_matching_templates_BIDS_NA(self):
""" """
# Define context
context = {
'container_type': 'file',
'parent_container_type': 'acquisition',
'project': None,
'subject': {u'code': u'001'},
'session': {u'label': u'sesTEST', 'info': {'BIDS': {'Label': u'sesTEST', 'Subject': u'001'}}},
'run_counters': utils.RunCounterMap(),
'acquisition': {u'label': u'acq_task-TEST_run+'},
'file': {u'classification': {u'Intent': u'Functional'},
u'type': u'nifti','info': {'BIDS': 'NA'}
},
'ext': '.nii.gz'
}
# Call function
container = bidsify_flywheel.process_matching_templates(context)
# Define expected container
container_expected = {
u'classification': {u'Intent': u'Functional'},
u'type': u'nifti',
'info': {'BIDS': 'NA'}
}
self.assertEqual(container, container_expected)
def assertEqual(self, a, b):
a = utils.normalize_strings(a)
b = utils.normalize_strings(b)
unittest.TestCase.assertEqual(self, a, b)
if __name__ == "__main__":
unittest.main()
| 40.698947
| 201
| 0.493301
| 3,540
| 38,664
| 5.221186
| 0.066384
| 0.024022
| 0.021209
| 0.035059
| 0.871504
| 0.860791
| 0.823081
| 0.815777
| 0.793594
| 0.784396
| 0
| 0.015608
| 0.350429
| 38,664
| 949
| 202
| 40.741834
| 0.720327
| 0.059952
| 0
| 0.63329
| 0
| 0.006502
| 0.2975
| 0.070547
| 0
| 0
| 0
| 0.001054
| 0.044213
| 1
| 0.044213
| false
| 0
| 0.007802
| 0
| 0.053316
| 0.002601
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
439217ac931ab1f86b0d4bf982689d704681b924
| 12,549
|
py
|
Python
|
Python Code/Algorithm Range 0 - 50000/insert.py
|
Roisin-Fallon/Sorting_Algorithms
|
5ebb0fb3175982bbaa991556a5b09bb443636422
|
[
"Apache-2.0"
] | null | null | null |
Python Code/Algorithm Range 0 - 50000/insert.py
|
Roisin-Fallon/Sorting_Algorithms
|
5ebb0fb3175982bbaa991556a5b09bb443636422
|
[
"Apache-2.0"
] | null | null | null |
Python Code/Algorithm Range 0 - 50000/insert.py
|
Roisin-Fallon/Sorting_Algorithms
|
5ebb0fb3175982bbaa991556a5b09bb443636422
|
[
"Apache-2.0"
] | null | null | null |
# Code adapted from project specification
from random import * # Import python random module
def random_array(n): # Function takes as input a value n
array = [] # create an array variable
for i in range(0, n, 1): # i start at 0 stop at n an increment by 1 (e.g. if n=4 0,1,2,3)
array.append(randint(0,100)) # Add randomly generated integers with values between 0 and 100 (inclusive) to the array
return array
# assign the random array to alist
alist1= random_array(100)
alist2= random_array(500)
alist3= random_array(1000)
alist4 = random_array(2500)
alist5 = random_array(5000)
alist6 = random_array(7500)
alist7 = random_array(10000)
alist8 = random_array(12500)
alist9 = random_array(15000)
alist10 = random_array(17500)
alist11 = random_array(20000)
alist12 = random_array(25000)
alist13 = random_array(30000)
alist14 = random_array(40000)
alist15 = random_array(50000)
def insertionSort(alist): # Function to do insertion sort
for i in range(1,len(alist)): # Start for loop at second element (index 1), assume the first element is sorted
key=alist[i] # Next element inserted into sorted section of array
position = i -1 # Last element we are going to compare with
# Comparing the current element with the sorted position and swapping
while position>=0 and key < alist[position]: # Move the key as long as it is less than the previous item in the array
alist[position +1]=alist[position] # Move the last element compared on step above to make room for key
position -= 1 # The next item to compare
alist[position+1]=key # Insert the key into its sorted position
import time # import time module
num_runs = 10 # Number of times to test the function i.e. we want 10 runs
results = [] # array to store results for each test
insertsort_avglist = []
def benchmark_insertionsort():
for r in range(num_runs): # Benchmark the function
start_time = time.time() # Log the start time in seconds
insertionSort(alist1) # Call the function insertion to benchmark
end_time = time.time() # Log the end time in seconds
time_elapsed= end_time - start_time # Calculate the elapsed time
results.append(time_elapsed)
b = sum(results) # Sum the results of the 10 runs
average = (b/num_runs) # Calculate the average of a run
insertsort_avglist.append(average)
for r in range(num_runs): # Benchmark the function
start_time = time.time() # Log the start time in seconds
insertionSort(alist2) # Call the function insertion to benchmark
end_time = time.time() # Log the end time in seconds
time_elapsed= end_time - start_time # Calculate the elapsed time
results.append(time_elapsed)
b = sum(results) # Sum the results of the 10 runs
average = (b/num_runs) # Calculate the average of a run
insertsort_avglist.append(average)
for r in range(num_runs): # Benchmark the function
start_time = time.time() # Log the start time in seconds
insertionSort(alist3) # Call the function insertion to benchmark
end_time = time.time() # Log the end time in seconds
time_elapsed= end_time - start_time # Calculate the elapsed time
results.append(time_elapsed)
b = sum(results) # Sum the results of the 10 runs
average = (b/num_runs) # Calculate the average of a run
insertsort_avglist.append(average)
for r in range(num_runs): # Benchmark the function
start_time = time.time() # Log the start time in seconds
insertionSort(alist4) # Call the function insertion to benchmark
end_time = time.time() # Log the end time in seconds
time_elapsed= end_time - start_time # Calculate the elapsed time
results.append(time_elapsed)
b = sum(results) # Sum the results of the 10 runs
average = (b/num_runs) # Calculate the average of a run
insertsort_avglist.append(average)
for r in range(num_runs): # Benchmark the function
start_time = time.time() # Log the start time in seconds
insertionSort(alist5) # Call the function insertion to benchmark
end_time = time.time() # Log the end time in seconds
time_elapsed= end_time - start_time # Calculate the elapsed time
results.append(time_elapsed)
b = sum(results) # Sum the results of the 10 runs
average = (b/num_runs) # Calculate the average of a run
insertsort_avglist.append(average)
for r in range(num_runs): # Benchmark the function
start_time = time.time() # Log the start time in seconds
insertionSort(alist6) # Call the function insertion to benchmark
end_time = time.time() # Log the end time in seconds
time_elapsed= end_time - start_time # Calculate the elapsed time
results.append(time_elapsed)
b = sum(results) # Sum the results of the 10 runs
average = (b/num_runs) # Calculate the average of a run
insertsort_avglist.append(average)
for r in range(num_runs): # Benchmark the function
start_time = time.time() # Log the start time in seconds
insertionSort(alist7) # Call the function insertion to benchmark
end_time = time.time() # Log the end time in seconds
time_elapsed= end_time - start_time # Calculate the elapsed time
results.append(time_elapsed)
b = sum(results) # Sum the results of the 10 runs
average = (b/num_runs) # Calculate the average of a run
insertsort_avglist.append(average)
for r in range(num_runs): # Benchmark the function
start_time = time.time() # Log the start time in seconds
insertionSort(alist8) # Call the function insertion to benchmark
end_time = time.time() # Log the end time in seconds
time_elapsed= end_time - start_time # Calculate the elapsed time
results.append(time_elapsed)
b = sum(results) # Sum the results of the 10 runs
average = (b/num_runs) # Calculate the average of a run
insertsort_avglist.append(average)
for r in range(num_runs): # Benchmark the function
start_time = time.time() # Log the start time in seconds
insertionSort(alist9) # Call the function insertion to benchmark
end_time = time.time() # Log the end time in seconds
time_elapsed= end_time - start_time # Calculate the elapsed time
results.append(time_elapsed)
b = sum(results) # Sum the results of the 10 runs
average = (b/num_runs) # Calculate the average of a run
insertsort_avglist.append(average)
for r in range(num_runs): # Benchmark the function
start_time = time.time() # Log the start time in seconds
insertionSort(alist10) # Call the function insertion to benchmark
end_time = time.time() # Log the end time in seconds
time_elapsed= end_time - start_time # Calculate the elapsed time
results.append(time_elapsed)
b = sum(results) # Sum the results of the 10 runs
average = (b/num_runs) # Calculate the average of a run
insertsort_avglist.append(average)
for r in range(num_runs): # Benchmark the function
start_time = time.time() # Log the start time in seconds
insertionSort(alist11) # Call the function insertion to benchmark
end_time = time.time() # Log the end time in seconds
time_elapsed= end_time - start_time # Calculate the elapsed time
results.append(time_elapsed)
b = sum(results) # Sum the results of the 10 runs
average = (b/num_runs) # Calculate the average of a run
insertsort_avglist.append(average)
for r in range(num_runs): # Benchmark the function
start_time = time.time() # Log the start time in seconds
insertionSort(alist12) # Call the function insertion to benchmark
end_time = time.time() # Log the end time in seconds
time_elapsed= end_time - start_time # Calculate the elapsed time
results.append(time_elapsed)
b = sum(results) # Sum the results of the 10 runs
average = (b/num_runs) # Calculate the average of a run
insertsort_avglist.append(average)
for r in range(num_runs): # Benchmark the function
start_time = time.time() # Log the start time in seconds
insertionSort(alist13) # Call the function insertion to benchmark
end_time = time.time() # Log the end time in seconds
time_elapsed= end_time - start_time # Calculate the elapsed time
results.append(time_elapsed)
b = sum(results) # Sum the results of the 10 runs
average = (b/num_runs) # Calculate the average of a run
insertsort_avglist.append(average)
for r in range(num_runs): # Benchmark the function
start_time = time.time() # Log the start time in seconds
insertionSort(alist14) # Call the function insertion to benchmark
end_time = time.time() # Log the end time in seconds
time_elapsed= end_time - start_time # Calculate the elapsed time
results.append(time_elapsed)
b = sum(results) # Sum the results of the 10 runs
average = (b/num_runs) # Calculate the average of a run
insertsort_avglist.append(average)
for r in range(num_runs): # Benchmark the function
start_time = time.time() # Log the start time in seconds
insertionSort(alist15) # Call the function insertion to benchmark
end_time = time.time() # Log the end time in seconds
time_elapsed= end_time - start_time # Calculate the elapsed time
results.append(time_elapsed)
b = sum(results) # Sum the results of the 10 runs
average = (b/num_runs) # Calculate the average of a run
insertsort_avglist.append(average)
print(insertsort_avglist)
benchmark_insertionsort()
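# Illustrative sketch (not part of the original script): the fifteen benchmark blocks
# above could be expressed as a single loop. Note that the original code keeps
# appending to one shared "results" list, so later averages also fold in the timings
# of the smaller arrays; resetting the list per array, as below, yields a per-size
# average instead.
def benchmark_insertionsort_loop(arrays, runs=10):
    averages = []
    for alist in arrays:
        timings = []
        for _ in range(runs):
            start = time.time()            # Log the start time in seconds
            insertionSort(alist)           # Sort the array being benchmarked
            timings.append(time.time() - start)
        averages.append(sum(timings) / runs)  # Average over the runs for this array only
    return averages
# e.g. benchmark_insertionsort_loop([alist1, alist2, alist3])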
| 56.782805
| 141
| 0.537015
| 1,418
| 12,549
| 4.642454
| 0.106488
| 0.072915
| 0.054686
| 0.068358
| 0.781559
| 0.781559
| 0.781559
| 0.781559
| 0.781559
| 0.781559
| 0
| 0.022417
| 0.409913
| 12,549
| 220
| 142
| 57.040909
| 0.866577
| 0.320504
| 0
| 0.701754
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.017544
| false
| 0
| 0.011696
| 0
| 0.035088
| 0.005848
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
43c02d59d7e5fe6712f8dd70c8af0e36324912d3
| 4,986
|
py
|
Python
|
test_autolens/unit/plot/test_fit_interferometer_plots.py
|
harshitjindal/PyAutoLens
|
f1d3f08f12a61f6634e1b7a0ccf8f5cfe0252035
|
[
"MIT"
] | 1
|
2020-04-06T20:07:56.000Z
|
2020-04-06T20:07:56.000Z
|
test_autolens/unit/plot/test_fit_interferometer_plots.py
|
harshitjindal/PyAutoLens
|
f1d3f08f12a61f6634e1b7a0ccf8f5cfe0252035
|
[
"MIT"
] | null | null | null |
test_autolens/unit/plot/test_fit_interferometer_plots.py
|
harshitjindal/PyAutoLens
|
f1d3f08f12a61f6634e1b7a0ccf8f5cfe0252035
|
[
"MIT"
] | null | null | null |
import pytest
import os
import autolens.plot as aplt
@pytest.fixture(name="plot_path")
def make_fit_interferometer_plotter_setup():
return "{}/../test_files/plotting/fit/".format(
os.path.dirname(os.path.realpath(__file__))
)
def test__fit_quantities_are_output(fit_interferometer_7, plot_path, plot_patch):
aplt.fit_interferometer.visibilities(
fit=fit_interferometer_7,
plotter=aplt.Plotter(output=aplt.Output(path=plot_path, format="png")),
)
assert plot_path + "visibilities.png" in plot_patch.paths
aplt.fit_interferometer.noise_map(
fit=fit_interferometer_7,
plotter=aplt.Plotter(output=aplt.Output(path=plot_path, format="png")),
)
assert plot_path + "noise_map.png" in plot_patch.paths
aplt.fit_interferometer.signal_to_noise_map(
fit=fit_interferometer_7,
plotter=aplt.Plotter(output=aplt.Output(path=plot_path, format="png")),
)
assert plot_path + "signal_to_noise_map.png" in plot_patch.paths
aplt.fit_interferometer.model_visibilities(
fit=fit_interferometer_7,
plotter=aplt.Plotter(output=aplt.Output(path=plot_path, format="png")),
)
assert plot_path + "model_visibilities.png" in plot_patch.paths
aplt.fit_interferometer.residual_map_vs_uv_distances(
fit=fit_interferometer_7,
plotter=aplt.Plotter(output=aplt.Output(path=plot_path, format="png")),
)
assert plot_path + "residual_map_vs_uv_distances_real.png" in plot_patch.paths
aplt.fit_interferometer.residual_map_vs_uv_distances(
fit=fit_interferometer_7,
plot_real=False,
plotter=aplt.Plotter(output=aplt.Output(path=plot_path, format="png")),
)
assert plot_path + "residual_map_vs_uv_distances_imag.png" in plot_patch.paths
aplt.fit_interferometer.normalized_residual_map_vs_uv_distances(
fit=fit_interferometer_7,
plotter=aplt.Plotter(output=aplt.Output(path=plot_path, format="png")),
)
assert (
plot_path + "normalized_residual_map_vs_uv_distances_real.png"
in plot_patch.paths
)
aplt.fit_interferometer.normalized_residual_map_vs_uv_distances(
fit=fit_interferometer_7,
plot_real=False,
plotter=aplt.Plotter(output=aplt.Output(path=plot_path, format="png")),
)
assert (
plot_path + "normalized_residual_map_vs_uv_distances_imag.png"
in plot_patch.paths
)
aplt.fit_interferometer.chi_squared_map_vs_uv_distances(
fit=fit_interferometer_7,
plotter=aplt.Plotter(output=aplt.Output(path=plot_path, format="png")),
)
assert plot_path + "chi_squared_map_vs_uv_distances_real.png" in plot_patch.paths
aplt.fit_interferometer.chi_squared_map_vs_uv_distances(
fit=fit_interferometer_7,
plot_real=False,
plotter=aplt.Plotter(output=aplt.Output(path=plot_path, format="png")),
)
assert plot_path + "chi_squared_map_vs_uv_distances_imag.png" in plot_patch.paths
def test__fit_sub_plot(
masked_interferometer_fit_x2_plane_7x7, include_all, plot_path, plot_patch
):
aplt.fit_interferometer.subplot_fit_interferometer(
fit=masked_interferometer_fit_x2_plane_7x7,
include=include_all,
sub_plotter=aplt.SubPlotter(output=aplt.Output(plot_path, format="png")),
)
assert plot_path + "subplot_fit_interferometer.png" in plot_patch.paths
def test__fit_sub_plot_real_space(
masked_interferometer_fit_x2_plane_7x7, include_all, plot_path, plot_patch
):
aplt.fit_interferometer.subplot_fit_real_space(
fit=masked_interferometer_fit_x2_plane_7x7,
include=include_all,
sub_plotter=aplt.SubPlotter(output=aplt.Output(plot_path, format="png")),
)
assert plot_path + "subplot_fit_real_space.png" in plot_patch.paths
def test__fit_individuals__source_and_lens__dependent_on_input(
masked_interferometer_fit_x1_plane_7x7,
masked_interferometer_fit_x2_plane_7x7,
include_all,
plot_path,
plot_patch,
):
aplt.fit_interferometer.individuals(
fit=masked_interferometer_fit_x1_plane_7x7,
plot_visibilities=True,
plot_noise_map=False,
plot_signal_to_noise_map=False,
plot_model_visibilities=True,
plot_chi_squared_map=True,
include=include_all,
plotter=aplt.Plotter(output=aplt.Output(plot_path, format="png")),
)
assert plot_path + "visibilities.png" in plot_patch.paths
assert plot_path + "noise_map.png" not in plot_patch.paths
assert plot_path + "signal_to_noise_map.png" not in plot_patch.paths
assert plot_path + "model_visibilities.png" in plot_patch.paths
assert plot_path + "residual_map_vs_uv_distances_real.png" not in plot_patch.paths
assert (
plot_path + "normalized_residual_map_vs_uv_distances_real.png"
not in plot_patch.paths
)
assert plot_path + "chi_squared_map_vs_uv_distances_real.png" in plot_patch.paths
| 31.757962
| 86
| 0.738869
| 684
| 4,986
| 4.959064
| 0.100877
| 0.087264
| 0.07842
| 0.089623
| 0.864092
| 0.864092
| 0.839328
| 0.828125
| 0.814858
| 0.80454
| 0
| 0.007763
| 0.173285
| 4,986
| 156
| 87
| 31.961538
| 0.815138
| 0
| 0
| 0.477876
| 0
| 0
| 0.131769
| 0.110509
| 0
| 0
| 0
| 0
| 0.168142
| 1
| 0.044248
| false
| 0
| 0.026549
| 0.00885
| 0.079646
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
43cfd45c767ce3c9295da1872d5936e4a6c417a8
| 101
|
py
|
Python
|
terrascript/pingdom/__init__.py
|
hugovk/python-terrascript
|
08fe185904a70246822f5cfbdc9e64e9769ec494
|
[
"BSD-2-Clause"
] | 4
|
2022-02-07T21:08:14.000Z
|
2022-03-03T04:41:28.000Z
|
terrascript/pingdom/__init__.py
|
hugovk/python-terrascript
|
08fe185904a70246822f5cfbdc9e64e9769ec494
|
[
"BSD-2-Clause"
] | null | null | null |
terrascript/pingdom/__init__.py
|
hugovk/python-terrascript
|
08fe185904a70246822f5cfbdc9e64e9769ec494
|
[
"BSD-2-Clause"
] | 2
|
2022-02-06T01:49:42.000Z
|
2022-02-08T14:15:00.000Z
|
# terrascript/pingdom/__init__.py
import terrascript
class pingdom(terrascript.Provider):
pass
| 14.428571
| 36
| 0.792079
| 11
| 101
| 6.909091
| 0.727273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.128713
| 101
| 6
| 37
| 16.833333
| 0.863636
| 0.306931
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
78e44240625f1f2ff6981df2b4667d859204fc0b
| 199
|
py
|
Python
|
geta/name.py
|
shnewto/geta
|
fbe305f76a07bf97de14342e7fe2d5e7655b1a93
|
[
"MIT"
] | null | null | null |
geta/name.py
|
shnewto/geta
|
fbe305f76a07bf97de14342e7fe2d5e7655b1a93
|
[
"MIT"
] | null | null | null |
geta/name.py
|
shnewto/geta
|
fbe305f76a07bf97de14342e7fe2d5e7655b1a93
|
[
"MIT"
] | null | null | null |
from enum import Enum, auto
import names
def first_name():
return names.get_first_name()
def last_name():
return names.get_last_name()
def full_name():
return names.get_full_name()
| 13.266667
| 33
| 0.718593
| 31
| 199
| 4.322581
| 0.387097
| 0.223881
| 0.335821
| 0.402985
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.18593
| 199
| 14
| 34
| 14.214286
| 0.82716
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.375
| true
| 0
| 0.25
| 0.375
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
|
0
| 6
|
600042b158cd70f1251d124e612b3841ec6008f3
| 4,511
|
py
|
Python
|
tests/test_defi_pulse.py
|
jhhb/pydefipulsedata
|
0c48537dd054d1b7756bf07e300db434115e9307
|
[
"MIT"
] | 3
|
2021-06-14T14:41:40.000Z
|
2022-03-11T15:21:37.000Z
|
tests/test_defi_pulse.py
|
jhhb/pydefipulsedata
|
0c48537dd054d1b7756bf07e300db434115e9307
|
[
"MIT"
] | 1
|
2021-06-17T10:05:23.000Z
|
2021-06-20T18:03:11.000Z
|
tests/test_defi_pulse.py
|
jhhb/pydefipulsedata
|
0c48537dd054d1b7756bf07e300db434115e9307
|
[
"MIT"
] | 1
|
2022-01-17T11:35:10.000Z
|
2022-01-17T11:35:10.000Z
|
import unittest
import responses
from defipulsedata import DefiPulse
EMPTY_BLOB = {}
class TestWrapper(unittest.TestCase):
@responses.activate
def test_simple_endpoints(self):
client = DefiPulse(api_key='mock-key')
simple_endpoint_urls = [
(
client.get_market_data,
'https://data-api.defipulse.com/api/v1/defipulse/api/MarketData?api-key=mock-key',
),
(
client.get_projects,
'https://data-api.defipulse.com/api/v1/defipulse/api/GetProjects?api-key=mock-key',
),
(
client.get_lending_tokens,
'https://data-api.defipulse.com/api/v1/defipulse/api/GetLendingTokens?api-key=mock-key',
),
(
client.get_lending_market_data,
'https://data-api.defipulse.com/api/v1/defipulse/api/LendingMarketData?api-key=mock-key',
),
(
client.get_lending_projects,
'https://data-api.defipulse.com/api/v1/defipulse/api/GetLendingProjects?api-key=mock-key',
),
]
for fn, url in simple_endpoint_urls:
responses.reset()
responses.add(responses.GET, url, json=EMPTY_BLOB, status=200)
fn()
self.assertEqual(responses.calls[0].request.url, url)
@responses.activate
def test_get_history(self):
client = DefiPulse(api_key='mock-key')
url = 'https://data-api.defipulse.com/api/v1/defipulse/api/GetHistory?api-key=mock-key'
responses.add(responses.GET, url, json=EMPTY_BLOB, status=200)
client.get_history()
self.assertEqual(responses.calls[0].request.url, url)
responses.reset()
url_with_invalid_param_combination = 'https://data-api.defipulse.com/api/v1/defipulse/api/GetHistory?period=period&length=length&api-key=mock-key'
responses.add(
responses.GET,
url_with_invalid_param_combination,
json=EMPTY_BLOB,
status=200,
)
client.get_history(params={'period': 'period', 'length': 'length'})
self.assertEqual(
responses.calls[0].request.url,
url_with_invalid_param_combination,
)
self.assertWarnsRegex(
UserWarning, 'API only supports "period" or "length" params exclusively.'
)
@responses.activate
def test_get_lending_history(self):
client = DefiPulse(api_key='mock-key')
url = 'https://data-api.defipulse.com/api/v1/defipulse/api/getLendingHistory?api-key=mock-key'
responses.add(responses.GET, url, json=EMPTY_BLOB, status=200)
client.get_lending_history()
self.assertEqual(
responses.calls[0].request.url,
url,
)
responses.reset()
url_with_invalid_param_combination = 'https://data-api.defipulse.com/api/v1/defipulse/api/getLendingHistory?period=period&length=length&api-key=mock-key'
responses.add(
responses.GET,
url_with_invalid_param_combination,
json=EMPTY_BLOB,
status=200,
)
client.get_lending_history(params={'period': 'period', 'length': 'length'})
self.assertEqual(
responses.calls[0].request.url,
url_with_invalid_param_combination,
)
self.assertWarnsRegex(
UserWarning, 'API only supports "period" or "length" params exclusively.'
)
@responses.activate
def test_get_rates(self):
client = DefiPulse(api_key='mock-key')
url_without_amount = 'https://data-api.defipulse.com/api/v1/defipulse/api/GetRates?token=DAI&api-key=mock-key'
responses.add(responses.GET, url_without_amount, json=EMPTY_BLOB, status=200)
client.get_rates(token='DAI')
self.assertEqual(
responses.calls[0].request.url,
url_without_amount,
'it does not include amount as a query param',
)
responses.reset()
url_with_amount = 'https://data-api.defipulse.com/api/v1/defipulse/api/GetRates?token=DAI&amount=100&api-key=mock-key'
responses.add(responses.GET, url_with_amount, json=EMPTY_BLOB, status=200)
client.get_rates(token='DAI', amount=100)
self.assertEqual(
responses.calls[0].request.url,
url_with_amount,
'it includes the amount as a query param',
)
| 35.242188
| 161
| 0.616493
| 517
| 4,511
| 5.226306
| 0.158607
| 0.066617
| 0.055514
| 0.072169
| 0.84604
| 0.825315
| 0.817172
| 0.773131
| 0.744264
| 0.672465
| 0
| 0.013608
| 0.266903
| 4,511
| 127
| 162
| 35.519685
| 0.803447
| 0
| 0
| 0.461538
| 0
| 0.105769
| 0.281977
| 0
| 0
| 0
| 0
| 0
| 0.086538
| 1
| 0.038462
| false
| 0
| 0.028846
| 0
| 0.076923
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
600691bc6325ffae4fff28f579686fc7156dafb5
| 7,542
|
py
|
Python
|
main/tests/test_images.py
|
geoah/mataroa
|
5646af778bca8625b2d5efa4ebcfbe69a5f7dd12
|
[
"MIT"
] | null | null | null |
main/tests/test_images.py
|
geoah/mataroa
|
5646af778bca8625b2d5efa4ebcfbe69a5f7dd12
|
[
"MIT"
] | null | null | null |
main/tests/test_images.py
|
geoah/mataroa
|
5646af778bca8625b2d5efa4ebcfbe69a5f7dd12
|
[
"MIT"
] | null | null | null |
from django.test import TestCase
from django.urls import reverse
from main import models
class ImageCreateTestCase(TestCase):
def setUp(self):
self.user = models.User.objects.create(username="alice")
self.client.force_login(self.user)
def test_image_upload(self):
with open("main/tests/testdata/vulf.jpeg", "rb") as fp:
self.client.post(reverse("image_list"), {"file": fp})
self.assertTrue(models.Image.objects.filter(name="vulf").exists())
self.assertEqual(models.Image.objects.get(name="vulf").extension, "jpeg")
self.assertIsNotNone(models.Image.objects.get(name="vulf").slug)
class ImageCreateAnonTestCase(TestCase):
def test_image_upload_anon(self):
with open("main/tests/testdata/vulf.jpeg", "rb") as fp:
response = self.client.post(reverse("image_list"), {"file": fp})
self.assertEqual(response.status_code, 302)
self.assertTrue(reverse("login") in response.url)
class ImageDetailTestCase(TestCase):
def setUp(self):
self.user = models.User.objects.create(username="alice")
self.client.force_login(self.user)
with open("main/tests/testdata/vulf.jpeg", "rb") as fp:
self.client.post(reverse("image_list"), {"file": fp})
self.image = models.Image.objects.get(name="vulf")
def test_image_detail(self):
response = self.client.get(
reverse("image_detail", args=(self.image.slug,)),
)
self.assertEqual(response.status_code, 200)
self.assertInHTML("<h1>vulf</h1>", response.content.decode("utf-8"))
self.assertContains(response, "Uploaded on")
class ImageRawTestCase(TestCase):
def setUp(self):
self.user = models.User.objects.create(username="alice")
self.client.force_login(self.user)
with open("main/tests/testdata/vulf.jpeg", "rb") as fp:
self.client.post(reverse("image_list"), {"file": fp})
self.image = models.Image.objects.get(name="vulf")
def test_image_raw(self):
response = self.client.get(
reverse("image_raw", args=(self.image.slug, self.image.extension)),
)
self.assertEqual(response.status_code, 200)
self.assertEqual(self.image.data.tobytes(), response.content)
class ImageRawWrongExtTestCase(TestCase):
def setUp(self):
self.user = models.User.objects.create(username="alice")
self.client.force_login(self.user)
with open("main/tests/testdata/vulf.jpeg", "rb") as fp:
self.client.post(reverse("image_list"), {"file": fp})
self.image = models.Image.objects.get(name="vulf")
def test_image_raw(self):
response = self.client.get(
reverse("image_raw", args=(self.image.slug, "png")),
)
self.assertEqual(response.status_code, 404)
class ImageRawNotFoundTestCase(TestCase):
def setUp(self):
self.slug = "nonexistent-slug"
self.extension = "jpeg"
def test_image_raw(self):
response = self.client.get(
reverse("image_raw", args=(self.slug, self.extension)),
)
self.assertEqual(response.status_code, 404)
class ImageUpdateTestCase(TestCase):
def setUp(self):
self.user = models.User.objects.create(username="alice")
self.client.force_login(self.user)
with open("main/tests/testdata/vulf.jpeg", "rb") as fp:
self.client.post(reverse("image_list"), {"file": fp})
self.image = models.Image.objects.get(name="vulf")
def test_image_update(self):
new_data = {
"name": "new vulf",
}
self.client.post(reverse("image_update", args=(self.image.slug,)), new_data)
updated_image = models.Image.objects.get(id=self.image.id)
self.assertEqual(updated_image.name, new_data["name"])
class ImageUpdateAnonTestCase(TestCase):
"""Tests non logged in user cannot update image."""
def setUp(self):
self.user = models.User.objects.create(username="alice")
self.client.force_login(self.user)
with open("main/tests/testdata/vulf.jpeg", "rb") as fp:
self.client.post(reverse("image_list"), {"file": fp})
self.image = models.Image.objects.get(name="vulf")
self.client.logout()
def test_image_update(self):
new_data = {
"name": "new vulf",
}
self.client.post(reverse("image_update", args=(self.image.slug,)), new_data)
image_now = models.Image.objects.get(id=self.image.id)
self.assertEqual(image_now.name, "vulf")
class ImageUpdateNotOwnTestCase(TestCase):
"""Tests user cannot update other user's image name."""
def setUp(self):
self.victim = models.User.objects.create(username="bob")
self.client.force_login(self.victim)
with open("main/tests/testdata/vulf.jpeg", "rb") as fp:
self.client.post(reverse("image_list"), {"file": fp})
self.image = models.Image.objects.get(name="vulf")
self.client.logout()
self.attacker = models.User.objects.create(username="alice")
self.client.force_login(self.attacker)
def test_image_update_not_own(self):
new_data = {
"name": "bad vulf",
}
self.client.post(reverse("image_update", args=(self.image.slug,)), new_data)
image_now = models.Image.objects.get(id=self.image.id)
self.assertEqual(image_now.name, "vulf")
class ImageDeleteTestCase(TestCase):
def setUp(self):
self.user = models.User.objects.create(username="alice")
self.client.force_login(self.user)
with open("main/tests/testdata/vulf.jpeg", "rb") as fp:
self.client.post(reverse("image_list"), {"file": fp})
self.image = models.Image.objects.get(name="vulf")
def test_image_delete(self):
self.client.post(reverse("image_delete", args=(self.image.slug,)))
self.assertFalse(
models.Image.objects.filter(name="vulf", owner=self.user).exists()
)
class ImageDeleteAnonTestCase(TestCase):
"""Tests non logged in user cannot delete image."""
def setUp(self):
self.user = models.User.objects.create(username="alice")
self.client.force_login(self.user)
with open("main/tests/testdata/vulf.jpeg", "rb") as fp:
self.client.post(reverse("image_list"), {"file": fp})
self.image = models.Image.objects.get(name="vulf")
self.client.logout()
def test_image_delete_anon(self):
self.client.post(reverse("image_delete", args=(self.image.slug,)))
self.assertTrue(
models.Image.objects.filter(name="vulf", owner=self.user).exists()
)
class ImageDeleteNotOwnTestCase(TestCase):
"""Tests user cannot delete other's image."""
def setUp(self):
self.victim = models.User.objects.create(username="bob")
self.client.force_login(self.victim)
with open("main/tests/testdata/vulf.jpeg", "rb") as fp:
self.client.post(reverse("image_list"), {"file": fp})
self.image = models.Image.objects.get(name="vulf")
self.client.logout()
self.attacker = models.User.objects.create(username="alice")
self.client.force_login(self.attacker)
def test_image_delete_not_own(self):
self.client.post(reverse("image_delete", args=(self.image.slug,)))
self.assertTrue(
models.Image.objects.filter(name="vulf", owner=self.victim).exists()
)
| 37.71
| 85
| 0.642668
| 943
| 7,542
| 5.054083
| 0.107105
| 0.077633
| 0.067982
| 0.074906
| 0.806966
| 0.789341
| 0.773395
| 0.717373
| 0.708561
| 0.700168
| 0
| 0.003017
| 0.208831
| 7,542
| 199
| 86
| 37.899497
| 0.79571
| 0.023999
| 0
| 0.660131
| 0
| 0
| 0.113215
| 0.04346
| 0
| 0
| 0
| 0
| 0.117647
| 1
| 0.150327
| false
| 0
| 0.019608
| 0
| 0.248366
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
607817abfd8937ede10d732414373c42cf24fdbe
| 18
|
py
|
Python
|
dist/micropy-cli/frozen/re.py
|
kevindawson/Pico-Stub
|
6f9112779d4d81f821a3af273a450b9329ccdbab
|
[
"Apache-2.0"
] | 19
|
2021-01-25T23:56:09.000Z
|
2022-02-21T13:55:16.000Z
|
dist/micropy-cli/frozen/re.py
|
kevindawson/Pico-Stub
|
6f9112779d4d81f821a3af273a450b9329ccdbab
|
[
"Apache-2.0"
] | 18
|
2021-02-06T09:03:09.000Z
|
2021-10-04T16:36:35.000Z
|
dist/micropy-cli/frozen/re.py
|
kevindawson/Pico-Stub
|
6f9112779d4d81f821a3af273a450b9329ccdbab
|
[
"Apache-2.0"
] | 6
|
2021-01-26T08:41:47.000Z
|
2021-04-27T11:33:33.000Z
|
from ure import *
| 9
| 17
| 0.722222
| 3
| 18
| 4.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.222222
| 18
| 1
| 18
| 18
| 0.928571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
60938f1234d3562d3e5714aaa6623f011ca4836d
| 2,531
|
py
|
Python
|
src/leetcode_1961_check_if_string_is_a_prefix_of_array.py
|
sungho-joo/leetcode2github
|
ce7730ef40f6051df23681dd3c0e1e657abba620
|
[
"MIT"
] | null | null | null |
src/leetcode_1961_check_if_string_is_a_prefix_of_array.py
|
sungho-joo/leetcode2github
|
ce7730ef40f6051df23681dd3c0e1e657abba620
|
[
"MIT"
] | null | null | null |
src/leetcode_1961_check_if_string_is_a_prefix_of_array.py
|
sungho-joo/leetcode2github
|
ce7730ef40f6051df23681dd3c0e1e657abba620
|
[
"MIT"
] | null | null | null |
# @l2g 1961 python3
# [1961] Check If String Is a Prefix of Array
# Difficulty: Easy
# https://leetcode.com/problems/check-if-string-is-a-prefix-of-array
#
# Given a string s and an array of strings words, determine whether s is a prefix string of words.
# A string s is a prefix string of words if s can be made by concatenating the first k strings in words for some positive k no larger than words.
# length.
# Return true if s is a prefix string of words, or false otherwise.
#
# Example 1:
#
# Input: s = "iloveleetcode", words = ["i","love","leetcode","apples"]
# Output: true
# Explanation:
# s can be made by concatenating "i", "love", and "leetcode" together.
#
# Example 2:
#
# Input: s = "iloveleetcode", words = ["apples","i","love","leetcode"]
# Output: false
# Explanation:
# It is impossible to make s using a prefix of arr.
#
# Constraints:
#
# 1 <= words.length <= 100
# 1 <= words[i].length <= 20
# 1 <= s.length <= 1000
# words[i] and s consist of only lowercase English letters.
#
#
from typing import List
class Solution:
def isPrefixString(self, s: str, words: List[str]) -> bool:
pos, word_idx = 0, 0
while pos < len(s) and word_idx < len(words):
if s[pos : pos + len(words[word_idx])] != words[word_idx]:
return False
pos += len(words[word_idx])
word_idx += 1
return True if pos == len(s) else False
if __name__ == "__main__":
import os
import pytest
pytest.main([os.path.join("tests", "test_1961.py")])
| 28.438202
| 145
| 0.663374
| 396
| 2,531
| 4.20202
| 0.25
| 0.050481
| 0.054087
| 0.036058
| 0.852163
| 0.830529
| 0.830529
| 0.830529
| 0.830529
| 0.830529
| 0
| 0.027418
| 0.207428
| 2,531
| 88
| 146
| 28.761364
| 0.802094
| 0.740814
| 0
| 0
| 0
| 0
| 0.042301
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.214286
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
60ac6617cce616538b3ec32a3d626a9b6fb521d2
| 3,208
|
py
|
Python
|
tests/test_module.py
|
prbpedro/simple_peewee_flask_webapi
|
9f70e2cf034d2ff53b6c730f1362e03a37cc9a59
|
[
"MIT"
] | null | null | null |
tests/test_module.py
|
prbpedro/simple_peewee_flask_webapi
|
9f70e2cf034d2ff53b6c730f1362e03a37cc9a59
|
[
"MIT"
] | null | null | null |
tests/test_module.py
|
prbpedro/simple_peewee_flask_webapi
|
9f70e2cf034d2ff53b6c730f1362e03a37cc9a59
|
[
"MIT"
] | null | null | null |
import unittest
import simple_peewee_flask_webapi
class ModuleTest(unittest.TestCase):
def __init__(self, methodName):
super().__init__(methodName)
self.test_client = None
def setUp(self):
simple_peewee_flask_webapi.application_start.app.config[
'TESTING'] = True
a = simple_peewee_flask_webapi.application_start.app.test_client()
self.test_client = a
def test(self):
try:
url = "http://127.0.0.1:5000/get-models/"
payload = {"id_join_table": "1", "id_simple_table": "1"}
headers = {'Content-Type': "application/x-www-form-urlencoded"}
response = self.test_client.post(url, data=payload,
headers=headers)
self.assertEqual(response.status_code, 200)
url = "http://127.0.0.1:5000/get-models/"
payload = {"id_join_table": "2", "id_simple_table": "1"}
headers = {'Content-Type': "application/x-www-form-urlencoded"}
response = self.test_client.post(url, data=payload,
headers=headers)
self.assertEqual(response.status_code, 404)
url = "http://127.0.0.1:5000/get-models/"
payload = {"id_join_table": "1", "id_simple_table": "2"}
headers = {'Content-Type': "application/x-www-form-urlencoded"}
response = self.test_client.post(url, data=payload,
headers=headers)
self.assertEqual(response.status_code, 404)
response = self.test_client.get(
"http://127.0.0.1:5000/simple-table/?id_simple_table=1")
self.assertEqual(response.status_code, 200)
url = "http://127.0.0.1:5000/simple-table/"
payload = "id_simple_table=1"
headers = {'Content-Type': "application/x-www-form-urlencoded"}
response = self.test_client.post(url, data=payload,
headers=headers)
self.assertEqual(response.status_code, 200)
response = self.test_client.get(
"http://127.0.0.1:5000/simple-table/?id_simple_table=2")
self.assertEqual(response.status_code, 404)
url = "http://127.0.0.1:5000/join_table/"
payload = "id_join_table=1"
headers = {'Content-Type': "application/x-www-form-urlencoded"}
response = self.test_client.post(url, data=payload,
headers=headers)
self.assertEqual(response.status_code, 200)
response = self.test_client.get(
"http://127.0.0.1:5000/join_table/?id_join_table=1")
self.assertEqual(response.status_code, 200)
response = self.test_client.get(
"http://127.0.0.1:5000/join_table/?id_join_table=2")
self.assertEqual(response.status_code, 404)
except Exception as e:
print(e)
self.assertFalse(True)
if __name__ == "__main__":
unittest.main()
| 42.210526
| 76
| 0.552681
| 361
| 3,208
| 4.703601
| 0.174515
| 0.070671
| 0.090695
| 0.047703
| 0.838045
| 0.830389
| 0.830389
| 0.780919
| 0.745583
| 0.745583
| 0
| 0.059502
| 0.32419
| 3,208
| 75
| 77
| 42.773333
| 0.723708
| 0
| 0
| 0.516667
| 0
| 0
| 0.233961
| 0.052665
| 0
| 0
| 0
| 0
| 0.166667
| 1
| 0.05
| false
| 0
| 0.033333
| 0
| 0.1
| 0.016667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
60bf56d1e26a3ba1cfe408ae256ce89427dc45a1
| 36
|
py
|
Python
|
specklepy/transports/server/__init__.py
|
AntoineDao/specklepy
|
8566674f2ed9e84b8aa7ac310e39003d596ed2fd
|
[
"Apache-2.0"
] | 26
|
2020-12-01T10:00:13.000Z
|
2021-08-04T02:12:32.000Z
|
specklepy/transports/server/__init__.py
|
AntoineDao/specklepy
|
8566674f2ed9e84b8aa7ac310e39003d596ed2fd
|
[
"Apache-2.0"
] | 51
|
2021-08-06T15:54:54.000Z
|
2022-03-24T10:36:30.000Z
|
specklepy/transports/server/__init__.py
|
AntoineDao/specklepy
|
8566674f2ed9e84b8aa7ac310e39003d596ed2fd
|
[
"Apache-2.0"
] | 7
|
2020-12-22T15:37:17.000Z
|
2021-07-29T14:44:09.000Z
|
from .server import ServerTransport
| 18
| 35
| 0.861111
| 4
| 36
| 7.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 36
| 1
| 36
| 36
| 0.96875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
60cb3a38bf4adb53d9a18657a5cabb53e3e06f50
| 145
|
py
|
Python
|
qp/__init__.py
|
meshch/qp
|
4f19841769c644ffff3eff297cacf6aeb2ac2cbc
|
[
"MIT"
] | 4
|
2016-12-06T17:51:45.000Z
|
2019-11-15T12:27:24.000Z
|
qp/__init__.py
|
meshch/qp
|
4f19841769c644ffff3eff297cacf6aeb2ac2cbc
|
[
"MIT"
] | 74
|
2016-11-15T22:11:56.000Z
|
2022-03-30T15:38:03.000Z
|
qp/__init__.py
|
meshch/qp
|
4f19841769c644ffff3eff297cacf6aeb2ac2cbc
|
[
"MIT"
] | 7
|
2017-04-04T19:46:21.000Z
|
2021-05-19T06:02:07.000Z
|
from composite import *
from ensemble import *
from metrics import *
# from parametrization import *
from pdf import *
from utils import *
| 20.714286
| 32
| 0.737931
| 18
| 145
| 5.944444
| 0.444444
| 0.46729
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.213793
| 145
| 6
| 33
| 24.166667
| 0.938596
| 0.2
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
60dd7cf9628213cdb3eb307cdd6c83e2c17b2415
| 7,836
|
py
|
Python
|
lldb/test/API/functionalities/limit-debug-info/TestLimitDebugInfo.py
|
mkinsner/llvm
|
589d48844edb12cd357b3024248b93d64b6760bf
|
[
"Apache-2.0"
] | 2,338
|
2018-06-19T17:34:51.000Z
|
2022-03-31T11:00:37.000Z
|
lldb/test/API/functionalities/limit-debug-info/TestLimitDebugInfo.py
|
mkinsner/llvm
|
589d48844edb12cd357b3024248b93d64b6760bf
|
[
"Apache-2.0"
] | 3,740
|
2019-01-23T15:36:48.000Z
|
2022-03-31T22:01:13.000Z
|
lldb/test/API/functionalities/limit-debug-info/TestLimitDebugInfo.py
|
mkinsner/llvm
|
589d48844edb12cd357b3024248b93d64b6760bf
|
[
"Apache-2.0"
] | 500
|
2019-01-23T07:49:22.000Z
|
2022-03-30T02:59:37.000Z
|
"""
Test completing types using information from other shared libraries.
"""
import os
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class LimitDebugInfoTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
def _check_type(self, target, name):
exe = target.FindModule(lldb.SBFileSpec("a.out"))
type_ = exe.FindFirstType(name)
self.trace("type_: %s"%type_)
self.assertTrue(type_)
base = type_.GetDirectBaseClassAtIndex(0).GetType()
self.trace("base:%s"%base)
self.assertTrue(base)
self.assertEquals(base.GetNumberOfFields(), 0)
def _check_debug_info_is_limited(self, target):
# Without other shared libraries we should only see the member declared
# in the derived class. This serves as a sanity check that we are truly
# building with limited debug info.
self._check_type(target, "InheritsFromOne")
self._check_type(target, "InheritsFromTwo")
@skipIf(bugnumber="pr46284", debug_info="gmodules")
@skipIfWindows # Clang emits type info even with -flimit-debug-info
# Requires DW_CC_pass_by_* attributes from Clang 7 to correctly call
# by-value functions.
@skipIf(compiler="clang", compiler_version=['<', '7.0'])
def test_one_and_two_debug(self):
self.build()
target = self.dbg.CreateTarget(self.getBuildArtifact("a.out"))
self._check_debug_info_is_limited(target)
lldbutil.run_to_name_breakpoint(self, "main",
extra_images=["one", "two"])
# But when other shared libraries are loaded, we should be able to see
# all members.
self.expect_expr("inherits_from_one.member", result_value="47")
self.expect_expr("inherits_from_one.one", result_value="142")
self.expect_expr("inherits_from_two.member", result_value="47")
self.expect_expr("inherits_from_two.one", result_value="142")
self.expect_expr("inherits_from_two.two", result_value="242")
self.expect_expr("one_as_member.member", result_value="47")
self.expect_expr("one_as_member.one.member", result_value="147")
self.expect_expr("two_as_member.member", result_value="47")
self.expect_expr("two_as_member.two.one.member", result_value="147")
self.expect_expr("two_as_member.two.member", result_value="247")
self.expect_expr("array_of_one[2].member", result_value="174")
self.expect_expr("array_of_two[2].one[2].member", result_value="174")
self.expect_expr("array_of_two[2].member", result_value="274")
self.expect_expr("get_one().member", result_value="124")
self.expect_expr("get_two().one().member", result_value="124")
self.expect_expr("get_two().member", result_value="224")
self.expect_expr("shadowed_one.member", result_value="47")
self.expect_expr("shadowed_one.one", result_value="142")
@skipIf(bugnumber="pr46284", debug_info="gmodules")
@skipIfWindows # Clang emits type info even with -flimit-debug-info
# Requires DW_CC_pass_by_* attributes from Clang 7 to correctly call
# by-value functions.
@skipIf(compiler="clang", compiler_version=['<', '7.0'])
def test_two_debug(self):
self.build(dictionary=dict(STRIP_ONE="1"))
target = self.dbg.CreateTarget(self.getBuildArtifact("a.out"))
self._check_debug_info_is_limited(target)
lldbutil.run_to_name_breakpoint(self, "main",
extra_images=["one", "two"])
# This time, we should only see the members from the second library.
self.expect_expr("inherits_from_one.member", result_value="47")
self.expect("expr inherits_from_one.one", error=True,
substrs=["no member named 'one' in 'InheritsFromOne'"])
self.expect_expr("inherits_from_two.member", result_value="47")
self.expect("expr inherits_from_two.one", error=True,
substrs=["no member named 'one' in 'InheritsFromTwo'"])
self.expect_expr("inherits_from_two.two", result_value="242")
self.expect_expr("one_as_member.member", result_value="47")
self.expect("expr one_as_member.one.member", error=True,
substrs=["no member named 'member' in 'member::One'"])
self.expect_expr("two_as_member.member", result_value="47")
self.expect("expr two_as_member.two.one.member", error=True,
substrs=["no member named 'member' in 'member::One'"])
self.expect_expr("two_as_member.two.member", result_value="247")
self.expect("expr array_of_one[2].member", error=True,
substrs=["no member named 'member' in 'array::One'"])
self.expect("expr array_of_two[2].one[2].member", error=True,
substrs=["no member named 'member' in 'array::One'"])
self.expect_expr("array_of_two[2].member", result_value="274")
self.expect("expr get_one().member", error=True,
substrs=["calling 'get_one' with incomplete return type 'result::One'"])
self.expect("expr get_two().one().member", error=True,
substrs=["calling 'one' with incomplete return type 'result::One'"])
self.expect_expr("get_two().member", result_value="224")
@skipIf(bugnumber="pr46284", debug_info="gmodules")
@skipIfWindows # Clang emits type info even with -flimit-debug-info
# Requires DW_CC_pass_by_* attributes from Clang 7 to correctly call
# by-value functions.
@skipIf(compiler="clang", compiler_version=['<', '7.0'])
def test_one_debug(self):
self.build(dictionary=dict(STRIP_TWO="1"))
target = self.dbg.CreateTarget(self.getBuildArtifact("a.out"))
self._check_debug_info_is_limited(target)
lldbutil.run_to_name_breakpoint(self, "main",
extra_images=["one", "two"])
# In this case we should only see the members from the second library.
# Note that we cannot see inherits_from_two.one because without debug
# info for "Two", we cannot determine that it in fact inherits from
# "One".
self.expect_expr("inherits_from_one.member", result_value="47")
self.expect_expr("inherits_from_one.one", result_value="142")
self.expect_expr("inherits_from_two.member", result_value="47")
self.expect("expr inherits_from_two.one", error=True,
substrs=["no member named 'one' in 'InheritsFromTwo'"])
self.expect("expr inherits_from_two.two", error=True,
substrs=["no member named 'two' in 'InheritsFromTwo'"])
self.expect_expr("one_as_member.member", result_value="47")
self.expect_expr("one_as_member.one.member", result_value="147")
self.expect_expr("two_as_member.member", result_value="47")
self.expect("expr two_as_member.two.one.member", error=True,
substrs=["no member named 'one' in 'member::Two'"])
self.expect("expr two_as_member.two.member", error=True,
substrs=["no member named 'member' in 'member::Two'"])
self.expect_expr("array_of_one[2].member", result_value="174")
self.expect("expr array_of_two[2].one[2].member", error=True,
substrs=["no member named 'one' in 'array::Two'"])
self.expect("expr array_of_two[2].member", error=True,
substrs=["no member named 'member' in 'array::Two'"])
self.expect_expr("get_one().member", result_value="124")
self.expect("expr get_two().one().member", error=True,
substrs=["calling 'get_two' with incomplete return type 'result::Two'"])
self.expect("expr get_two().member", error=True,
substrs=["calling 'get_two' with incomplete return type 'result::Two'"])
| 48.975
| 88
| 0.66705
| 1,051
| 7,836
| 4.746908
| 0.143673
| 0.10022
| 0.140309
| 0.066146
| 0.819804
| 0.793345
| 0.778914
| 0.75887
| 0.741231
| 0.724394
| 0
| 0.020512
| 0.197422
| 7,836
| 159
| 89
| 49.283019
| 0.772778
| 0.129658
| 0
| 0.580357
| 0
| 0
| 0.317044
| 0.124522
| 0
| 0
| 0
| 0
| 0.026786
| 1
| 0.044643
| false
| 0
| 0.044643
| 0
| 0.107143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
7170ed27d53ef4244c7dc1721aaf6f5897d6e748
| 27
|
py
|
Python
|
movi/__init__.py
|
jagru20/MOVIPiAPI
|
04d86c83d4169d55900c9c4b0cf5c7b930439ec8
|
[
"BSD-3-Clause"
] | 3
|
2019-05-22T13:50:01.000Z
|
2021-06-06T07:12:23.000Z
|
movi/__init__.py
|
jagru20/MOVIPiAPI
|
04d86c83d4169d55900c9c4b0cf5c7b930439ec8
|
[
"BSD-3-Clause"
] | 2
|
2018-07-30T02:12:25.000Z
|
2018-07-30T02:41:32.000Z
|
movi/__init__.py
|
jagru20/MOVIPiAPI
|
04d86c83d4169d55900c9c4b0cf5c7b930439ec8
|
[
"BSD-3-Clause"
] | 2
|
2019-01-23T20:58:28.000Z
|
2020-10-24T21:30:26.000Z
|
from movi.MOVI import MOVI
| 13.5
| 26
| 0.814815
| 5
| 27
| 4.4
| 0.6
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148148
| 27
| 1
| 27
| 27
| 0.956522
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
71ce5105bbc354ad6538c16e90c51256d795e865
| 195
|
py
|
Python
|
PersonManage/jurisdiction/urls.py
|
ahriknow/ahriknow
|
817b5670c964e01ffe19ed182ce0a7b42e17ce09
|
[
"MIT"
] | null | null | null |
PersonManage/jurisdiction/urls.py
|
ahriknow/ahriknow
|
817b5670c964e01ffe19ed182ce0a7b42e17ce09
|
[
"MIT"
] | 3
|
2021-03-19T01:28:43.000Z
|
2021-04-08T19:57:19.000Z
|
PersonManage/jurisdiction/urls.py
|
ahriknow/ahriknow
|
817b5670c964e01ffe19ed182ce0a7b42e17ce09
|
[
"MIT"
] | null | null | null |
from django.urls import path
from . import views
urlpatterns = [
path('jurisdiction/', views.JurisdictionView.as_view()),
path('jurisdiction/<id>/', views.JurisdictionView.as_view()),
]
| 24.375
| 65
| 0.717949
| 22
| 195
| 6.272727
| 0.545455
| 0.231884
| 0.333333
| 0.391304
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.128205
| 195
| 7
| 66
| 27.857143
| 0.811765
| 0
| 0
| 0
| 0
| 0
| 0.158974
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
71e19e5c7733e8a6d9a7bc8fe1f614ae439dfbe9
| 732
|
py
|
Python
|
displays/fonts/size5x8/__init__.py
|
jelinj8/pydPiper
|
afe742275cdbf52988a46c9e1ee9aab0d369a8c8
|
[
"MIT"
] | 72
|
2017-03-13T11:01:01.000Z
|
2021-11-29T20:53:53.000Z
|
displays/fonts/size5x8/__init__.py
|
jelinj8/pydPiper
|
afe742275cdbf52988a46c9e1ee9aab0d369a8c8
|
[
"MIT"
] | 126
|
2017-03-13T16:06:59.000Z
|
2022-03-27T14:14:49.000Z
|
displays/fonts/size5x8/__init__.py
|
jelinj8/pydPiper
|
afe742275cdbf52988a46c9e1ee9aab0d369a8c8
|
[
"MIT"
] | 41
|
2017-10-11T18:37:50.000Z
|
2021-06-18T17:02:45.000Z
|
__all__ = [ "player", "playing", "repeat_all", "repeat_once", "shuffle", "speaker", "volume", "system", "bigclock", "bigchars", "bigplay", "latin1" ]
try:
import player
except ImportError:
pass
try:
import playing
except ImportError:
pass
try:
import repeat_all
except ImportError:
pass
try:
import repeat_once
except ImportError:
pass
try:
import shuffle
except ImportError:
pass
try:
import speaker
except ImportError:
pass
try:
import volume
except ImportError:
pass
try:
import system
except ImportError:
pass
try:
import bigclock
except ImportError:
pass
try:
import bigchars
except ImportError:
pass
try:
import bigplay
except ImportError:
pass
try:
import latin1
except ImportError:
pass
| 11.619048
| 149
| 0.744536
| 89
| 732
| 6.033708
| 0.202247
| 0.201117
| 0.469274
| 0.49162
| 0.636872
| 0.134078
| 0
| 0
| 0
| 0
| 0
| 0.003295
| 0.170765
| 732
| 62
| 150
| 11.806452
| 0.881384
| 0
| 0
| 0.734694
| 0
| 0
| 0.121585
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.244898
| 0.489796
| 0
| 0.489796
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
|
0
| 6
|
71e462ef5f68c2c8a565b1892f5c00ad3ae55d1d
| 166
|
py
|
Python
|
anmodel/__init__.py
|
DSPsleeporg/an_spindle
|
bebe90434628b8d50a2a7fcf5fb131fc5108a623
|
[
"BSD-3-Clause"
] | null | null | null |
anmodel/__init__.py
|
DSPsleeporg/an_spindle
|
bebe90434628b8d50a2a7fcf5fb131fc5108a623
|
[
"BSD-3-Clause"
] | null | null | null |
anmodel/__init__.py
|
DSPsleeporg/an_spindle
|
bebe90434628b8d50a2a7fcf5fb131fc5108a623
|
[
"BSD-3-Clause"
] | null | null | null |
import anmodel.analysis
import anmodel.channels
import anmodel.models
import anmodel.params
import anmodel.readinfo
import anmodel.search
import anmodel.search_manual
| 23.714286
| 28
| 0.879518
| 22
| 166
| 6.590909
| 0.409091
| 0.627586
| 0.262069
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.078313
| 166
| 7
| 28
| 23.714286
| 0.947712
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
e096ffb0487bac5aafd812a824fba2eda7fa7a6e
| 97
|
py
|
Python
|
tests/test_files/test_xfail_with_empty_reason.py
|
micheller/flake8-fine-pytest
|
8f16722fe97e67740f0af72b6867988ec31dfaf9
|
[
"MIT"
] | 4
|
2021-01-06T02:53:06.000Z
|
2022-02-24T14:11:23.000Z
|
tests/test_files/test_xfail_with_empty_reason.py
|
micheller/flake8-fine-pytest
|
8f16722fe97e67740f0af72b6867988ec31dfaf9
|
[
"MIT"
] | 7
|
2020-05-12T06:49:25.000Z
|
2022-03-05T05:03:25.000Z
|
tests/test_files/test_xfail_with_empty_reason.py
|
micheller/flake8-fine-pytest
|
8f16722fe97e67740f0af72b6867988ec31dfaf9
|
[
"MIT"
] | 6
|
2020-06-30T14:10:33.000Z
|
2020-12-21T10:19:01.000Z
|
import pytest
import datetime
@pytest.mark.xfail(reason='')
def test_xfail() -> None:
pass
| 12.125
| 29
| 0.701031
| 13
| 97
| 5.153846
| 0.769231
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.164948
| 97
| 7
| 30
| 13.857143
| 0.82716
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| true
| 0.2
| 0.4
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
e0ae9da507728adcd7f26bdbd5016e7a9fa0e38d
| 176
|
py
|
Python
|
src/wai/annotations/imgstats/format/areahistogram/specifier/__init__.py
|
waikato-ufdl/wai-annotations-imgstats
|
9831044ad38bb3ce3ebe4be101f08f5ec881d965
|
[
"Apache-2.0"
] | null | null | null |
src/wai/annotations/imgstats/format/areahistogram/specifier/__init__.py
|
waikato-ufdl/wai-annotations-imgstats
|
9831044ad38bb3ce3ebe4be101f08f5ec881d965
|
[
"Apache-2.0"
] | null | null | null |
src/wai/annotations/imgstats/format/areahistogram/specifier/__init__.py
|
waikato-ufdl/wai-annotations-imgstats
|
9831044ad38bb3ce3ebe4be101f08f5ec881d965
|
[
"Apache-2.0"
] | null | null | null |
from ._AreaHistogramISOutputFormatSpecifier import AreaHistogramISOutputFormatSpecifier
from ._AreaHistogramODOutputFormatSpecifier import AreaHistogramODOutputFormatSpecifier
| 58.666667
| 87
| 0.943182
| 8
| 176
| 20.5
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.045455
| 176
| 2
| 88
| 88
| 0.97619
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e0c8950e6c166eef8dae79deb2977d28e83041f6
| 204
|
py
|
Python
|
docs/cookbook/shortcuts/create_multiple_shortcuts.py
|
LuD1161/winshell
|
1509d211ab3403dd1cff6113e4e13462d6dec35b
|
[
"MIT"
] | 41
|
2015-02-06T19:15:07.000Z
|
2021-11-10T13:27:43.000Z
|
docs/cookbook/shortcuts/create_multiple_shortcuts.py
|
LuD1161/winshell
|
1509d211ab3403dd1cff6113e4e13462d6dec35b
|
[
"MIT"
] | 6
|
2015-04-13T12:36:55.000Z
|
2022-03-28T13:36:16.000Z
|
docs/cookbook/shortcuts/create_multiple_shortcuts.py
|
LuD1161/winshell
|
1509d211ab3403dd1cff6113e4e13462d6dec35b
|
[
"MIT"
] | 10
|
2015-01-14T07:20:42.000Z
|
2022-02-14T19:14:26.000Z
|
import os, sys
import winshell
shortcut = winshell.shortcut(sys.executable)
shortcut.write(os.path.join(winshell.desktop(), "python.lnk"))
shortcut.write(os.path.join(winshell.programs(), "python.lnk"))
| 29.142857
| 63
| 0.769608
| 28
| 204
| 5.607143
| 0.464286
| 0.203822
| 0.191083
| 0.242038
| 0.394904
| 0.394904
| 0
| 0
| 0
| 0
| 0
| 0
| 0.063725
| 204
| 6
| 64
| 34
| 0.82199
| 0
| 0
| 0
| 0
| 0
| 0.098039
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
e0e72a9ce9c23d02bd03d9e5335e2b6d256e5035
| 67
|
py
|
Python
|
src/setup.py
|
trialanderror123/APSPT
|
1eae0075efd066443037d69d165199eb07e3ad9e
|
[
"MIT"
] | null | null | null |
src/setup.py
|
trialanderror123/APSPT
|
1eae0075efd066443037d69d165199eb07e3ad9e
|
[
"MIT"
] | null | null | null |
src/setup.py
|
trialanderror123/APSPT
|
1eae0075efd066443037d69d165199eb07e3ad9e
|
[
"MIT"
] | null | null | null |
import os
import sys
def setup():
sys.path.append(os.getcwd())
| 13.4
| 32
| 0.686567
| 11
| 67
| 4.181818
| 0.727273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.164179
| 67
| 5
| 32
| 13.4
| 0.821429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0.5
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
1cfa9dc17a75a9e4a947d52560fdd1e51c56f812
| 41
|
py
|
Python
|
limix_ext/lmm/_core/__init__.py
|
glimix/limix-ext
|
7cf7a3b2b02f6a73cbba90f1945a06b9295b7357
|
[
"MIT"
] | null | null | null |
limix_ext/lmm/_core/__init__.py
|
glimix/limix-ext
|
7cf7a3b2b02f6a73cbba90f1945a06b9295b7357
|
[
"MIT"
] | 2
|
2017-06-05T08:29:22.000Z
|
2017-06-07T16:54:54.000Z
|
limix_ext/lmm/_core/__init__.py
|
glimix/limix-ext
|
7cf7a3b2b02f6a73cbba90f1945a06b9295b7357
|
[
"MIT"
] | null | null | null |
from ._fastlmm import train_associations
| 20.5
| 40
| 0.878049
| 5
| 41
| 6.8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.097561
| 41
| 1
| 41
| 41
| 0.918919
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
1cffa8d5ab9c50dc112abe12d918224cb0a87fda
| 20
|
py
|
Python
|
RDS/circle3_central_services/token_storage/src/api/Token/__init__.py
|
Sciebo-RDS/Sciebo-RDS
|
d71cf449ed045a2a7a049e2cb77c99fd5a9195bd
|
[
"MIT"
] | 10
|
2020-06-24T08:22:24.000Z
|
2022-01-13T16:17:36.000Z
|
RDS/circle3_central_services/token_storage/src/api/Token/__init__.py
|
Sciebo-RDS/Sciebo-RDS
|
d71cf449ed045a2a7a049e2cb77c99fd5a9195bd
|
[
"MIT"
] | 78
|
2020-01-23T14:32:06.000Z
|
2022-03-07T14:11:16.000Z
|
RDS/circle3_central_services/token_storage/src/api/Token/__init__.py
|
Sciebo-RDS/Sciebo-RDS
|
d71cf449ed045a2a7a049e2cb77c99fd5a9195bd
|
[
"MIT"
] | 1
|
2020-06-24T08:33:48.000Z
|
2020-06-24T08:33:48.000Z
|
from .Token import *
| 20
| 20
| 0.75
| 3
| 20
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15
| 20
| 1
| 20
| 20
| 0.882353
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e820ed99973ee9fda521947c2e623f19ea395ff5
| 137
|
py
|
Python
|
tests/basics/andor.py
|
geowor01/micropython
|
7fb13eeef4a85f21cae36f1d502bcc53880e1815
|
[
"MIT"
] | 7
|
2019-10-18T13:41:39.000Z
|
2022-03-15T17:27:57.000Z
|
tests/basics/andor.py
|
geowor01/micropython
|
7fb13eeef4a85f21cae36f1d502bcc53880e1815
|
[
"MIT"
] | null | null | null |
tests/basics/andor.py
|
geowor01/micropython
|
7fb13eeef4a85f21cae36f1d502bcc53880e1815
|
[
"MIT"
] | 2
|
2020-06-23T09:10:15.000Z
|
2020-12-22T06:42:14.000Z
|
# test short circuit expressions outside if conditionals
print(() or 1)
print((1,) or 1)
print(() and 1)
print((1,) and 1)
print("PASS")
| 19.571429
| 56
| 0.678832
| 23
| 137
| 4.043478
| 0.521739
| 0.258065
| 0.172043
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.051724
| 0.153285
| 137
| 7
| 57
| 19.571429
| 0.75
| 0.394161
| 0
| 0
| 0
| 0
| 0.04878
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.2
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 1
|
0
| 6
|
1c0c29277d52e48f06be4b15db65b27c8c83113d
| 217
|
py
|
Python
|
common/utils/database/heroku_db_creds.py
|
Jay206-Programmer/FEASTA_NEW
|
e32b47c74ec1cb3875bd31c4e6edecbd7094fd8c
|
[
"MIT"
] | null | null | null |
common/utils/database/heroku_db_creds.py
|
Jay206-Programmer/FEASTA_NEW
|
e32b47c74ec1cb3875bd31c4e6edecbd7094fd8c
|
[
"MIT"
] | null | null | null |
common/utils/database/heroku_db_creds.py
|
Jay206-Programmer/FEASTA_NEW
|
e32b47c74ec1cb3875bd31c4e6edecbd7094fd8c
|
[
"MIT"
] | null | null | null |
# DataBase Credentials
database="da665kfg2oc9og"
user = "aourrzrdjlrpjo"
password = "12359d0fa8d70aeea4d2ef3acd96eb794f178dee42887f7c350ad49a4d78e323"
host = "ec2-18-207-95-219.compute-1.amazonaws.com"
port = "5432"
| 36.166667
| 77
| 0.81106
| 20
| 217
| 8.8
| 0.95
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.29
| 0.078341
| 217
| 6
| 78
| 36.166667
| 0.59
| 0.092166
| 0
| 0
| 0
| 0
| 0.702564
| 0.538462
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.2
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
1c1637128109287112ec6f7cec843445deb6c292
| 159
|
py
|
Python
|
rinobot_plugin/__init__.py
|
rinocloud/rinobot-plugin
|
0196f2a5a01a85a2f4859755b262bf093cd4eb45
|
[
"MIT"
] | null | null | null |
rinobot_plugin/__init__.py
|
rinocloud/rinobot-plugin
|
0196f2a5a01a85a2f4859755b262bf093cd4eb45
|
[
"MIT"
] | null | null | null |
rinobot_plugin/__init__.py
|
rinocloud/rinobot-plugin
|
0196f2a5a01a85a2f4859755b262bf093cd4eb45
|
[
"MIT"
] | null | null | null |
# Rinobot-plugin python helpers
# API docs at http://github.com/rinocloud/rinobot-plugin
# Authors:
# Eoin Murray <eoin@rinocloud.com>
from .plugin import *
| 19.875
| 56
| 0.748428
| 22
| 159
| 5.409091
| 0.727273
| 0.218487
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.132075
| 159
| 7
| 57
| 22.714286
| 0.862319
| 0.792453
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
98e52dd7d930c2ce9f71ef03c6a78263a582d855
| 88
|
py
|
Python
|
mpl_format/animation/kwarg_animations/__init__.py
|
vahndi/mpl-format
|
b03f97c37968e55a35c7181d93616eb44fc55f05
|
[
"MIT"
] | null | null | null |
mpl_format/animation/kwarg_animations/__init__.py
|
vahndi/mpl-format
|
b03f97c37968e55a35c7181d93616eb44fc55f05
|
[
"MIT"
] | 51
|
2020-05-18T04:18:11.000Z
|
2022-02-01T02:35:59.000Z
|
mpl_format/animation/kwarg_animations/__init__.py
|
vahndi/mpl-format
|
b03f97c37968e55a35c7181d93616eb44fc55f05
|
[
"MIT"
] | null | null | null |
from .color_animation import ColorAnimation
from .float_animation import FloatAnimation
| 29.333333
| 43
| 0.886364
| 10
| 88
| 7.6
| 0.7
| 0.394737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 88
| 2
| 44
| 44
| 0.95
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
c72f664e1df6f5baec9838ad2fae096bcbfc0c12
| 55,098
|
py
|
Python
|
tests/unit/aiplatform/test_automl_tabular_training_jobs.py
|
kthytang/python-aiplatform
|
e82c1792293396045a1032df015a3700fc38609b
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/aiplatform/test_automl_tabular_training_jobs.py
|
kthytang/python-aiplatform
|
e82c1792293396045a1032df015a3700fc38609b
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/aiplatform/test_automl_tabular_training_jobs.py
|
kthytang/python-aiplatform
|
e82c1792293396045a1032df015a3700fc38609b
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import importlib
import pytest
from unittest import mock
from google.cloud import aiplatform
from google.cloud.aiplatform import base
from google.cloud.aiplatform import datasets
from google.cloud.aiplatform import initializer
from google.cloud.aiplatform import schema
from google.cloud.aiplatform import training_jobs
from google.cloud.aiplatform.compat.services import (
model_service_client,
pipeline_service_client,
)
from google.cloud.aiplatform.compat.types import (
dataset as gca_dataset,
encryption_spec as gca_encryption_spec,
model as gca_model,
pipeline_state as gca_pipeline_state,
training_pipeline as gca_training_pipeline,
)
from google.protobuf import json_format
from google.protobuf import struct_pb2
_TEST_BUCKET_NAME = "test-bucket"
_TEST_GCS_PATH_WITHOUT_BUCKET = "path/to/folder"
_TEST_GCS_PATH = f"{_TEST_BUCKET_NAME}/{_TEST_GCS_PATH_WITHOUT_BUCKET}"
_TEST_GCS_PATH_WITH_TRAILING_SLASH = f"{_TEST_GCS_PATH}/"
_TEST_PROJECT = "test-project"
_TEST_DATASET_DISPLAY_NAME = "test-dataset-display-name"
_TEST_DATASET_NAME = "test-dataset-name"
_TEST_DISPLAY_NAME = "test-display-name"
_TEST_METADATA_SCHEMA_URI_TABULAR = schema.dataset.metadata.tabular
_TEST_METADATA_SCHEMA_URI_NONTABULAR = schema.dataset.metadata.image
_TEST_TRAINING_COLUMN_NAMES = [
"sepal_width",
"sepal_length",
"petal_length",
"petal_width",
"target",
]
_TEST_TRAINING_COLUMN_NAMES_ALTERNATIVE = [
"apple",
"banana",
"coconut",
"target",
]
_TEST_TRAINING_COLUMN_TRANSFORMATIONS = [
{"auto": {"column_name": "sepal_width"}},
{"auto": {"column_name": "sepal_length"}},
{"auto": {"column_name": "petal_length"}},
{"auto": {"column_name": "petal_width"}},
]
_TEST_TRAINING_COLUMN_SPECS = {
"apple": "auto",
"banana": "auto",
"coconut": "auto",
}
_TEST_TRAINING_COLUMN_TRANSFORMATIONS_ALTERNATIVE = [
{"auto": {"column_name": "apple"}},
{"auto": {"column_name": "banana"}},
{"auto": {"column_name": "coconut"}},
]
_TEST_TRAINING_COLUMN_TRANSFORMATIONS_ALTERNATIVE_NOT_AUTO = [
{"numeric": {"column_name": "apple"}},
{"categorical": {"column_name": "banana"}},
{"text": {"column_name": "coconut"}},
]
_TEST_TRAINING_TARGET_COLUMN = "target"
_TEST_TRAINING_BUDGET_MILLI_NODE_HOURS = 1000
_TEST_TRAINING_WEIGHT_COLUMN = "weight"
_TEST_TRAINING_DISABLE_EARLY_STOPPING = True
_TEST_TRAINING_OPTIMIZATION_OBJECTIVE_NAME = "minimize-log-loss"
_TEST_TRAINING_OPTIMIZATION_PREDICTION_TYPE = "classification"
_TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS = True
_TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS_BIGQUERY_DESTINATION_URI = (
"bq://path.to.table"
)
_TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS_OVERRIDE_DESTINATION = False
_TEST_ADDITIONAL_EXPERIMENTS = ["exp1", "exp2"]
_TEST_TRAINING_TASK_INPUTS_DICT = {
# required inputs
"targetColumn": _TEST_TRAINING_TARGET_COLUMN,
"transformations": _TEST_TRAINING_COLUMN_TRANSFORMATIONS,
"trainBudgetMilliNodeHours": _TEST_TRAINING_BUDGET_MILLI_NODE_HOURS,
# optional inputs
"weightColumnName": _TEST_TRAINING_WEIGHT_COLUMN,
"disableEarlyStopping": _TEST_TRAINING_DISABLE_EARLY_STOPPING,
"predictionType": _TEST_TRAINING_OPTIMIZATION_PREDICTION_TYPE,
"optimizationObjective": _TEST_TRAINING_OPTIMIZATION_OBJECTIVE_NAME,
"optimizationObjectiveRecallValue": None,
"optimizationObjectivePrecisionValue": None,
}
_TEST_TRAINING_TASK_INPUTS = json_format.ParseDict(
_TEST_TRAINING_TASK_INPUTS_DICT,
struct_pb2.Value(),
)
_TEST_TRAINING_TASK_INPUTS_WITH_ADDITIONAL_EXPERIMENTS = json_format.ParseDict(
{
**_TEST_TRAINING_TASK_INPUTS_DICT,
"additionalExperiments": _TEST_ADDITIONAL_EXPERIMENTS,
},
struct_pb2.Value(),
)
_TEST_TRAINING_TASK_INPUTS_ALTERNATIVE = json_format.ParseDict(
{
**_TEST_TRAINING_TASK_INPUTS_DICT,
"transformations": _TEST_TRAINING_COLUMN_TRANSFORMATIONS_ALTERNATIVE,
},
struct_pb2.Value(),
)
_TEST_TRAINING_TASK_INPUTS_ALTERNATIVE_NOT_AUTO = json_format.ParseDict(
{
**_TEST_TRAINING_TASK_INPUTS_DICT,
"transformations": _TEST_TRAINING_COLUMN_TRANSFORMATIONS_ALTERNATIVE_NOT_AUTO,
},
struct_pb2.Value(),
)
_TEST_TRAINING_TASK_INPUTS_WITH_EXPORT_EVAL_DATA_ITEMS = json_format.ParseDict(
{
**_TEST_TRAINING_TASK_INPUTS_DICT,
"exportEvaluatedDataItemsConfig": {
"destinationBigqueryUri": _TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS_BIGQUERY_DESTINATION_URI,
"overrideExistingTable": _TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS_OVERRIDE_DESTINATION,
},
},
struct_pb2.Value(),
)
_TEST_DATASET_NAME = "test-dataset-name"
_TEST_MODEL_DISPLAY_NAME = "model-display-name"
_TEST_LABELS = {"key": "value"}
_TEST_MODEL_LABELS = {"model_key": "model_value"}
_TEST_FRACTION_SPLIT_TRAINING = 0.6
_TEST_FRACTION_SPLIT_VALIDATION = 0.2
_TEST_FRACTION_SPLIT_TEST = 0.2
_TEST_SPLIT_PREDEFINED_COLUMN_NAME = "split"
_TEST_SPLIT_TIMESTAMP_COLUMN_NAME = "timestamp"
_TEST_OUTPUT_PYTHON_PACKAGE_PATH = "gs://test/ouput/python/trainer.tar.gz"
_TEST_MODEL_NAME = "projects/my-project/locations/us-central1/models/12345"
_TEST_PIPELINE_RESOURCE_NAME = (
"projects/my-project/locations/us-central1/trainingPipelines/12345"
)
# CMEK encryption
_TEST_DEFAULT_ENCRYPTION_KEY_NAME = "key_default"
_TEST_DEFAULT_ENCRYPTION_SPEC = gca_encryption_spec.EncryptionSpec(
kms_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME
)
_TEST_PIPELINE_ENCRYPTION_KEY_NAME = "key_pipeline"
_TEST_PIPELINE_ENCRYPTION_SPEC = gca_encryption_spec.EncryptionSpec(
kms_key_name=_TEST_PIPELINE_ENCRYPTION_KEY_NAME
)
_TEST_MODEL_ENCRYPTION_KEY_NAME = "key_model"
_TEST_MODEL_ENCRYPTION_SPEC = gca_encryption_spec.EncryptionSpec(
kms_key_name=_TEST_MODEL_ENCRYPTION_KEY_NAME
)
@pytest.fixture
def mock_pipeline_service_create():
with mock.patch.object(
pipeline_service_client.PipelineServiceClient, "create_training_pipeline"
) as mock_create_training_pipeline:
mock_create_training_pipeline.return_value = (
gca_training_pipeline.TrainingPipeline(
name=_TEST_PIPELINE_RESOURCE_NAME,
state=gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED,
model_to_upload=gca_model.Model(name=_TEST_MODEL_NAME),
)
)
yield mock_create_training_pipeline
@pytest.fixture
def mock_pipeline_service_create_fail():
with mock.patch.object(
pipeline_service_client.PipelineServiceClient, "create_training_pipeline"
) as mock_create_training_pipeline:
mock_create_training_pipeline.side_effect = RuntimeError("Mock fail")
yield mock_create_training_pipeline
@pytest.fixture
def mock_pipeline_service_get():
with mock.patch.object(
pipeline_service_client.PipelineServiceClient, "get_training_pipeline"
) as mock_get_training_pipeline:
mock_get_training_pipeline.return_value = (
gca_training_pipeline.TrainingPipeline(
name=_TEST_PIPELINE_RESOURCE_NAME,
state=gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED,
model_to_upload=gca_model.Model(name=_TEST_MODEL_NAME),
)
)
yield mock_get_training_pipeline
@pytest.fixture
def mock_pipeline_service_create_and_get_with_fail():
with mock.patch.object(
pipeline_service_client.PipelineServiceClient, "create_training_pipeline"
) as mock_create_training_pipeline:
mock_create_training_pipeline.return_value = (
gca_training_pipeline.TrainingPipeline(
name=_TEST_PIPELINE_RESOURCE_NAME,
state=gca_pipeline_state.PipelineState.PIPELINE_STATE_RUNNING,
)
)
with mock.patch.object(
pipeline_service_client.PipelineServiceClient, "get_training_pipeline"
) as mock_get_training_pipeline:
mock_get_training_pipeline.return_value = (
gca_training_pipeline.TrainingPipeline(
name=_TEST_PIPELINE_RESOURCE_NAME,
state=gca_pipeline_state.PipelineState.PIPELINE_STATE_FAILED,
)
)
yield mock_create_training_pipeline, mock_get_training_pipeline
@pytest.fixture
def mock_model_service_get():
with mock.patch.object(
model_service_client.ModelServiceClient, "get_model"
) as mock_get_model:
mock_get_model.return_value = gca_model.Model()
yield mock_get_model
@pytest.fixture
def mock_dataset_tabular():
ds = mock.MagicMock(datasets.TabularDataset)
ds.name = _TEST_DATASET_NAME
ds._latest_future = None
ds._exception = None
ds._gca_resource = gca_dataset.Dataset(
display_name=_TEST_DATASET_DISPLAY_NAME,
metadata_schema_uri=_TEST_METADATA_SCHEMA_URI_TABULAR,
labels={},
name=_TEST_DATASET_NAME,
metadata={},
)
ds.column_names = _TEST_TRAINING_COLUMN_NAMES
yield ds
@pytest.fixture
def mock_dataset_tabular_alternative():
ds = mock.MagicMock(datasets.TabularDataset)
ds.name = _TEST_DATASET_NAME
ds._latest_future = None
ds._exception = None
ds._gca_resource = gca_dataset.Dataset(
display_name=_TEST_DATASET_DISPLAY_NAME,
metadata_schema_uri=_TEST_METADATA_SCHEMA_URI_TABULAR,
labels={},
name=_TEST_DATASET_NAME,
metadata={},
)
ds.column_names = _TEST_TRAINING_COLUMN_NAMES_ALTERNATIVE
yield ds
@pytest.fixture
def mock_dataset_nontabular():
ds = mock.MagicMock(datasets.ImageDataset)
ds.name = _TEST_DATASET_NAME
ds._latest_future = None
ds._exception = None
ds._gca_resource = gca_dataset.Dataset(
display_name=_TEST_DATASET_DISPLAY_NAME,
metadata_schema_uri=_TEST_METADATA_SCHEMA_URI_NONTABULAR,
labels={},
name=_TEST_DATASET_NAME,
metadata={},
)
return ds
@pytest.mark.usefixtures("google_auth_mock")
class TestAutoMLTabularTrainingJob:
def setup_method(self):
importlib.reload(initializer)
importlib.reload(aiplatform)
def teardown_method(self):
initializer.global_pool.shutdown(wait=True)
@pytest.mark.parametrize("sync", [True, False])
def test_run_call_pipeline_service_create(
self,
mock_pipeline_service_create,
mock_pipeline_service_get,
mock_dataset_tabular,
mock_model_service_get,
sync,
):
aiplatform.init(
project=_TEST_PROJECT,
staging_bucket=_TEST_BUCKET_NAME,
encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME,
)
job = training_jobs.AutoMLTabularTrainingJob(
display_name=_TEST_DISPLAY_NAME,
labels=_TEST_LABELS,
optimization_objective=_TEST_TRAINING_OPTIMIZATION_OBJECTIVE_NAME,
optimization_prediction_type=_TEST_TRAINING_OPTIMIZATION_PREDICTION_TYPE,
column_transformations=_TEST_TRAINING_COLUMN_TRANSFORMATIONS,
optimization_objective_recall_value=None,
optimization_objective_precision_value=None,
)
model_from_job = job.run(
dataset=mock_dataset_tabular,
target_column=_TEST_TRAINING_TARGET_COLUMN,
model_display_name=_TEST_MODEL_DISPLAY_NAME,
model_labels=_TEST_MODEL_LABELS,
weight_column=_TEST_TRAINING_WEIGHT_COLUMN,
budget_milli_node_hours=_TEST_TRAINING_BUDGET_MILLI_NODE_HOURS,
disable_early_stopping=_TEST_TRAINING_DISABLE_EARLY_STOPPING,
additional_experiments=_TEST_ADDITIONAL_EXPERIMENTS,
sync=sync,
create_request_timeout=None,
)
job.wait_for_resource_creation()
assert job.resource_name == _TEST_PIPELINE_RESOURCE_NAME
if not sync:
model_from_job.wait()
true_managed_model = gca_model.Model(
display_name=_TEST_MODEL_DISPLAY_NAME,
labels=_TEST_MODEL_LABELS,
encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
)
true_input_data_config = gca_training_pipeline.InputDataConfig(
dataset_id=mock_dataset_tabular.name,
)
true_training_pipeline = gca_training_pipeline.TrainingPipeline(
display_name=_TEST_DISPLAY_NAME,
labels=_TEST_LABELS,
training_task_definition=schema.training_job.definition.automl_tabular,
training_task_inputs=_TEST_TRAINING_TASK_INPUTS_WITH_ADDITIONAL_EXPERIMENTS,
model_to_upload=true_managed_model,
input_data_config=true_input_data_config,
encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
)
mock_pipeline_service_create.assert_called_once_with(
parent=initializer.global_config.common_location_path(),
training_pipeline=true_training_pipeline,
timeout=None,
)
assert job._gca_resource is mock_pipeline_service_get.return_value
mock_model_service_get.assert_called_once_with(
name=_TEST_MODEL_NAME, retry=base._DEFAULT_RETRY
)
assert model_from_job._gca_resource is mock_model_service_get.return_value
assert job.get_model()._gca_resource is mock_model_service_get.return_value
assert not job.has_failed
assert job.state == gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
@pytest.mark.parametrize("sync", [True, False])
def test_run_call_pipeline_service_create_with_timeout(
self,
mock_pipeline_service_create,
mock_pipeline_service_get,
mock_dataset_tabular,
mock_model_service_get,
sync,
):
aiplatform.init(
project=_TEST_PROJECT,
staging_bucket=_TEST_BUCKET_NAME,
encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME,
)
job = training_jobs.AutoMLTabularTrainingJob(
display_name=_TEST_DISPLAY_NAME,
labels=_TEST_LABELS,
optimization_objective=_TEST_TRAINING_OPTIMIZATION_OBJECTIVE_NAME,
optimization_prediction_type=_TEST_TRAINING_OPTIMIZATION_PREDICTION_TYPE,
column_transformations=_TEST_TRAINING_COLUMN_TRANSFORMATIONS,
optimization_objective_recall_value=None,
optimization_objective_precision_value=None,
)
model_from_job = job.run(
dataset=mock_dataset_tabular,
target_column=_TEST_TRAINING_TARGET_COLUMN,
model_display_name=_TEST_MODEL_DISPLAY_NAME,
model_labels=_TEST_MODEL_LABELS,
weight_column=_TEST_TRAINING_WEIGHT_COLUMN,
budget_milli_node_hours=_TEST_TRAINING_BUDGET_MILLI_NODE_HOURS,
disable_early_stopping=_TEST_TRAINING_DISABLE_EARLY_STOPPING,
additional_experiments=_TEST_ADDITIONAL_EXPERIMENTS,
sync=sync,
create_request_timeout=180.0,
)
job.wait_for_resource_creation()
if not sync:
model_from_job.wait()
true_managed_model = gca_model.Model(
display_name=_TEST_MODEL_DISPLAY_NAME,
labels=_TEST_MODEL_LABELS,
encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
)
true_input_data_config = gca_training_pipeline.InputDataConfig(
dataset_id=mock_dataset_tabular.name,
)
true_training_pipeline = gca_training_pipeline.TrainingPipeline(
display_name=_TEST_DISPLAY_NAME,
labels=_TEST_LABELS,
training_task_definition=schema.training_job.definition.automl_tabular,
training_task_inputs=_TEST_TRAINING_TASK_INPUTS_WITH_ADDITIONAL_EXPERIMENTS,
model_to_upload=true_managed_model,
input_data_config=true_input_data_config,
encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
)
mock_pipeline_service_create.assert_called_once_with(
parent=initializer.global_config.common_location_path(),
training_pipeline=true_training_pipeline,
timeout=180.0,
)
@pytest.mark.parametrize("sync", [True, False])
def test_run_call_pipeline_service_create_with_export_eval_data_items(
self,
mock_pipeline_service_create,
mock_pipeline_service_get,
mock_dataset_tabular,
mock_model_service_get,
sync,
):
aiplatform.init(
project=_TEST_PROJECT,
staging_bucket=_TEST_BUCKET_NAME,
encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME,
)
job = training_jobs.AutoMLTabularTrainingJob(
display_name=_TEST_DISPLAY_NAME,
optimization_objective=_TEST_TRAINING_OPTIMIZATION_OBJECTIVE_NAME,
optimization_prediction_type=_TEST_TRAINING_OPTIMIZATION_PREDICTION_TYPE,
column_transformations=_TEST_TRAINING_COLUMN_TRANSFORMATIONS,
optimization_objective_recall_value=None,
optimization_objective_precision_value=None,
)
model_from_job = job.run(
dataset=mock_dataset_tabular,
target_column=_TEST_TRAINING_TARGET_COLUMN,
model_display_name=_TEST_MODEL_DISPLAY_NAME,
weight_column=_TEST_TRAINING_WEIGHT_COLUMN,
budget_milli_node_hours=_TEST_TRAINING_BUDGET_MILLI_NODE_HOURS,
disable_early_stopping=_TEST_TRAINING_DISABLE_EARLY_STOPPING,
export_evaluated_data_items=_TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS,
export_evaluated_data_items_bigquery_destination_uri=_TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS_BIGQUERY_DESTINATION_URI,
export_evaluated_data_items_override_destination=_TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS_OVERRIDE_DESTINATION,
sync=sync,
create_request_timeout=None,
)
job.wait_for_resource_creation()
assert job.resource_name == _TEST_PIPELINE_RESOURCE_NAME
if not sync:
model_from_job.wait()
true_managed_model = gca_model.Model(
display_name=_TEST_MODEL_DISPLAY_NAME,
encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
)
true_input_data_config = gca_training_pipeline.InputDataConfig(
dataset_id=mock_dataset_tabular.name,
)
true_training_pipeline = gca_training_pipeline.TrainingPipeline(
display_name=_TEST_DISPLAY_NAME,
training_task_definition=schema.training_job.definition.automl_tabular,
training_task_inputs=_TEST_TRAINING_TASK_INPUTS_WITH_EXPORT_EVAL_DATA_ITEMS,
model_to_upload=true_managed_model,
input_data_config=true_input_data_config,
encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
)
mock_pipeline_service_create.assert_called_once_with(
parent=initializer.global_config.common_location_path(),
training_pipeline=true_training_pipeline,
timeout=None,
)
assert job._gca_resource is mock_pipeline_service_get.return_value
mock_model_service_get.assert_called_once_with(
name=_TEST_MODEL_NAME, retry=base._DEFAULT_RETRY
)
assert model_from_job._gca_resource is mock_model_service_get.return_value
assert job.get_model()._gca_resource is mock_model_service_get.return_value
assert not job.has_failed
assert job.state == gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
@pytest.mark.usefixtures("mock_pipeline_service_get")
@pytest.mark.parametrize("sync", [True, False])
def test_run_call_pipeline_if_no_model_display_name_nor_model_labels(
self,
mock_pipeline_service_create,
mock_dataset_tabular,
mock_model_service_get,
sync,
):
aiplatform.init(project=_TEST_PROJECT, staging_bucket=_TEST_BUCKET_NAME)
job = training_jobs.AutoMLTabularTrainingJob(
display_name=_TEST_DISPLAY_NAME,
labels=_TEST_LABELS,
optimization_objective=_TEST_TRAINING_OPTIMIZATION_OBJECTIVE_NAME,
optimization_prediction_type=_TEST_TRAINING_OPTIMIZATION_PREDICTION_TYPE,
column_transformations=_TEST_TRAINING_COLUMN_TRANSFORMATIONS,
optimization_objective_recall_value=None,
optimization_objective_precision_value=None,
training_encryption_spec_key_name=_TEST_PIPELINE_ENCRYPTION_KEY_NAME,
model_encryption_spec_key_name=_TEST_MODEL_ENCRYPTION_KEY_NAME,
)
model_from_job = job.run(
dataset=mock_dataset_tabular,
target_column=_TEST_TRAINING_TARGET_COLUMN,
weight_column=_TEST_TRAINING_WEIGHT_COLUMN,
budget_milli_node_hours=_TEST_TRAINING_BUDGET_MILLI_NODE_HOURS,
disable_early_stopping=_TEST_TRAINING_DISABLE_EARLY_STOPPING,
create_request_timeout=None,
)
job.wait_for_resource_creation()
assert job.resource_name == _TEST_PIPELINE_RESOURCE_NAME
if not sync:
model_from_job.wait()
        # Test that the model display name defaults to the job display name
true_managed_model = gca_model.Model(
display_name=_TEST_DISPLAY_NAME,
labels=_TEST_LABELS,
encryption_spec=_TEST_MODEL_ENCRYPTION_SPEC,
)
true_input_data_config = gca_training_pipeline.InputDataConfig(
dataset_id=mock_dataset_tabular.name,
)
true_training_pipeline = gca_training_pipeline.TrainingPipeline(
display_name=_TEST_DISPLAY_NAME,
labels=_TEST_LABELS,
training_task_definition=schema.training_job.definition.automl_tabular,
training_task_inputs=_TEST_TRAINING_TASK_INPUTS,
model_to_upload=true_managed_model,
input_data_config=true_input_data_config,
encryption_spec=_TEST_PIPELINE_ENCRYPTION_SPEC,
)
mock_pipeline_service_create.assert_called_once_with(
parent=initializer.global_config.common_location_path(),
training_pipeline=true_training_pipeline,
timeout=None,
)
@pytest.mark.parametrize("sync", [True, False])
    # This test checks that default transformations are used if no column transformations are provided
def test_run_call_pipeline_service_create_if_no_column_transformations(
self,
mock_pipeline_service_create,
mock_pipeline_service_get,
mock_dataset_tabular,
mock_model_service_get,
sync,
):
aiplatform.init(
project=_TEST_PROJECT,
staging_bucket=_TEST_BUCKET_NAME,
encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME,
)
job = training_jobs.AutoMLTabularTrainingJob(
display_name=_TEST_DISPLAY_NAME,
optimization_objective=_TEST_TRAINING_OPTIMIZATION_OBJECTIVE_NAME,
optimization_prediction_type=_TEST_TRAINING_OPTIMIZATION_PREDICTION_TYPE,
column_transformations=None,
optimization_objective_recall_value=None,
optimization_objective_precision_value=None,
)
model_from_job = job.run(
dataset=mock_dataset_tabular,
target_column=_TEST_TRAINING_TARGET_COLUMN,
model_display_name=_TEST_MODEL_DISPLAY_NAME,
weight_column=_TEST_TRAINING_WEIGHT_COLUMN,
budget_milli_node_hours=_TEST_TRAINING_BUDGET_MILLI_NODE_HOURS,
disable_early_stopping=_TEST_TRAINING_DISABLE_EARLY_STOPPING,
sync=sync,
create_request_timeout=None,
)
job.wait_for_resource_creation()
assert job.resource_name == _TEST_PIPELINE_RESOURCE_NAME
if not sync:
model_from_job.wait()
true_managed_model = gca_model.Model(
display_name=_TEST_MODEL_DISPLAY_NAME,
encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
)
true_input_data_config = gca_training_pipeline.InputDataConfig(
dataset_id=mock_dataset_tabular.name,
)
true_training_pipeline = gca_training_pipeline.TrainingPipeline(
display_name=_TEST_DISPLAY_NAME,
training_task_definition=schema.training_job.definition.automl_tabular,
training_task_inputs=_TEST_TRAINING_TASK_INPUTS,
model_to_upload=true_managed_model,
input_data_config=true_input_data_config,
encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
)
mock_pipeline_service_create.assert_called_once_with(
parent=initializer.global_config.common_location_path(),
training_pipeline=true_training_pipeline,
timeout=None,
)
@pytest.mark.parametrize("sync", [True, False])
    # This test checks that additional experiments are reflected in the training task inputs
def test_run_call_pipeline_service_create_if_set_additional_experiments(
self,
mock_pipeline_service_create,
mock_pipeline_service_get,
mock_dataset_tabular,
mock_model_service_get,
sync,
):
aiplatform.init(
project=_TEST_PROJECT,
staging_bucket=_TEST_BUCKET_NAME,
encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME,
)
job = training_jobs.AutoMLTabularTrainingJob(
display_name=_TEST_DISPLAY_NAME,
optimization_objective=_TEST_TRAINING_OPTIMIZATION_OBJECTIVE_NAME,
optimization_prediction_type=_TEST_TRAINING_OPTIMIZATION_PREDICTION_TYPE,
column_transformations=None,
optimization_objective_recall_value=None,
optimization_objective_precision_value=None,
)
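        # Inject the extra experiment flags; the expected pipeline below uses the task-inputs variant that includes them.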
job._add_additional_experiments(_TEST_ADDITIONAL_EXPERIMENTS)
model_from_job = job.run(
dataset=mock_dataset_tabular,
target_column=_TEST_TRAINING_TARGET_COLUMN,
model_display_name=_TEST_MODEL_DISPLAY_NAME,
weight_column=_TEST_TRAINING_WEIGHT_COLUMN,
budget_milli_node_hours=_TEST_TRAINING_BUDGET_MILLI_NODE_HOURS,
disable_early_stopping=_TEST_TRAINING_DISABLE_EARLY_STOPPING,
sync=sync,
create_request_timeout=None,
)
job.wait_for_resource_creation()
assert job.resource_name == _TEST_PIPELINE_RESOURCE_NAME
if not sync:
model_from_job.wait()
true_managed_model = gca_model.Model(
display_name=_TEST_MODEL_DISPLAY_NAME,
encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
)
true_input_data_config = gca_training_pipeline.InputDataConfig(
dataset_id=mock_dataset_tabular.name,
)
true_training_pipeline = gca_training_pipeline.TrainingPipeline(
display_name=_TEST_DISPLAY_NAME,
training_task_definition=schema.training_job.definition.automl_tabular,
training_task_inputs=_TEST_TRAINING_TASK_INPUTS_WITH_ADDITIONAL_EXPERIMENTS,
model_to_upload=true_managed_model,
input_data_config=true_input_data_config,
encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
)
mock_pipeline_service_create.assert_called_once_with(
parent=initializer.global_config.common_location_path(),
training_pipeline=true_training_pipeline,
timeout=None,
)
@pytest.mark.parametrize("sync", [True, False])
def test_run_call_pipeline_service_create_with_column_specs(
self,
mock_pipeline_service_create,
mock_pipeline_service_get,
mock_dataset_tabular_alternative,
mock_model_service_get,
sync,
):
aiplatform.init(project=_TEST_PROJECT, staging_bucket=_TEST_BUCKET_NAME)
column_specs = training_jobs.AutoMLTabularTrainingJob.get_auto_column_specs(
dataset=mock_dataset_tabular_alternative,
target_column=_TEST_TRAINING_TARGET_COLUMN,
)
assert column_specs == _TEST_TRAINING_COLUMN_SPECS
job = training_jobs.AutoMLTabularTrainingJob(
display_name=_TEST_DISPLAY_NAME,
optimization_objective=_TEST_TRAINING_OPTIMIZATION_OBJECTIVE_NAME,
optimization_prediction_type=_TEST_TRAINING_OPTIMIZATION_PREDICTION_TYPE,
column_specs=column_specs,
optimization_objective_recall_value=None,
optimization_objective_precision_value=None,
)
model_from_job = job.run(
dataset=mock_dataset_tabular_alternative,
target_column=_TEST_TRAINING_TARGET_COLUMN,
model_display_name=_TEST_MODEL_DISPLAY_NAME,
weight_column=_TEST_TRAINING_WEIGHT_COLUMN,
budget_milli_node_hours=_TEST_TRAINING_BUDGET_MILLI_NODE_HOURS,
disable_early_stopping=_TEST_TRAINING_DISABLE_EARLY_STOPPING,
sync=sync,
create_request_timeout=None,
)
if not sync:
model_from_job.wait()
true_managed_model = gca_model.Model(display_name=_TEST_MODEL_DISPLAY_NAME)
true_input_data_config = gca_training_pipeline.InputDataConfig(
dataset_id=mock_dataset_tabular_alternative.name,
)
true_training_pipeline = gca_training_pipeline.TrainingPipeline(
display_name=_TEST_DISPLAY_NAME,
training_task_definition=schema.training_job.definition.automl_tabular,
training_task_inputs=_TEST_TRAINING_TASK_INPUTS_ALTERNATIVE,
model_to_upload=true_managed_model,
input_data_config=true_input_data_config,
)
mock_pipeline_service_create.assert_called_once_with(
parent=initializer.global_config.common_location_path(),
training_pipeline=true_training_pipeline,
timeout=None,
)
@pytest.mark.parametrize("sync", [True, False])
def test_call_pipeline_service_create_with_column_specs_and_transformations_raises(
self,
mock_dataset_tabular_alternative,
sync,
):
aiplatform.init()
column_specs = training_jobs.AutoMLTabularTrainingJob.get_auto_column_specs(
dataset=mock_dataset_tabular_alternative,
target_column=_TEST_TRAINING_TARGET_COLUMN,
)
assert column_specs == _TEST_TRAINING_COLUMN_SPECS
with pytest.raises(ValueError):
training_jobs.AutoMLTabularTrainingJob(
display_name=_TEST_DISPLAY_NAME,
optimization_prediction_type=_TEST_TRAINING_OPTIMIZATION_PREDICTION_TYPE,
column_transformations=_TEST_TRAINING_COLUMN_TRANSFORMATIONS,
column_specs=column_specs,
)
@pytest.mark.parametrize("sync", [True, False])
def test_get_column_specs_no_target_raises(
self,
mock_dataset_tabular_alternative,
sync,
):
aiplatform.init()
with pytest.raises(TypeError):
training_jobs.AutoMLTabularTrainingJob.get_auto_column_specs(
dataset=mock_dataset_tabular_alternative
)
@pytest.mark.parametrize("sync", [True, False])
def test_run_call_pipeline_service_create_with_column_specs_not_auto(
self,
mock_pipeline_service_create,
mock_pipeline_service_get,
mock_dataset_tabular_alternative,
mock_model_service_get,
sync,
):
aiplatform.init(project=_TEST_PROJECT, staging_bucket=_TEST_BUCKET_NAME)
column_specs = training_jobs.AutoMLTabularTrainingJob.get_auto_column_specs(
dataset=mock_dataset_tabular_alternative,
target_column=_TEST_TRAINING_TARGET_COLUMN,
)
column_specs[
_TEST_TRAINING_COLUMN_NAMES_ALTERNATIVE[0]
] = training_jobs.AutoMLTabularTrainingJob.column_data_types.NUMERIC
column_specs[
_TEST_TRAINING_COLUMN_NAMES_ALTERNATIVE[1]
] = training_jobs.AutoMLTabularTrainingJob.column_data_types.CATEGORICAL
column_specs[
_TEST_TRAINING_COLUMN_NAMES_ALTERNATIVE[2]
] = training_jobs.AutoMLTabularTrainingJob.column_data_types.TEXT
job = training_jobs.AutoMLTabularTrainingJob(
display_name=_TEST_DISPLAY_NAME,
optimization_objective=_TEST_TRAINING_OPTIMIZATION_OBJECTIVE_NAME,
optimization_prediction_type=_TEST_TRAINING_OPTIMIZATION_PREDICTION_TYPE,
column_specs=column_specs,
optimization_objective_recall_value=None,
optimization_objective_precision_value=None,
)
model_from_job = job.run(
dataset=mock_dataset_tabular_alternative,
target_column=_TEST_TRAINING_TARGET_COLUMN,
model_display_name=_TEST_MODEL_DISPLAY_NAME,
weight_column=_TEST_TRAINING_WEIGHT_COLUMN,
budget_milli_node_hours=_TEST_TRAINING_BUDGET_MILLI_NODE_HOURS,
disable_early_stopping=_TEST_TRAINING_DISABLE_EARLY_STOPPING,
sync=sync,
create_request_timeout=None,
)
if not sync:
model_from_job.wait()
true_managed_model = gca_model.Model(display_name=_TEST_MODEL_DISPLAY_NAME)
true_input_data_config = gca_training_pipeline.InputDataConfig(
dataset_id=mock_dataset_tabular_alternative.name,
)
true_training_pipeline = gca_training_pipeline.TrainingPipeline(
display_name=_TEST_DISPLAY_NAME,
training_task_definition=schema.training_job.definition.automl_tabular,
training_task_inputs=_TEST_TRAINING_TASK_INPUTS_ALTERNATIVE_NOT_AUTO,
model_to_upload=true_managed_model,
input_data_config=true_input_data_config,
)
mock_pipeline_service_create.assert_called_once_with(
parent=initializer.global_config.common_location_path(),
training_pipeline=true_training_pipeline,
timeout=None,
)
@pytest.mark.usefixtures(
"mock_pipeline_service_create",
"mock_pipeline_service_get",
"mock_model_service_get",
)
@pytest.mark.parametrize("sync", [True, False])
    # Also acts as a custom column_transformations test, since the first run call should not error
def test_run_called_twice_raises(self, mock_dataset_tabular, sync):
aiplatform.init(project=_TEST_PROJECT, staging_bucket=_TEST_BUCKET_NAME)
job = training_jobs.AutoMLTabularTrainingJob(
display_name=_TEST_DISPLAY_NAME,
optimization_prediction_type=_TEST_TRAINING_OPTIMIZATION_PREDICTION_TYPE,
optimization_objective=_TEST_TRAINING_OPTIMIZATION_OBJECTIVE_NAME,
column_transformations=_TEST_TRAINING_COLUMN_TRANSFORMATIONS,
optimization_objective_recall_value=None,
optimization_objective_precision_value=None,
)
job.run(
dataset=mock_dataset_tabular,
target_column=_TEST_TRAINING_TARGET_COLUMN,
model_display_name=_TEST_MODEL_DISPLAY_NAME,
sync=sync,
)
job.wait_for_resource_creation()
assert job.resource_name == _TEST_PIPELINE_RESOURCE_NAME
with pytest.raises(RuntimeError):
job.run(
dataset=mock_dataset_tabular,
target_column=_TEST_TRAINING_TARGET_COLUMN,
model_display_name=_TEST_MODEL_DISPLAY_NAME,
sync=sync,
)
@pytest.mark.parametrize("sync", [True, False])
def test_run_raises_if_pipeline_fails(
self, mock_pipeline_service_create_and_get_with_fail, mock_dataset_tabular, sync
):
aiplatform.init(project=_TEST_PROJECT, staging_bucket=_TEST_BUCKET_NAME)
job = training_jobs.AutoMLTabularTrainingJob(
display_name=_TEST_DISPLAY_NAME,
optimization_prediction_type=_TEST_TRAINING_OPTIMIZATION_PREDICTION_TYPE,
optimization_objective=_TEST_TRAINING_OPTIMIZATION_OBJECTIVE_NAME,
column_transformations=_TEST_TRAINING_COLUMN_TRANSFORMATIONS,
optimization_objective_recall_value=None,
optimization_objective_precision_value=None,
)
with pytest.raises(RuntimeError):
job.run(
model_display_name=_TEST_MODEL_DISPLAY_NAME,
dataset=mock_dataset_tabular,
target_column=_TEST_TRAINING_TARGET_COLUMN,
sync=sync,
)
if not sync:
job.wait()
with pytest.raises(RuntimeError):
job.get_model()
def test_wait_for_resource_creation_does_not_fail_if_creation_does_not_fail(
self, mock_pipeline_service_create_and_get_with_fail, mock_dataset_tabular
):
aiplatform.init(project=_TEST_PROJECT, staging_bucket=_TEST_BUCKET_NAME)
job = training_jobs.AutoMLTabularTrainingJob(
display_name=_TEST_DISPLAY_NAME,
optimization_prediction_type=_TEST_TRAINING_OPTIMIZATION_PREDICTION_TYPE,
optimization_objective=_TEST_TRAINING_OPTIMIZATION_OBJECTIVE_NAME,
column_transformations=_TEST_TRAINING_COLUMN_TRANSFORMATIONS,
optimization_objective_recall_value=None,
optimization_objective_precision_value=None,
)
job.run(
model_display_name=_TEST_MODEL_DISPLAY_NAME,
dataset=mock_dataset_tabular,
target_column=_TEST_TRAINING_TARGET_COLUMN,
sync=False,
)
job.wait_for_resource_creation()
assert job.resource_name == _TEST_PIPELINE_RESOURCE_NAME
with pytest.raises(RuntimeError):
job.wait()
with pytest.raises(RuntimeError):
job.get_model()
@pytest.mark.usefixtures("mock_pipeline_service_create_fail")
@pytest.mark.parametrize("sync", [True, False])
def test_create_fails(self, mock_dataset_tabular, sync):
aiplatform.init(project=_TEST_PROJECT, staging_bucket=_TEST_BUCKET_NAME)
job = training_jobs.AutoMLTabularTrainingJob(
display_name=_TEST_DISPLAY_NAME,
optimization_prediction_type=_TEST_TRAINING_OPTIMIZATION_PREDICTION_TYPE,
optimization_objective=_TEST_TRAINING_OPTIMIZATION_OBJECTIVE_NAME,
column_transformations=_TEST_TRAINING_COLUMN_TRANSFORMATIONS,
optimization_objective_recall_value=None,
optimization_objective_precision_value=None,
)
if sync:
with pytest.raises(RuntimeError) as e:
job.run(
model_display_name=_TEST_MODEL_DISPLAY_NAME,
dataset=mock_dataset_tabular,
target_column=_TEST_TRAINING_TARGET_COLUMN,
sync=sync,
)
assert e.match("Mock fail")
with pytest.raises(RuntimeError) as e:
job.wait_for_resource_creation()
assert e.match(
regexp=r"AutoMLTabularTrainingJob resource is not scheduled to be created."
)
with pytest.raises(RuntimeError) as e:
assert job.resource_name == _TEST_PIPELINE_RESOURCE_NAME
assert e.match(
regexp=r"AutoMLTabularTrainingJob resource has not been created."
)
job.wait()
with pytest.raises(RuntimeError) as e:
job.get_model()
            assert e.match(
                regexp="TrainingPipeline has not been launched. You must run this TrainingPipeline using TrainingPipeline.run."
            )
else:
job.run(
model_display_name=_TEST_MODEL_DISPLAY_NAME,
dataset=mock_dataset_tabular,
target_column=_TEST_TRAINING_TARGET_COLUMN,
sync=sync,
)
with pytest.raises(RuntimeError) as e:
job.wait_for_resource_creation()
assert e.match(regexp=r"Mock fail")
with pytest.raises(RuntimeError) as e:
assert job.resource_name == _TEST_PIPELINE_RESOURCE_NAME
assert e.match(
regexp=r"AutoMLTabularTrainingJob resource has not been created. Resource failed with: Mock fail"
)
with pytest.raises(RuntimeError):
job.wait()
with pytest.raises(RuntimeError):
job.get_model()
def test_raises_before_run_is_called(self, mock_pipeline_service_create):
aiplatform.init(project=_TEST_PROJECT, staging_bucket=_TEST_BUCKET_NAME)
job = training_jobs.AutoMLTabularTrainingJob(
display_name=_TEST_DISPLAY_NAME,
optimization_prediction_type=_TEST_TRAINING_OPTIMIZATION_PREDICTION_TYPE,
optimization_objective=_TEST_TRAINING_OPTIMIZATION_OBJECTIVE_NAME,
column_transformations=_TEST_TRAINING_COLUMN_TRANSFORMATIONS,
optimization_objective_recall_value=None,
optimization_objective_precision_value=None,
)
with pytest.raises(RuntimeError):
job.get_model()
with pytest.raises(RuntimeError):
job.has_failed
with pytest.raises(RuntimeError):
job.state
with pytest.raises(RuntimeError) as e:
job.wait_for_resource_creation()
assert e.match(
regexp=r"AutoMLTabularTrainingJob resource is not scheduled to be created."
)
def test_properties_throw_if_not_available(self):
job = training_jobs.AutoMLTabularTrainingJob(
display_name=_TEST_DISPLAY_NAME,
optimization_prediction_type=_TEST_TRAINING_OPTIMIZATION_PREDICTION_TYPE,
optimization_objective=_TEST_TRAINING_OPTIMIZATION_OBJECTIVE_NAME,
)
with pytest.raises(RuntimeError) as e:
job.name
assert e.match(
regexp=r"AutoMLTabularTrainingJob resource has not been created"
)
with pytest.raises(RuntimeError) as e:
job.resource_name
assert e.match(
regexp=r"AutoMLTabularTrainingJob resource has not been created"
)
with pytest.raises(RuntimeError) as e:
job.display_name
assert e.match(
regexp=r"AutoMLTabularTrainingJob resource has not been created"
)
with pytest.raises(RuntimeError) as e:
job.create_time
assert e.match(
regexp=r"AutoMLTabularTrainingJob resource has not been created"
)
with pytest.raises(RuntimeError) as e:
job.encryption_spec
assert e.match(
regexp=r"AutoMLTabularTrainingJob resource has not been created"
)
with pytest.raises(RuntimeError) as e:
job.labels
assert e.match(
regexp=r"AutoMLTabularTrainingJob resource has not been created"
)
with pytest.raises(RuntimeError) as e:
job.gca_resource
assert e.match(
regexp=r"AutoMLTabularTrainingJob resource has not been created"
)
@pytest.mark.parametrize("sync", [True, False])
def test_splits_fraction(
self,
mock_pipeline_service_create,
mock_pipeline_service_get,
mock_dataset_tabular,
mock_model_service_get,
sync,
):
"""
        Initialize aiplatform with an encryption key name.
        Create and run an AutoML Tabular training job with a fraction split, then verify the service calls and return value.
"""
aiplatform.init(
project=_TEST_PROJECT,
encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME,
)
job = training_jobs.AutoMLTabularTrainingJob(
display_name=_TEST_DISPLAY_NAME,
optimization_prediction_type=_TEST_TRAINING_OPTIMIZATION_PREDICTION_TYPE,
optimization_objective=_TEST_TRAINING_OPTIMIZATION_OBJECTIVE_NAME,
column_transformations=_TEST_TRAINING_COLUMN_TRANSFORMATIONS,
optimization_objective_recall_value=None,
optimization_objective_precision_value=None,
)
model_from_job = job.run(
dataset=mock_dataset_tabular,
target_column=_TEST_TRAINING_TARGET_COLUMN,
weight_column=_TEST_TRAINING_WEIGHT_COLUMN,
model_display_name=_TEST_MODEL_DISPLAY_NAME,
training_fraction_split=_TEST_FRACTION_SPLIT_TRAINING,
validation_fraction_split=_TEST_FRACTION_SPLIT_VALIDATION,
test_fraction_split=_TEST_FRACTION_SPLIT_TEST,
disable_early_stopping=_TEST_TRAINING_DISABLE_EARLY_STOPPING,
sync=sync,
create_request_timeout=None,
)
if not sync:
model_from_job.wait()
true_fraction_split = gca_training_pipeline.FractionSplit(
training_fraction=_TEST_FRACTION_SPLIT_TRAINING,
validation_fraction=_TEST_FRACTION_SPLIT_VALIDATION,
test_fraction=_TEST_FRACTION_SPLIT_TEST,
)
true_managed_model = gca_model.Model(
display_name=_TEST_MODEL_DISPLAY_NAME,
encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
)
true_input_data_config = gca_training_pipeline.InputDataConfig(
fraction_split=true_fraction_split,
dataset_id=mock_dataset_tabular.name,
)
true_training_pipeline = gca_training_pipeline.TrainingPipeline(
display_name=_TEST_DISPLAY_NAME,
training_task_definition=schema.training_job.definition.automl_tabular,
training_task_inputs=_TEST_TRAINING_TASK_INPUTS,
model_to_upload=true_managed_model,
input_data_config=true_input_data_config,
encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
)
mock_pipeline_service_create.assert_called_once_with(
parent=initializer.global_config.common_location_path(),
training_pipeline=true_training_pipeline,
timeout=None,
)
@pytest.mark.parametrize("sync", [True, False])
def test_splits_timestamp(
self,
mock_pipeline_service_create,
mock_pipeline_service_get,
mock_dataset_tabular,
mock_model_service_get,
sync,
):
"""
        Initialize aiplatform with an encryption key name.
        Create and run an AutoML Tabular training job with a timestamp split, then verify the service calls and return value.
"""
aiplatform.init(
project=_TEST_PROJECT,
encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME,
)
job = training_jobs.AutoMLTabularTrainingJob(
display_name=_TEST_DISPLAY_NAME,
optimization_prediction_type=_TEST_TRAINING_OPTIMIZATION_PREDICTION_TYPE,
optimization_objective=_TEST_TRAINING_OPTIMIZATION_OBJECTIVE_NAME,
column_transformations=_TEST_TRAINING_COLUMN_TRANSFORMATIONS,
optimization_objective_recall_value=None,
optimization_objective_precision_value=None,
)
model_from_job = job.run(
dataset=mock_dataset_tabular,
target_column=_TEST_TRAINING_TARGET_COLUMN,
weight_column=_TEST_TRAINING_WEIGHT_COLUMN,
model_display_name=_TEST_MODEL_DISPLAY_NAME,
training_fraction_split=_TEST_FRACTION_SPLIT_TRAINING,
validation_fraction_split=_TEST_FRACTION_SPLIT_VALIDATION,
test_fraction_split=_TEST_FRACTION_SPLIT_TEST,
timestamp_split_column_name=_TEST_SPLIT_TIMESTAMP_COLUMN_NAME,
disable_early_stopping=_TEST_TRAINING_DISABLE_EARLY_STOPPING,
sync=sync,
create_request_timeout=None,
)
if not sync:
model_from_job.wait()
true_split = gca_training_pipeline.TimestampSplit(
training_fraction=_TEST_FRACTION_SPLIT_TRAINING,
validation_fraction=_TEST_FRACTION_SPLIT_VALIDATION,
test_fraction=_TEST_FRACTION_SPLIT_TEST,
key=_TEST_SPLIT_TIMESTAMP_COLUMN_NAME,
)
true_managed_model = gca_model.Model(
display_name=_TEST_MODEL_DISPLAY_NAME,
encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
)
true_input_data_config = gca_training_pipeline.InputDataConfig(
timestamp_split=true_split,
dataset_id=mock_dataset_tabular.name,
)
true_training_pipeline = gca_training_pipeline.TrainingPipeline(
display_name=_TEST_DISPLAY_NAME,
training_task_definition=schema.training_job.definition.automl_tabular,
training_task_inputs=_TEST_TRAINING_TASK_INPUTS,
model_to_upload=true_managed_model,
input_data_config=true_input_data_config,
encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
)
mock_pipeline_service_create.assert_called_once_with(
parent=initializer.global_config.common_location_path(),
training_pipeline=true_training_pipeline,
timeout=None,
)
@pytest.mark.parametrize("sync", [True, False])
def test_splits_predefined(
self,
mock_pipeline_service_create,
mock_pipeline_service_get,
mock_dataset_tabular,
mock_model_service_get,
sync,
):
"""
        Initialize aiplatform with an encryption key name.
        Create and run an AutoML Tabular training job with a predefined split column, then verify the service calls and return value.
"""
aiplatform.init(
project=_TEST_PROJECT,
encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME,
)
job = training_jobs.AutoMLTabularTrainingJob(
display_name=_TEST_DISPLAY_NAME,
optimization_prediction_type=_TEST_TRAINING_OPTIMIZATION_PREDICTION_TYPE,
optimization_objective=_TEST_TRAINING_OPTIMIZATION_OBJECTIVE_NAME,
column_transformations=_TEST_TRAINING_COLUMN_TRANSFORMATIONS,
optimization_objective_recall_value=None,
optimization_objective_precision_value=None,
)
model_from_job = job.run(
dataset=mock_dataset_tabular,
target_column=_TEST_TRAINING_TARGET_COLUMN,
weight_column=_TEST_TRAINING_WEIGHT_COLUMN,
model_display_name=_TEST_MODEL_DISPLAY_NAME,
predefined_split_column_name=_TEST_SPLIT_PREDEFINED_COLUMN_NAME,
disable_early_stopping=_TEST_TRAINING_DISABLE_EARLY_STOPPING,
sync=sync,
create_request_timeout=None,
)
if not sync:
model_from_job.wait()
true_split = gca_training_pipeline.PredefinedSplit(
key=_TEST_SPLIT_PREDEFINED_COLUMN_NAME
)
true_managed_model = gca_model.Model(
display_name=_TEST_MODEL_DISPLAY_NAME,
encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
)
true_input_data_config = gca_training_pipeline.InputDataConfig(
predefined_split=true_split,
dataset_id=mock_dataset_tabular.name,
)
true_training_pipeline = gca_training_pipeline.TrainingPipeline(
display_name=_TEST_DISPLAY_NAME,
training_task_definition=schema.training_job.definition.automl_tabular,
training_task_inputs=_TEST_TRAINING_TASK_INPUTS,
model_to_upload=true_managed_model,
input_data_config=true_input_data_config,
encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
)
mock_pipeline_service_create.assert_called_once_with(
parent=initializer.global_config.common_location_path(),
training_pipeline=true_training_pipeline,
timeout=None,
)
@pytest.mark.parametrize("sync", [True, False])
def test_splits_default(
self,
mock_pipeline_service_create,
mock_pipeline_service_get,
mock_dataset_tabular,
mock_model_service_get,
sync,
):
"""
        Initialize aiplatform with an encryption key name.
        Create and run an AutoML Tabular training job with the default data split, then verify the service calls and return value.
"""
aiplatform.init(
project=_TEST_PROJECT,
encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME,
)
job = training_jobs.AutoMLTabularTrainingJob(
display_name=_TEST_DISPLAY_NAME,
optimization_prediction_type=_TEST_TRAINING_OPTIMIZATION_PREDICTION_TYPE,
optimization_objective=_TEST_TRAINING_OPTIMIZATION_OBJECTIVE_NAME,
column_transformations=_TEST_TRAINING_COLUMN_TRANSFORMATIONS,
optimization_objective_recall_value=None,
optimization_objective_precision_value=None,
)
model_from_job = job.run(
dataset=mock_dataset_tabular,
target_column=_TEST_TRAINING_TARGET_COLUMN,
weight_column=_TEST_TRAINING_WEIGHT_COLUMN,
model_display_name=_TEST_MODEL_DISPLAY_NAME,
disable_early_stopping=_TEST_TRAINING_DISABLE_EARLY_STOPPING,
sync=sync,
create_request_timeout=None,
)
if not sync:
model_from_job.wait()
true_managed_model = gca_model.Model(
display_name=_TEST_MODEL_DISPLAY_NAME,
encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
)
true_input_data_config = gca_training_pipeline.InputDataConfig(
dataset_id=mock_dataset_tabular.name,
)
true_training_pipeline = gca_training_pipeline.TrainingPipeline(
display_name=_TEST_DISPLAY_NAME,
training_task_definition=schema.training_job.definition.automl_tabular,
training_task_inputs=_TEST_TRAINING_TASK_INPUTS,
model_to_upload=true_managed_model,
input_data_config=true_input_data_config,
encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
)
mock_pipeline_service_create.assert_called_once_with(
parent=initializer.global_config.common_location_path(),
training_pipeline=true_training_pipeline,
timeout=None,
)
| 37.660971
| 133
| 0.704726
| 5,891
| 55,098
| 6.016126
| 0.056527
| 0.05519
| 0.02878
| 0.018227
| 0.897153
| 0.86304
| 0.84611
| 0.819616
| 0.799357
| 0.78364
| 0
| 0.001193
| 0.23941
| 55,098
| 1,462
| 134
| 37.686731
| 0.844513
| 0.027133
| 0
| 0.689799
| 0
| 0
| 0.042632
| 0.018164
| 0
| 0
| 0
| 0
| 0.040134
| 1
| 0.025084
| false
| 0
| 0.012542
| 0
| 0.039298
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
c7395637b722f08a3d429c1240ff3e388fe53e4f
| 169
|
py
|
Python
|
googlenewspy/exceptions.py
|
fernandoarrj/googlenewspy
|
bc240d3cf92b7a57d73cbcac775e6c7d32ea17d2
|
[
"MIT"
] | 2
|
2020-05-06T01:25:50.000Z
|
2020-09-28T19:21:04.000Z
|
googlenewspy/exceptions.py
|
fernandoarrj/googlenewspy
|
bc240d3cf92b7a57d73cbcac775e6c7d32ea17d2
|
[
"MIT"
] | 2
|
2020-05-06T01:32:19.000Z
|
2021-01-23T00:53:56.000Z
|
googlenewspy/exceptions.py
|
fernandoarrj/googlenewspy
|
bc240d3cf92b7a57d73cbcac775e6c7d32ea17d2
|
[
"MIT"
] | 3
|
2020-05-06T01:25:56.000Z
|
2020-12-10T23:10:59.000Z
|
class SearchGoogleNewsError(Exception):
pass
class SearchGoogleNewsDataSourceNotFound(Exception):
pass
class SearchGoogleNewsParseError(Exception):
pass
| 15.363636
| 52
| 0.798817
| 12
| 169
| 11.25
| 0.5
| 0.288889
| 0.266667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.147929
| 169
| 10
| 53
| 16.9
| 0.9375
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 0
| 1
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
c7396729d078e30ac58e65f309597720cb56364e
| 1,112
|
py
|
Python
|
purnkleen/views.py
|
RommelTJ/purnkleen
|
7a2c94fa0c2331cdc2f72e4d6718068bf00357c4
|
[
"MIT"
] | 1
|
2017-12-22T04:48:22.000Z
|
2017-12-22T04:48:22.000Z
|
purnkleen/views.py
|
RommelTJ/purnkleen
|
7a2c94fa0c2331cdc2f72e4d6718068bf00357c4
|
[
"MIT"
] | 27
|
2018-03-05T16:21:52.000Z
|
2021-03-09T04:41:16.000Z
|
purnkleen/views.py
|
RommelTJ/purnkleen
|
7a2c94fa0c2331cdc2f72e4d6718068bf00357c4
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
#########################
# Static Views #
#########################
def home(request):
return render(request, 'index.html', {})
def giveaway(request):
return render(request, 'giveaway.html', {})
def about(request):
return render(request, 'about.html', {})
def vision(request):
return render(request, 'vision.html', {})
def values(request):
return render(request, 'values.html', {})
def benefits(request):
return render(request, 'benefits.html', {})
def bylaws(request):
return render(request, 'bylaws.html', {})
def mission_planner(request):
return render(request, 'mission-planner.html', {})
def fleet_view(request):
return render(request, 'fleet-view.html', {})
def fuel_services(request):
return render(request, 'fuel-services.html', {})
def maintenance_repair(request):
return render(request, 'maintenance-and-repair.html', {})
def transportation(request):
return render(request, 'transportation.html', {})
def links_tools(request):
return render(request, 'links-and-tools.html', {})
| 19.508772
| 61
| 0.648381
| 123
| 1,112
| 5.821138
| 0.252033
| 0.236034
| 0.344972
| 0.472067
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.157374
| 1,112
| 56
| 62
| 19.857143
| 0.764141
| 0.010791
| 0
| 0
| 0
| 0
| 0.190935
| 0.026037
| 0
| 0
| 0
| 0
| 0
| 1
| 0.481481
| false
| 0
| 0.037037
| 0.481481
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
c77285576b57628ff45f0d43a852356cb1fbc154
| 106
|
py
|
Python
|
cryptoxlib/clients/bitvavo/exceptions.py
|
PetrZufan/cryptoxlib-aio
|
8fbb817ee7a7a88693804e24877863370d1d53c7
|
[
"MIT"
] | 90
|
2020-04-09T18:34:49.000Z
|
2022-03-09T14:29:32.000Z
|
cryptoxlib/clients/bitvavo/exceptions.py
|
PetrZufan/cryptoxlib-aio
|
8fbb817ee7a7a88693804e24877863370d1d53c7
|
[
"MIT"
] | 44
|
2020-04-03T17:02:20.000Z
|
2022-01-29T14:51:51.000Z
|
cryptoxlib/clients/bitvavo/exceptions.py
|
PetrZufan/cryptoxlib-aio
|
8fbb817ee7a7a88693804e24877863370d1d53c7
|
[
"MIT"
] | 28
|
2020-04-25T21:34:53.000Z
|
2022-03-31T07:20:07.000Z
|
from cryptoxlib.exceptions import CryptoXLibException
class BitvavoException(CryptoXLibException):
pass
| 21.2
| 53
| 0.877358
| 9
| 106
| 10.333333
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.084906
| 106
| 5
| 54
| 21.2
| 0.958763
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
|
0
| 6
|
c7e0d7e2709a4811813343696d6abf78774f9ccb
| 187
|
py
|
Python
|
nuagelearning/__init__.py
|
mcrts/nuage-learning
|
5aa567e55063d513060dccdc6fea54e8a0533175
|
[
"MIT"
] | 1
|
2022-03-30T18:39:41.000Z
|
2022-03-30T18:39:41.000Z
|
nuagelearning/__init__.py
|
mcrts/nuage-learning
|
5aa567e55063d513060dccdc6fea54e8a0533175
|
[
"MIT"
] | null | null | null |
nuagelearning/__init__.py
|
mcrts/nuage-learning
|
5aa567e55063d513060dccdc6fea54e8a0533175
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""nuage-learning: implementing federated learning using Kafka."""
from . import utils
from . import client
from . import server
from . import admin
from . import model
| 18.7
| 66
| 0.743316
| 25
| 187
| 5.56
| 0.64
| 0.359712
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00641
| 0.165775
| 187
| 9
| 67
| 20.777778
| 0.884615
| 0.40107
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
c7e889acd1271585a48f1a61de344e94d52bbd28
| 33
|
py
|
Python
|
00_Original/37_Anbindung_an_andere_Programmiersprachen/Alternative_Interpreter/IronPython/script.py
|
felixdittrich92/Python3_book
|
cd0e2b55aa72c51927d347b70199fb9ed928e06f
|
[
"MIT"
] | null | null | null |
00_Original/37_Anbindung_an_andere_Programmiersprachen/Alternative_Interpreter/IronPython/script.py
|
felixdittrich92/Python3_book
|
cd0e2b55aa72c51927d347b70199fb9ed928e06f
|
[
"MIT"
] | null | null | null |
00_Original/37_Anbindung_an_andere_Programmiersprachen/Alternative_Interpreter/IronPython/script.py
|
felixdittrich92/Python3_book
|
cd0e2b55aa72c51927d347b70199fb9ed928e06f
|
[
"MIT"
] | null | null | null |
def quadrat(x):
return x**2
| 11
| 15
| 0.575758
| 6
| 33
| 3.166667
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.041667
| 0.272727
| 33
| 3
| 16
| 11
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
4014f4684862544e4c158c5e6465adacdc0e290f
| 525
|
py
|
Python
|
create_valid.py
|
juwangvsu/keras-yolo3-1
|
2ec448c38238a00c9bdd007b578e65db2e09f32a
|
[
"MIT"
] | null | null | null |
create_valid.py
|
juwangvsu/keras-yolo3-1
|
2ec448c38238a00c9bdd007b578e65db2e09f32a
|
[
"MIT"
] | null | null | null |
create_valid.py
|
juwangvsu/keras-yolo3-1
|
2ec448c38238a00c9bdd007b578e65db2e09f32a
|
[
"MIT"
] | null | null | null |
import os
import sys
from subprocess import Popen
f = open("valid_list.txt", "r")
for x in f:
print(x)
xx=x.split('.')[0]
Popen(['nohup', '/media/student/code1/keras-yolo3/create_link.sh', '/media/student/voc2012/VOCdevkit/VOC2012/val_image/'+xx+'.jpg','/media/student/voc2012/VOCdevkit/VOC2012/JPEGImages/'+xx+'.jpg'])
Popen(['nohup', '/media/student/code1/keras-yolo3/create_link.sh', '/media/student/voc2012/VOCdevkit/VOC2012/val_ann/'+xx+'.xml','/media/student/voc2012/VOCdevkit/VOC2012/Annotations/'+xx+'.xml'])
| 52.5
| 199
| 0.72381
| 78
| 525
| 4.807692
| 0.474359
| 0.192
| 0.202667
| 0.298667
| 0.650667
| 0.464
| 0.464
| 0.464
| 0.464
| 0.464
| 0
| 0.075051
| 0.060952
| 525
| 9
| 200
| 58.333333
| 0.685598
| 0
| 0
| 0
| 0
| 0
| 0.649524
| 0.569524
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0.111111
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|