Dataset schema (113 columns, listed as column name: dtype). Each record below gives its values in this column order.

hexsha: string | size: int64 | ext: string | lang: string
max_stars_repo_path: string | max_stars_repo_name: string | max_stars_repo_head_hexsha: string | max_stars_repo_licenses: list | max_stars_count: int64 | max_stars_repo_stars_event_min_datetime: string | max_stars_repo_stars_event_max_datetime: string
max_issues_repo_path: string | max_issues_repo_name: string | max_issues_repo_head_hexsha: string | max_issues_repo_licenses: list | max_issues_count: int64 | max_issues_repo_issues_event_min_datetime: string | max_issues_repo_issues_event_max_datetime: string
max_forks_repo_path: string | max_forks_repo_name: string | max_forks_repo_head_hexsha: string | max_forks_repo_licenses: list | max_forks_count: int64 | max_forks_repo_forks_event_min_datetime: string | max_forks_repo_forks_event_max_datetime: string
content: string | avg_line_length: float64 | max_line_length: int64 | alphanum_fraction: float64
qsc_code_num_words_quality_signal: int64 | qsc_code_num_chars_quality_signal: float64 | qsc_code_mean_word_length_quality_signal: float64 | qsc_code_frac_words_unique_quality_signal: float64
qsc_code_frac_chars_top_2grams_quality_signal: float64 | qsc_code_frac_chars_top_3grams_quality_signal: float64 | qsc_code_frac_chars_top_4grams_quality_signal: float64
qsc_code_frac_chars_dupe_5grams_quality_signal: float64 | qsc_code_frac_chars_dupe_6grams_quality_signal: float64 | qsc_code_frac_chars_dupe_7grams_quality_signal: float64 | qsc_code_frac_chars_dupe_8grams_quality_signal: float64 | qsc_code_frac_chars_dupe_9grams_quality_signal: float64 | qsc_code_frac_chars_dupe_10grams_quality_signal: float64
qsc_code_frac_chars_replacement_symbols_quality_signal: float64 | qsc_code_frac_chars_digital_quality_signal: float64 | qsc_code_frac_chars_whitespace_quality_signal: float64 | qsc_code_size_file_byte_quality_signal: float64 | qsc_code_num_lines_quality_signal: float64 | qsc_code_num_chars_line_max_quality_signal: float64 | qsc_code_num_chars_line_mean_quality_signal: float64
qsc_code_frac_chars_alphabet_quality_signal: float64 | qsc_code_frac_chars_comments_quality_signal: float64 | qsc_code_cate_xml_start_quality_signal: float64 | qsc_code_frac_lines_dupe_lines_quality_signal: float64 | qsc_code_cate_autogen_quality_signal: float64
qsc_code_frac_lines_long_string_quality_signal: float64 | qsc_code_frac_chars_string_length_quality_signal: float64 | qsc_code_frac_chars_long_word_length_quality_signal: float64 | qsc_code_frac_lines_string_concat_quality_signal: float64 | qsc_code_cate_encoded_data_quality_signal: float64 | qsc_code_frac_chars_hex_words_quality_signal: float64
qsc_code_frac_lines_prompt_comments_quality_signal: float64 | qsc_code_frac_lines_assert_quality_signal: float64
qsc_codepython_cate_ast_quality_signal: float64 | qsc_codepython_frac_lines_func_ratio_quality_signal: float64 | qsc_codepython_cate_var_zero_quality_signal: bool | qsc_codepython_frac_lines_pass_quality_signal: float64 | qsc_codepython_frac_lines_import_quality_signal: float64 | qsc_codepython_frac_lines_simplefunc_quality_signal: float64 | qsc_codepython_score_lines_no_logic_quality_signal: float64 | qsc_codepython_frac_lines_print_quality_signal: float64
qsc_code_num_words: int64 | qsc_code_num_chars: int64 | qsc_code_mean_word_length: int64 | qsc_code_frac_words_unique: null | qsc_code_frac_chars_top_2grams: int64 | qsc_code_frac_chars_top_3grams: int64 | qsc_code_frac_chars_top_4grams: int64
qsc_code_frac_chars_dupe_5grams: int64 | qsc_code_frac_chars_dupe_6grams: int64 | qsc_code_frac_chars_dupe_7grams: int64 | qsc_code_frac_chars_dupe_8grams: int64 | qsc_code_frac_chars_dupe_9grams: int64 | qsc_code_frac_chars_dupe_10grams: int64
qsc_code_frac_chars_replacement_symbols: int64 | qsc_code_frac_chars_digital: int64 | qsc_code_frac_chars_whitespace: int64 | qsc_code_size_file_byte: int64 | qsc_code_num_lines: int64 | qsc_code_num_chars_line_max: int64 | qsc_code_num_chars_line_mean: int64
qsc_code_frac_chars_alphabet: int64 | qsc_code_frac_chars_comments: int64 | qsc_code_cate_xml_start: int64 | qsc_code_frac_lines_dupe_lines: int64 | qsc_code_cate_autogen: int64
qsc_code_frac_lines_long_string: int64 | qsc_code_frac_chars_string_length: int64 | qsc_code_frac_chars_long_word_length: int64 | qsc_code_frac_lines_string_concat: null | qsc_code_cate_encoded_data: int64 | qsc_code_frac_chars_hex_words: int64
qsc_code_frac_lines_prompt_comments: int64 | qsc_code_frac_lines_assert: int64
qsc_codepython_cate_ast: int64 | qsc_codepython_frac_lines_func_ratio: int64 | qsc_codepython_cate_var_zero: int64 | qsc_codepython_frac_lines_pass: int64 | qsc_codepython_frac_lines_import: int64 | qsc_codepython_frac_lines_simplefunc: int64 | qsc_codepython_score_lines_no_logic: int64 | qsc_codepython_frac_lines_print: int64
effective: string | hits: int64
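The records below are easier to query programmatically than to read as flattened rows. As a minimal sketch, assuming the same rows are available as a Parquet file (the file name and the filter thresholds here are illustrative, not taken from the dataset), they could be loaded with pandas and filtered on a few of the quality signals:

```python
import pandas as pd

# Hypothetical file name: assumes the records shown below were exported to Parquet.
df = pd.read_parquet("code_sample.parquet")

# Keep Python files that are not dominated by duplicated 5-grams and that
# have a reasonable maximum line length (thresholds chosen for illustration).
mask = (
    (df["lang"] == "Python")
    & (df["qsc_code_frac_chars_dupe_5grams_quality_signal"] < 0.5)
    & (df["max_line_length"] <= 120)
)
subset = df.loc[mask, ["max_stars_repo_name", "max_stars_repo_path", "size", "alphanum_fraction"]]
print(subset.head())
```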
hexsha: 3be947a82f13de6d26fc798282d17c1307b2aaf7 | size: 257 | ext: py | lang: Python
max_stars_repo_path: ex-mundo3/ex107/moeda.py | max_stars_repo_name: PedroPegado/ex-cursoemvideo | max_stars_repo_head_hexsha: 46751a7238e6a142b639c4cc3acf1759411732d7 | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: ex-mundo3/ex107/moeda.py | max_issues_repo_name: PedroPegado/ex-cursoemvideo | max_issues_repo_head_hexsha: 46751a7238e6a142b639c4cc3acf1759411732d7 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: ex-mundo3/ex107/moeda.py | max_forks_repo_name: PedroPegado/ex-cursoemvideo | max_forks_repo_head_hexsha: 46751a7238e6a142b639c4cc3acf1759411732d7 | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
def aumentar(preco, taxa):
    p = preco + (preco * taxa/100)
    return p


def diminuir(preco, taxa):
    p = preco - (preco * taxa/100)
    return p


def dobro(preco):
    p = preco * 2
    return p


def metade(preco):
    p = preco / 2
    return p
Remaining column values for this record (avg_line_length through hits, in schema order):
| 12.238095
| 34
| 0.564202
| 38
| 257
| 3.815789
| 0.289474
| 0.248276
| 0.206897
| 0.206897
| 0.772414
| 0.772414
| 0.510345
| 0.510345
| 0.510345
| 0.510345
| 0
| 0.045714
| 0.319066
| 257
| 20
| 35
| 12.85
| 0.782857
| 0
| 0
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0
| 0.666667
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
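Several of the simpler columns in the record above (for example avg_line_length, max_line_length, alphanum_fraction) are plain string statistics over the content field. The helper below is an illustrative sketch of how such values could be computed; the exact definitions used to build this dataset are not given here, so the formulas (and the treatment of blank lines) are assumptions and may not reproduce the recorded numbers exactly.

```python
# Illustrative only: assumed definitions for a few of the simpler columns.
def simple_signals(content: str) -> dict:
    lines = content.splitlines()
    num_chars = len(content)
    alnum = sum(ch.isalnum() for ch in content)
    whitespace = sum(ch.isspace() for ch in content)
    return {
        "num_lines": len(lines),
        "max_line_length": max((len(line) for line in lines), default=0),
        "avg_line_length": num_chars / len(lines) if lines else 0.0,
        "alphanum_fraction": alnum / num_chars if num_chars else 0.0,
        "frac_chars_whitespace": whitespace / num_chars if num_chars else 0.0,
    }

# Example on a small inline snippet (not a dataset row).
sample = "def dobro(preco):\n    p = preco * 2\n    return p\n"
print(simple_signals(sample))
```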
hexsha: 3beb73cbef34b508a909878716873d4472cedd74 | size: 64 | ext: py | lang: Python
max_stars_repo_path: tftf/layers/activations/tanh.py | max_stars_repo_name: yusugomori/tftf | max_stars_repo_head_hexsha: e98b9ddffdbaa1fe04320437a47f12f3182ab6f3 | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: 35 | max_stars_repo_stars_event_min_datetime: 2018-08-11T05:01:41.000Z | max_stars_repo_stars_event_max_datetime: 2021-01-29T02:28:47.000Z
max_issues_repo_path: tftf/layers/activations/tanh.py | max_issues_repo_name: yusugomori/tftf | max_issues_repo_head_hexsha: e98b9ddffdbaa1fe04320437a47f12f3182ab6f3 | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: tftf/layers/activations/tanh.py | max_forks_repo_name: yusugomori/tftf | max_forks_repo_head_hexsha: e98b9ddffdbaa1fe04320437a47f12f3182ab6f3 | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: 4 | max_forks_repo_forks_event_min_datetime: 2018-10-19T14:12:04.000Z | max_forks_repo_forks_event_max_datetime: 2021-01-29T02:28:49.000Z
content:
import tensorflow as tf


def tanh(x):
    return tf.nn.tanh(x)
Remaining column values for this record (avg_line_length through hits, in schema order):
| 10.666667
| 24
| 0.671875
| 12
| 64
| 3.583333
| 0.75
| 0.232558
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.21875
| 64
| 5
| 25
| 12.8
| 0.86
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| 0
|
0
| 6
|
hexsha: ce14ba7248ea553bc8bf340da9e895166445335c | size: 47 | ext: py | lang: Python
max_stars_repo_path: libs/messaging_service/__init__.py | max_stars_repo_name: wip-abramson/aries-jupyter-playground | max_stars_repo_head_hexsha: 872f1a319f9072d7160298fcce82fb64c93d7397 | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: 6 | max_stars_repo_stars_event_min_datetime: 2021-05-27T12:51:32.000Z | max_stars_repo_stars_event_max_datetime: 2022-01-11T05:49:12.000Z
max_issues_repo_path: libs/messaging_service/__init__.py | max_issues_repo_name: SoftwareImpacts/SIMPAC-2021-64 | max_issues_repo_head_hexsha: 4089946109e05516bbea70359d3bf1d02b245f4a | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: 2 | max_issues_repo_issues_event_min_datetime: 2021-10-05T07:38:05.000Z | max_issues_repo_issues_event_max_datetime: 2022-02-10T11:38:18.000Z
max_forks_repo_path: libs/messaging_service/__init__.py | max_forks_repo_name: SoftwareImpacts/SIMPAC-2021-64 | max_forks_repo_head_hexsha: 4089946109e05516bbea70359d3bf1d02b245f4a | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: 7 | max_forks_repo_forks_event_min_datetime: 2021-04-22T14:18:06.000Z | max_forks_repo_forks_event_max_datetime: 2022-02-14T10:30:52.000Z
content:
from .messaging_service import MessagingService
Remaining column values for this record (avg_line_length through hits, in schema order):
| 47
| 47
| 0.914894
| 5
| 47
| 8.4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.06383
| 47
| 1
| 47
| 47
| 0.954545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
hexsha: ce1579bf8768e7cef70aebd7b3896b98ea1a0187 | size: 54 | ext: py | lang: Python
max_stars_repo_path: networkx-d3-v2/networkx/tests/__init__.py | max_stars_repo_name: suraj-testing2/Clock_Websites | max_stars_repo_head_hexsha: 0e65331da40cfd3766f1bde17f0a9c7ff6666dea | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: networkx-d3-v2/networkx/tests/__init__.py | max_issues_repo_name: suraj-testing2/Clock_Websites | max_issues_repo_head_hexsha: 0e65331da40cfd3766f1bde17f0a9c7ff6666dea | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: networkx-d3-v2/networkx/tests/__init__.py | max_forks_repo_name: suraj-testing2/Clock_Websites | max_forks_repo_head_hexsha: 0e65331da40cfd3766f1bde17f0a9c7ff6666dea | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
from .utils_tests import *
from .views_tests import *
Remaining column values for this record (avg_line_length through hits, in schema order):
| 18
| 26
| 0.777778
| 8
| 54
| 5
| 0.625
| 0.55
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148148
| 54
| 2
| 27
| 27
| 0.869565
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
hexsha: ce21a48448d28f3cf598b5cbc7c2ecedcc9ebfb2 | size: 46,925 | ext: py | lang: Python
max_stars_repo_path: tests/unittests/test_mock_network_plugin_public_nat.py | max_stars_repo_name: cloudify-cosmo/tosca-vcloud-plugin | max_stars_repo_head_hexsha: c5196abd066ba5315b66911e5390b0ed6c15988f | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: 4 | max_stars_repo_stars_event_min_datetime: 2015-02-25T12:39:01.000Z | max_stars_repo_stars_event_max_datetime: 2018-02-14T15:14:16.000Z
max_issues_repo_path: tests/unittests/test_mock_network_plugin_public_nat.py | max_issues_repo_name: cloudify-cosmo/tosca-vcloud-plugin | max_issues_repo_head_hexsha: c5196abd066ba5315b66911e5390b0ed6c15988f | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: 45 | max_issues_repo_issues_event_min_datetime: 2015-01-13T13:55:10.000Z | max_issues_repo_issues_event_max_datetime: 2020-02-04T15:06:15.000Z
max_forks_repo_path: tests/unittests/test_mock_network_plugin_public_nat.py | max_forks_repo_name: cloudify-cosmo/tosca-vcloud-plugin | max_forks_repo_head_hexsha: c5196abd066ba5315b66911e5390b0ed6c15988f | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: 21 | max_forks_repo_forks_event_min_datetime: 2015-01-21T17:17:18.000Z | max_forks_repo_forks_event_max_datetime: 2021-05-05T14:08:25.000Z
content:
# Copyright (c) 2014-2020 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import unittest
from cloudify import exceptions as cfy_exc
from tests.unittests import test_mock_base
from vcloud_network_plugin import public_nat
from vcloud_network_plugin import utils
import vcloud_network_plugin
import vcloud_plugin_common
from IPy import IP
class NetworkPluginPublicNatMockTestCase(test_mock_base.TestBase):
def test_is_rule_exists(self):
rule_inlist = self.generate_nat_rule(
'SNAT', 'external', '22', 'internal', '11', 'TCP'
)
# exist
self.assertTrue(
public_nat._is_rule_exists(
[rule_inlist], 'SNAT', 'external', '22', 'internal',
'11', 'TCP')
)
# not exist
self.assertFalse(
public_nat._is_rule_exists(
[rule_inlist], 'DNAT', 'external', '22', 'internal',
'11', 'UDP')
)
def test_get_original_port_for_delete(self):
# no replacement
fake_ctx = self.generate_relation_context_with_current_ctx()
fake_ctx._target.instance.runtime_properties = {
public_nat.PORT_REPLACEMENT: {}}
self.assertEqual(
public_nat._get_original_port_for_delete(
fake_ctx, "10.1.1.1", "11"),
"11"
)
# replacement for other
fake_ctx = self.generate_relation_context_with_current_ctx()
fake_ctx._target.instance.runtime_properties = {
public_nat.PORT_REPLACEMENT: {
"10.1.1.2:11": '12'
}
}
self.assertEqual(
public_nat._get_original_port_for_delete(
fake_ctx, "10.1.1.1", "11"),
"11"
)
# replacement for other
fake_ctx = self.generate_relation_context_with_current_ctx()
fake_ctx._target.instance.runtime_properties = {
public_nat.PORT_REPLACEMENT: {
"10.1.1.2:11": '12'
}
}
self.assertEqual(
public_nat._get_original_port_for_delete(
fake_ctx, "10.1.1.2", "11"),
"12"
)
def test_get_original_port_for_create(self):
gateway = mock.Mock()
fake_ctx = self.generate_relation_context_with_current_ctx()
rule_inlist = self.generate_nat_rule(
'DNAT', 'external', 'any', 'internal', '11', 'TCP')
gateway.get_nat_rules = mock.MagicMock(return_value=[rule_inlist])
# exception about same port
with self.assertRaises(cfy_exc.NonRecoverableError):
public_nat._get_original_port_for_create(
fake_ctx, gateway, 'DNAT', 'external', 'any', 'internal',
'11', 'TCP'
)
# everything fine with different port
self.assertEqual(
public_nat._get_original_port_for_create(
fake_ctx, gateway, 'DNAT', 'external', '12', 'internal',
'12', 'TCP'
),
12)
# relink some port to other
# port has not been used yet
self.assertEqual(
public_nat._get_original_port_for_create(
fake_ctx, gateway, 'SNAT', 'external', 13, 'internal',
'12', 'TCP'),
13)
def test_get_original_port_for_create_with_ctx(self):
# with replace, but without replace table - up port +1
fake_ctx = self.generate_relation_context_with_current_ctx()
fake_ctx._target.instance.runtime_properties = {
public_nat.PORT_REPLACEMENT: {}
}
gateway = mock.Mock()
rule_inlist = self.generate_nat_rule(
'SNAT', 'external', 10, 'internal', 11, 'TCP'
)
gateway.get_nat_rules = mock.MagicMock(return_value=[rule_inlist])
self.assertEqual(
public_nat._get_original_port_for_create(
fake_ctx, gateway, 'SNAT', 'external', '10', 'internal',
'11', 'TCP'
),
11
)
self.assertEqual(
fake_ctx._target.instance.runtime_properties,
{
public_nat.PORT_REPLACEMENT: {
'external:10': 11
}
}
)
# same but without replacement at all
fake_ctx._target.instance.runtime_properties = {}
self.assertEqual(
public_nat._get_original_port_for_create(
fake_ctx, gateway, 'SNAT', 'external', '10', 'internal',
'11', 'TCP'
),
11
)
self.assertEqual(
fake_ctx._target.instance.runtime_properties,
{
public_nat.PORT_REPLACEMENT: {
'external:10': 11
}
}
)
# we don't have enough ports
rule_inlist = self.generate_nat_rule(
'SNAT', 'external', utils.MAX_PORT_NUMBER,
'internal', 11, 'TCP'
)
gateway.get_nat_rules = mock.MagicMock(return_value=[rule_inlist])
fake_ctx._target.instance.runtime_properties = {}
with self.assertRaises(cfy_exc.NonRecoverableError):
public_nat._get_original_port_for_create(
fake_ctx, gateway, 'SNAT', 'external',
utils.MAX_PORT_NUMBER, 'internal', '11', 'TCP'
)
def test_get_gateway_ip_range(self):
gate = mock.Mock()
# empty list of networks
gate.get_dhcp_pools = mock.MagicMock(return_value=[])
self.assertEqual(
public_nat._get_gateway_ip_range(gate, 'something'),
None
)
# exist other network
gate.get_dhcp_pools = mock.MagicMock(return_value=[
self.genarate_pool(
'test_network', '127.0.0.1', '127.0.0.255'
)
])
self.assertEqual(
public_nat._get_gateway_ip_range(gate, 'something'),
None
)
# exist correct network
self.assertEqual(
public_nat._get_gateway_ip_range(gate, 'test_network'),
(IP('127.0.0.1'), IP('127.0.0.255'))
)
def test_obtain_public_ip(self):
fake_ctx = self.generate_relation_context_with_current_ctx()
fake_ctx._target.instance.runtime_properties = {
vcloud_network_plugin.PUBLIC_IP: '192.168.1.1'
}
gateway = mock.Mock()
fake_client = mock.Mock()
# exist some ip for delete
self.assertEqual(
public_nat._obtain_public_ip(
fake_client, fake_ctx, gateway, vcloud_network_plugin.DELETE
),
'192.168.1.1'
)
# no ip for delete
fake_ctx._target.instance.runtime_properties = {}
with self.assertRaises(cfy_exc.NonRecoverableError):
public_nat._obtain_public_ip(
fake_client, fake_ctx, gateway, vcloud_network_plugin.DELETE
)
# unknown operation
with self.assertRaises(cfy_exc.NonRecoverableError):
public_nat._obtain_public_ip(
fake_client, fake_ctx, gateway, 'unknow operation'
)
# exist some public ip
fake_ctx._target.node.properties = {
'nat': {
vcloud_network_plugin.PUBLIC_IP: '192.168.1.1'
}
}
self.assertEqual(
public_nat._obtain_public_ip(
fake_client, fake_ctx, gateway, vcloud_network_plugin.CREATE
),
'192.168.1.1'
)
# no public ip yet
fake_ctx._target.node.properties = {
'nat': {}
}
fake_ctx._source.node.properties = {
'vcloud_config': {
'vdc': 'vdc_name',
'service_type': vcloud_plugin_common.SUBSCRIPTION_SERVICE_TYPE
}
}
gateway.get_public_ips = mock.MagicMock(return_value=[
'10.18.1.1', '10.18.1.2'
])
rule_inlist = self.generate_nat_rule(
'DNAT', '10.18.1.1', 'any', 'internal', '11', 'TCP'
)
gateway.get_nat_rules = mock.MagicMock(
return_value=[rule_inlist]
)
with mock.patch(
'vcloud_plugin_common.ctx', fake_ctx
):
self.assertEqual(
public_nat._obtain_public_ip(
fake_client, fake_ctx, gateway,
vcloud_network_plugin.CREATE
),
'10.18.1.2'
)
def test_get_network_ip_range(self):
# don't have an ip range for this network
fake_client = self.generate_client()
self.assertEqual(
public_nat._get_network_ip_range(
fake_client, "some_org", "some_network"
),
None
)
fake_client.get_networks.assert_called_with("some_org")
# different network
network = self.generate_fake_client_network(
name="some", start_ip="127.1.1.1", end_ip="127.1.1.255"
)
fake_client.get_networks = mock.MagicMock(return_value=[network])
self.assertEqual(
public_nat._get_network_ip_range(
fake_client, "some_org", "some_network"
),
None
)
# correct network name
fake_client.get_networks = mock.MagicMock(return_value=[network])
self.assertEqual(
public_nat._get_network_ip_range(
fake_client, "some_org", "some"
),
(IP('127.1.1.1'), IP('127.1.1.255'))
)
def test_create_ip_range(self):
# context
fake_ctx = self.generate_relation_context_with_current_ctx()
fake_ctx._source.instance.runtime_properties = {
vcloud_network_plugin.network.VCLOUD_NETWORK_NAME: "some"
}
fake_ctx._source.node.properties = {
'vcloud_config': {
'org': 'some_org',
'vdc': 'some_vdc'
}
}
fake_ctx._target.instance.runtime_properties = {}
# vca client
fake_client = self.generate_client()
# gateway
gate = fake_client._vdc_gateway
gate.get_dhcp_pools = mock.MagicMock(return_value=[])
network = self.generate_fake_client_network(
name="some", start_ip="127.1.1.100", end_ip="127.1.1.200"
)
fake_client.get_networks = mock.MagicMock(return_value=[network])
with mock.patch(
'vcloud_plugin_common.ctx', fake_ctx
):
# empty gateway dhcp pool
# vca pool: 127.1.1.100..127.1.1.200
self.assertEqual(
public_nat._create_ip_range(fake_ctx, fake_client, gate),
'127.1.1.100 - 127.1.1.200'
)
fake_client.get_networks.assert_called_with("some_vdc")
# network from gate
gate.get_dhcp_pools = mock.MagicMock(return_value=[
self.genarate_pool(
"some", '127.1.1.1', '127.1.1.255'
)
])
self.assertEqual(
public_nat._create_ip_range(fake_ctx, fake_client, gate),
'127.1.1.1 - 127.1.1.255'
)
# network not exist
network = self.generate_fake_client_network(
name="other", start_ip="127.1.1.100",
end_ip="127.1.1.200"
)
fake_client.get_networks = mock.MagicMock(
return_value=[network]
)
with self.assertRaises(cfy_exc.NonRecoverableError):
public_nat._create_ip_range(fake_ctx, fake_client, gate)
def test_save_configuration(self):
def _context_for_delete(service_type):
"""
create correct context for delete
"""
fake_ctx = self.generate_relation_context_with_current_ctx()
self.set_services_conf_result(
gateway, vcloud_plugin_common.TASK_STATUS_SUCCESS
)
fake_ctx._target.instance.runtime_properties = {
vcloud_network_plugin.PUBLIC_IP: "1.2.3.4",
public_nat.PORT_REPLACEMENT: {
'127.0.0.1:10': '100'
},
vcloud_network_plugin.SSH_PORT: '23',
vcloud_network_plugin.SSH_PUBLIC_IP: '10.1.1.1'
}
properties = {
'vcloud_config': {
'edge_gateway': 'gateway',
'vdc': 'vdc',
'org': 'some_org'
}
}
if service_type:
properties['vcloud_config']['service_type'] = service_type
fake_ctx._source.node.properties = properties
return fake_ctx
def _ip_exist_in_runtime(fake_ctx):
"""
ip still exist in ctx
"""
runtime_properties = fake_ctx._target.instance.runtime_properties
return vcloud_network_plugin.PUBLIC_IP in runtime_properties
fake_client = self.generate_client()
gateway = fake_client._vdc_gateway
# can't save configuration: server busy
self.set_services_conf_result(
gateway, None
)
self.set_gateway_busy(gateway)
fake_ctx = self.generate_relation_context_with_current_ctx()
self.assertFalse(public_nat._save_configuration(
fake_ctx, gateway, fake_client, vcloud_network_plugin.CREATE,
"1.2.3.4"
))
# operation create
fake_ctx = self.generate_relation_context_with_current_ctx()
self.set_services_conf_result(
gateway, vcloud_plugin_common.TASK_STATUS_SUCCESS
)
# success save configuration
with mock.patch('vcloud_plugin_common.ctx', fake_ctx):
public_nat._save_configuration(
fake_ctx, gateway, fake_client, vcloud_network_plugin.CREATE,
"1.2.3.4")
self.assertEqual(
fake_ctx._target.instance.runtime_properties,
{
vcloud_network_plugin.PUBLIC_IP: "1.2.3.4"
}
)
# delete - subscription service
fake_ctx = _context_for_delete(
vcloud_plugin_common.SUBSCRIPTION_SERVICE_TYPE
)
with mock.patch(
'vcloud_plugin_common.ctx', fake_ctx
):
public_nat._save_configuration(
fake_ctx, gateway, fake_client, vcloud_network_plugin.DELETE,
"1.2.3.4"
)
self.assertFalse(_ip_exist_in_runtime(fake_ctx))
# delete - without service
fake_ctx = _context_for_delete(None)
with mock.patch(
'vcloud_plugin_common.ctx', fake_ctx
):
public_nat._save_configuration(
fake_ctx, gateway, fake_client, vcloud_network_plugin.DELETE,
"1.2.3.4"
)
self.assertFalse(_ip_exist_in_runtime(fake_ctx))
# delete - ondemand service - nat
fake_ctx = _context_for_delete(
vcloud_plugin_common.ONDEMAND_SERVICE_TYPE
)
fake_ctx._target.node.properties = {
'nat': {
vcloud_network_plugin.PUBLIC_IP: "1.2.3.4"
}
}
with mock.patch(
'vcloud_plugin_common.ctx', fake_ctx
):
public_nat._save_configuration(
fake_ctx, gateway, fake_client, vcloud_network_plugin.DELETE,
"1.2.3.4"
)
self.assertFalse(_ip_exist_in_runtime(fake_ctx))
# delete - ondemand - not nat
gateway.deallocate_public_ip = mock.MagicMock(
return_value=self.generate_task(
vcloud_plugin_common.TASK_STATUS_SUCCESS
)
)
fake_ctx = _context_for_delete(
vcloud_plugin_common.ONDEMAND_SERVICE_TYPE
)
fake_ctx._target.node.properties = {
'nat': {}
}
with mock.patch(
'vcloud_plugin_common.ctx', fake_ctx
):
# import pdb;pdb.set_trace()
public_nat._save_configuration(
fake_ctx, gateway, fake_client, vcloud_network_plugin.DELETE,
"1.2.3.4"
)
gateway.deallocate_public_ip.assert_called_with("1.2.3.4")
self.assertFalse(_ip_exist_in_runtime(fake_ctx))
runtime_properties = fake_ctx._target.instance.runtime_properties
self.assertFalse(
public_nat.PORT_REPLACEMENT in runtime_properties
)
self.assertFalse(
vcloud_network_plugin.SSH_PORT in runtime_properties
)
self.assertFalse(
vcloud_network_plugin.SSH_PUBLIC_IP in runtime_properties
)
def test_nat_network_operation(self):
fake_client = self.generate_client()
fake_ctx = self.generate_relation_context_with_current_ctx()
gateway = fake_client._vdc_gateway
# used wrong operation
with self.assertRaises(cfy_exc.NonRecoverableError):
public_nat.nat_network_operation(
fake_ctx, fake_client, gateway, "unknow", "DNAT", "1.2.3.4",
"2.3.4.5", "11", "11", "TCP"
)
# run correct operation/rule
for operation in [
vcloud_network_plugin.DELETE, vcloud_network_plugin.CREATE
]:
for rule_type in ["SNAT", "DNAT"]:
# cleanup properties
fake_ctx = self.generate_relation_context_with_current_ctx()
fake_ctx._target.instance.runtime_properties = {
public_nat.PORT_REPLACEMENT: {}}
fake_ctx._source.instance.runtime_properties = {}
# checks
with mock.patch(
'vcloud_plugin_common.ctx', fake_ctx
):
public_nat.nat_network_operation(
fake_ctx, fake_client, gateway, operation,
rule_type,
"1.2.3.4", "2.3.4.5", "11", "11", "TCP"
)
if rule_type == "DNAT":
if operation == vcloud_network_plugin.DELETE:
gateway.del_nat_rule.assert_called_with(
'DNAT', '1.2.3.4', '11', '2.3.4.5', '11',
'TCP'
)
else:
gateway.add_nat_rule.assert_called_with(
'DNAT', '1.2.3.4', '11', '2.3.4.5', '11',
'TCP'
)
else:
if operation == vcloud_network_plugin.DELETE:
gateway.del_nat_rule.assert_called_with(
'SNAT', '2.3.4.5', 'any', '1.2.3.4', 'any',
'any'
)
else:
gateway.add_nat_rule.assert_called_with(
'SNAT', '2.3.4.5', 'any', '1.2.3.4', 'any',
'any'
)
# cleanup properties
fake_ctx = self.generate_relation_context_with_current_ctx()
fake_ctx._target.instance.runtime_properties = {
public_nat.PORT_REPLACEMENT: {}}
fake_ctx._source.instance.runtime_properties = {}
# save ssh port
with mock.patch(
'vcloud_plugin_common.ctx', fake_ctx
):
public_nat.nat_network_operation(
fake_ctx, fake_client, gateway,
vcloud_network_plugin.CREATE,
"DNAT", "1.2.3.4", "2.3.4.5", "43", "22", "TCP"
)
self.assertEqual(
{'port_replacement': {'1.2.3.4:43': 43}},
fake_ctx._target.instance.runtime_properties
)
self.assertEqual(
{'ssh_port': '43', 'ssh_public_ip': '1.2.3.4'},
fake_ctx._source.instance.runtime_properties
)
# error with type
with self.assertRaises(cfy_exc.NonRecoverableError):
public_nat.nat_network_operation(
fake_ctx, fake_client, gateway,
vcloud_network_plugin.CREATE,
"QNAT", "1.2.3.4", "2.3.4.5", "43", "22", "TCP"
)
def generate_client_and_context_server(self, no_vmip=False):
"""
for test prepare_server_operation based operations
"""
vm_ip = '1.1.1.1' if not no_vmip else None
fake_client = self.generate_client(vms_networks=[{
'is_connected': True,
'network_name': 'network_name',
'is_primary': True,
'ip': vm_ip
}])
self.set_network_routed_in_client(fake_client)
fake_ctx = self.generate_relation_context_with_current_ctx()
fake_ctx._target.node.properties = {
'nat': {
'edge_gateway': 'gateway'
}
}
fake_ctx._source.node.properties = {
'vcloud_config': {
'vdc': 'vdc_name',
'service_type':
vcloud_plugin_common.SUBSCRIPTION_SERVICE_TYPE
}
}
fake_ctx._target.instance.runtime_properties = {
vcloud_network_plugin.PUBLIC_IP: '192.168.1.1'
}
self.set_services_conf_result(
fake_client._vdc_gateway,
vcloud_plugin_common.TASK_STATUS_SUCCESS
)
return fake_client, fake_ctx
def test_prepare_server_operation(self):
fake_client, fake_ctx = self.generate_client_and_context_server()
# no rules for update
with mock.patch(
'vcloud_plugin_common.ctx', fake_ctx
):
with self.assertRaises(cfy_exc.NonRecoverableError):
public_nat.prepare_server_operation(
fake_ctx, fake_client, vcloud_network_plugin.DELETE
)
# public ip equal to None in node properties
fake_client, fake_ctx = self.generate_client_and_context_server()
fake_ctx._target.node.properties = {
'nat': {
'edge_gateway': 'gateway'
},
'rules': [{
'type': 'DNAT',
'protocol': 'TCP',
'original_port': "11",
'translated_port': "11"
}]
}
fake_ctx._target.instance.runtime_properties = {
vcloud_network_plugin.PUBLIC_IP: None
}
with mock.patch(
'vcloud_plugin_common.ctx', fake_ctx
):
self.assertFalse(
public_nat.prepare_server_operation(
fake_ctx, fake_client, vcloud_network_plugin.DELETE
)
)
# we don't have a connected private ip
fake_client, fake_ctx = self.generate_client_and_context_server(
no_vmip=True
)
fake_ctx._target.node.properties = {
'nat': {
'edge_gateway': 'gateway'
},
'rules': [{
'type': 'DNAT',
'protocol': 'TCP',
'original_port': "11",
'translated_port': "11"
}]
}
with mock.patch(
'vcloud_plugin_common.ctx', fake_ctx
):
self.assertFalse(
public_nat.prepare_server_operation(
fake_ctx, fake_client, vcloud_network_plugin.DELETE
)
)
# with some rules
fake_client, fake_ctx = self.generate_client_and_context_server()
fake_ctx._target.node.properties = {
'nat': {
'edge_gateway': 'gateway'
},
'rules': [{
'type': 'DNAT',
'protocol': 'TCP',
'original_port': "11",
'translated_port': "11"
}]
}
with mock.patch(
'vcloud_plugin_common.ctx', fake_ctx
):
public_nat.prepare_server_operation(
fake_ctx, fake_client, vcloud_network_plugin.DELETE
)
fake_client._vdc_gateway.del_nat_rule.assert_called_with(
'DNAT', '192.168.1.1', '11', '1.1.1.1', '11', 'TCP'
)
# with default value
fake_client, fake_ctx = self.generate_client_and_context_server()
fake_ctx._target.instance.runtime_properties = {
vcloud_network_plugin.PUBLIC_IP: '192.168.1.1'
}
fake_ctx._target.node.properties = {
'nat': {
'edge_gateway': 'gateway'
},
'rules': [{
'type': 'DNAT'
}]
}
with mock.patch(
'vcloud_plugin_common.ctx', fake_ctx
):
public_nat.prepare_server_operation(
fake_ctx, fake_client, vcloud_network_plugin.DELETE
)
fake_client._vdc_gateway.del_nat_rule.assert_called_with(
'DNAT', '192.168.1.1', 'any', '1.1.1.1', 'any', 'any'
)
# with SNAT rules
fake_client, fake_ctx = self.generate_client_and_context_server()
fake_ctx._target.node.properties = {
'nat': {
'edge_gateway': 'gateway'
},
'rules': [{'type': 'SNAT'}, {'type': 'SNAT'}]
}
with mock.patch(
'vcloud_plugin_common.ctx', fake_ctx
):
public_nat.prepare_server_operation(
fake_ctx, fake_client, vcloud_network_plugin.DELETE
)
fake_client._vdc_gateway.del_nat_rule.assert_called_with(
'SNAT', '1.1.1.1', 'any', '192.168.1.1', 'any', 'any'
)
def generate_client_and_context_network(self):
"""
for test prepare_network_operation based operations
"""
fake_client = self.generate_client(vms_networks=[{
'is_connected': True,
'network_name': 'network_name',
'is_primary': True,
'ip': '1.1.1.1'
}])
self.set_network_routed_in_client(fake_client)
gate = fake_client._vdc_gateway
gate.get_dhcp_pools = mock.MagicMock(return_value=[])
network = self.generate_fake_client_network(
name="some", start_ip="127.1.1.100", end_ip="127.1.1.200"
)
fake_client.get_networks = mock.MagicMock(return_value=[network])
self.set_services_conf_result(
fake_client._vdc_gateway,
vcloud_plugin_common.TASK_STATUS_SUCCESS
)
# ctx
fake_ctx = self.generate_relation_context_with_current_ctx()
fake_ctx._source.instance.runtime_properties = {
vcloud_network_plugin.network.VCLOUD_NETWORK_NAME: "some"
}
fake_ctx._source.node.properties = {
'vcloud_config': {
'org': 'some_org',
'vdc': 'vdc_name',
'service_type':
vcloud_plugin_common.SUBSCRIPTION_SERVICE_TYPE
}
}
fake_ctx._target.node.properties = {
'nat': {
'edge_gateway': 'gateway'
}
}
fake_ctx._target.instance.runtime_properties = {
vcloud_network_plugin.PUBLIC_IP: '192.168.1.1'
}
return fake_client, fake_ctx
def test_prepare_network_operation(self):
# no rules
fake_client, fake_ctx = self.generate_client_and_context_network()
with mock.patch(
'vcloud_plugin_common.ctx', fake_ctx
):
with self.assertRaises(cfy_exc.NonRecoverableError):
public_nat.prepare_network_operation(
fake_ctx, fake_client, vcloud_network_plugin.DELETE
)
# public ip equal to None in node properties
fake_client, fake_ctx = self.generate_client_and_context_network()
fake_ctx._target.instance.runtime_properties = {
vcloud_network_plugin.PUBLIC_IP: None
}
fake_ctx._target.node.properties = {
'nat': {
'edge_gateway': 'gateway'
},
'rules': [{
'type': 'DNAT',
}]
}
with mock.patch(
'vcloud_plugin_common.ctx', fake_ctx
):
self.assertFalse(
public_nat.prepare_network_operation(
fake_ctx, fake_client, vcloud_network_plugin.DELETE
)
)
# rules with default values
fake_client, fake_ctx = self.generate_client_and_context_network()
fake_ctx._target.node.properties = {
'nat': {
'edge_gateway': 'gateway'
},
'rules': [{
'type': 'DNAT'
}]
}
with mock.patch(
'vcloud_plugin_common.ctx', fake_ctx
):
public_nat.prepare_network_operation(
fake_ctx, fake_client, vcloud_network_plugin.DELETE
)
fake_client._vdc_gateway.del_nat_rule.assert_called_with(
'DNAT', '192.168.1.1', 'any', '127.1.1.100 - 127.1.1.200',
'any', 'any'
)
def test_creation_validation(self):
fake_client = self.generate_client()
# no nat
fake_ctx = self.generate_node_context_with_current_ctx(
properties={
'vcloud_config': {
'vdc': 'vdc_name'
}
}
)
with mock.patch(
'vcloud_plugin_common.VcloudAirClient.get',
mock.MagicMock(return_value=fake_client)
):
with self.assertRaises(cfy_exc.NonRecoverableError):
public_nat.creation_validation(ctx=fake_ctx, vca_client=None)
# no gateway
fake_ctx = self.generate_node_context_with_current_ctx(
properties={
'vcloud_config': {
'vdc': 'vdc_name'
},
'nat': {
'some_field': 'something'
}
}
)
with mock.patch(
'vcloud_plugin_common.VcloudAirClient.get',
mock.MagicMock(return_value=fake_client)
):
with self.assertRaises(cfy_exc.NonRecoverableError):
public_nat.creation_validation(ctx=fake_ctx, vca_client=None)
# wrong ip
fake_ctx = self.generate_node_context_with_current_ctx(
properties={
'vcloud_config': {
'vdc': 'vdc_name',
'service_type':
vcloud_plugin_common.SUBSCRIPTION_SERVICE_TYPE
},
'nat': {
'edge_gateway': 'gateway',
vcloud_network_plugin.PUBLIC_IP: 'any'
}
}
)
with mock.patch(
'vcloud_plugin_common.VcloudAirClient.get',
mock.MagicMock(return_value=fake_client)
):
with self.assertRaises(cfy_exc.NonRecoverableError):
public_nat.creation_validation(ctx=fake_ctx, vca_client=None)
# no free ip
fake_ctx = self.generate_node_context_with_current_ctx(
properties={
'vcloud_config': {
'vdc': 'vdc_name',
'service_type':
vcloud_plugin_common.SUBSCRIPTION_SERVICE_TYPE
},
'nat': {
'edge_gateway': 'gateway'
}
}
)
with mock.patch(
'vcloud_plugin_common.VcloudAirClient.get',
mock.MagicMock(return_value=fake_client)
):
with self.assertRaises(cfy_exc.NonRecoverableError):
public_nat.creation_validation(ctx=fake_ctx, vca_client=None)
# no rules
fake_ctx = self.generate_node_context_with_current_ctx(
properties={
'vcloud_config': {
'vdc': 'vdc_name',
'service_type':
vcloud_plugin_common.SUBSCRIPTION_SERVICE_TYPE
},
'nat': {
'edge_gateway': 'gateway',
vcloud_network_plugin.PUBLIC_IP: '10.12.2.1'
}
}
)
with mock.patch(
'vcloud_plugin_common.VcloudAirClient.get',
mock.MagicMock(return_value=fake_client)
):
with self.assertRaises(cfy_exc.NonRecoverableError):
public_nat.creation_validation(ctx=fake_ctx, vca_client=None)
# wrong protocol
fake_ctx = self.generate_node_context_with_current_ctx(
properties={
'vcloud_config': {
'vdc': 'vdc_name',
'service_type':
vcloud_plugin_common.SUBSCRIPTION_SERVICE_TYPE
},
'nat': {
'edge_gateway': 'gateway',
vcloud_network_plugin.PUBLIC_IP: '10.12.2.1'
},
'rules': [{
'type': 'DNAT',
'protocol': "some"
}]
}
)
with mock.patch(
'vcloud_plugin_common.VcloudAirClient.get',
mock.MagicMock(return_value=fake_client)
):
with self.assertRaises(cfy_exc.NonRecoverableError):
public_nat.creation_validation(ctx=fake_ctx, vca_client=None)
# wrong original_port
fake_ctx = self.generate_node_context_with_current_ctx(
properties={
'vcloud_config': {
'vdc': 'vdc_name',
'service_type':
vcloud_plugin_common.SUBSCRIPTION_SERVICE_TYPE
},
'nat': {
'edge_gateway': 'gateway',
vcloud_network_plugin.PUBLIC_IP: '10.12.2.1'
},
'rules': [{
'type': 'DNAT',
'protocol': "TCP",
'original_port': 'some'
}]
}
)
with mock.patch(
'vcloud_plugin_common.VcloudAirClient.get',
mock.MagicMock(return_value=fake_client)
):
with self.assertRaises(cfy_exc.NonRecoverableError):
public_nat.creation_validation(ctx=fake_ctx, vca_client=None)
# wrong original_port
fake_ctx = self.generate_node_context_with_current_ctx(
properties={
'vcloud_config': {
'vdc': 'vdc_name',
'service_type':
vcloud_plugin_common.SUBSCRIPTION_SERVICE_TYPE
},
'nat': {
'edge_gateway': 'gateway',
vcloud_network_plugin.PUBLIC_IP: '10.12.2.1'
},
'rules': [{
'type': 'DNAT',
'protocol': "TCP",
'original_port': 11,
'translated_port': 'some'
}]
}
)
with mock.patch(
'vcloud_plugin_common.VcloudAirClient.get',
mock.MagicMock(return_value=fake_client)
):
with self.assertRaises(cfy_exc.NonRecoverableError):
public_nat.creation_validation(ctx=fake_ctx, vca_client=None)
# fine
fake_ctx = self.generate_node_context_with_current_ctx(
properties={
'vcloud_config': {
'vdc': 'vdc_name',
'service_type':
vcloud_plugin_common.SUBSCRIPTION_SERVICE_TYPE
},
'nat': {
'edge_gateway': 'gateway',
vcloud_network_plugin.PUBLIC_IP: '10.12.2.1'
},
'rules': [{
'type': 'DNAT',
'protocol': "TCP",
'original_port': 11,
'translated_port': 12
}]
}
)
with mock.patch(
'vcloud_plugin_common.VcloudAirClient.get',
mock.MagicMock(return_value=fake_client)
):
public_nat.creation_validation(ctx=fake_ctx, vca_client=None)
def _server_disconnect_to_nat_noexternal(self):
fake_client, fake_ctx = self.generate_client_and_context_server()
fake_ctx._target.instance.runtime_properties = {
vcloud_network_plugin.PUBLIC_IP: '192.168.1.1'
}
fake_ctx._target.node.properties = {
'nat': {
'edge_gateway': 'gateway'
},
'rules': [{
'type': 'DNAT'
}]
}
fake_ctx._source.node.properties = {
'vcloud_config':
{
'edge_gateway': 'gateway',
'vdc': 'vdc'
}
}
fake_ctx._source.instance.runtime_properties = {
'gateway_lock': False,
'vcloud_vapp_name': 'vapp'
}
return fake_client, fake_ctx
def test_server_disconnect_from_nat(self):
# successful
fake_client, fake_ctx = self._server_disconnect_to_nat_noexternal()
with mock.patch(
'vcloud_plugin_common.VcloudAirClient.get',
mock.MagicMock(return_value=fake_client)
):
public_nat.server_disconnect_from_nat(ctx=fake_ctx,
vca_client=None)
fake_client._vdc_gateway.del_nat_rule.assert_called_with(
'DNAT', '192.168.1.1', 'any', '1.1.1.1', 'any', 'any'
)
# check retry
fake_client, fake_ctx = self._server_disconnect_to_nat_noexternal()
with mock.patch(
'vcloud_plugin_common.VcloudAirClient.get',
mock.MagicMock(return_value=fake_client)
):
self.prepere_gatway_busy_retry(fake_client, fake_ctx)
public_nat.server_disconnect_from_nat(ctx=fake_ctx,
vca_client=None)
self.check_retry_realy_called(fake_ctx)
def _server_connect_to_nat_noexternal(self):
fake_client, fake_ctx = self.generate_client_and_context_server()
fake_ctx._target.instance.runtime_properties = {
vcloud_network_plugin.PUBLIC_IP: '192.168.1.1'
}
fake_ctx._source.instance.runtime_properties = {
'gateway_lock': False,
'vcloud_vapp_name': 'vapp'
}
fake_ctx._source.node.properties = {
'vcloud_config':
{
'edge_gateway': 'gateway',
'vdc': 'vdc'
}
}
fake_ctx._target.node.properties = {
'nat': {
'edge_gateway': 'gateway'
},
'rules': [{
'type': 'DNAT'
}]
}
fake_client._vdc_gateway.get_public_ips = mock.MagicMock(
return_value=['10.18.1.1']
)
return fake_client, fake_ctx
def test_server_connect_to_nat(self):
fake_client, fake_ctx = self._server_connect_to_nat_noexternal()
with mock.patch(
'vcloud_plugin_common.VcloudAirClient.get',
mock.MagicMock(return_value=fake_client)
):
public_nat.server_connect_to_nat(ctx=fake_ctx, vca_client=None)
fake_client._vdc_gateway.add_nat_rule.assert_called_with(
'DNAT', '10.18.1.1', 'any', '1.1.1.1', 'any', 'any'
)
fake_client, fake_ctx = self._server_connect_to_nat_noexternal()
with mock.patch(
'vcloud_plugin_common.VcloudAirClient.get',
mock.MagicMock(return_value=fake_client)
):
self.prepere_gatway_busy_retry(fake_client, fake_ctx)
public_nat.server_connect_to_nat(ctx=fake_ctx, vca_client=None)
self.check_retry_realy_called(fake_ctx)
def _net_disconnect_from_nat_noexternal(self):
fake_client, fake_ctx = self.generate_client_and_context_network()
fake_ctx._target.node.properties = {
'nat': {
'edge_gateway': 'gateway'
},
'rules': [{
'type': 'DNAT'
}]
}
fake_ctx._source.node.properties = {
'vcloud_config':
{
'edge_gateway': 'gateway',
'vdc': 'vdc'
}
}
return fake_client, fake_ctx
def test_net_disconnect_from_nat(self):
# use external
fake_client, fake_ctx = self.generate_client_and_context_network()
fake_ctx._target.node.properties = {
'use_external_resource': True
}
fake_ctx._source.node.properties = {
'vcloud_config':
{
'edge_gateway': 'gateway',
'vdc': 'vdc'
}
}
fake_ctx._source.instance.runtime_properties = {
'gateway_lock': False,
'vcloud_vapp_name': 'vapp'
}
with mock.patch(
'vcloud_plugin_common.VcloudAirClient.get',
mock.MagicMock(return_value=fake_client)
):
public_nat.net_disconnect_from_nat(ctx=fake_ctx,
vca_client=fake_client)
# no external
fake_client, fake_ctx = self._net_disconnect_from_nat_noexternal()
with mock.patch(
'vcloud_plugin_common.VcloudAirClient.get',
mock.MagicMock(return_value=fake_client)
):
public_nat.net_disconnect_from_nat(ctx=fake_ctx, vca_client=None)
fake_client._vdc_gateway.del_nat_rule.assert_called_with(
'DNAT', '192.168.1.1', 'any', '127.1.1.100 - 127.1.1.200',
'any', 'any'
)
# retry check
fake_client, fake_ctx = self._net_disconnect_from_nat_noexternal()
with mock.patch(
'vcloud_plugin_common.VcloudAirClient.get',
mock.MagicMock(return_value=fake_client)
):
self.prepere_gatway_busy_retry(fake_client, fake_ctx)
public_nat.net_disconnect_from_nat(ctx=fake_ctx, vca_client=None)
self.check_retry_realy_called(fake_ctx)
def test_net_connect_to_nat(self):
# use external
fake_client, fake_ctx = self.generate_client_and_context_network()
fake_ctx._target.node.properties = {
'use_external_resource': True
}
fake_ctx._source.node.properties = {
'vcloud_config':
{
'edge_gateway': 'gateway',
'vdc': 'vdc'
}
}
with mock.patch(
'vcloud_plugin_common.VcloudAirClient.get',
mock.MagicMock(return_value=fake_client)
):
public_nat.net_connect_to_nat(ctx=fake_ctx, vca_client=None)
# no external
fake_client, fake_ctx = self.generate_client_and_context_network()
fake_ctx._target.node.properties = {
'nat': {
'edge_gateway': 'gateway'
},
'rules': [{
'type': 'DNAT'
}]
}
fake_ctx._source.node.properties = {
'vcloud_config':
{
'edge_gateway': 'gateway',
'vdc': 'vdc'
}
}
fake_client._vdc_gateway.get_public_ips = mock.MagicMock(return_value=[
'10.18.1.1'
])
with mock.patch(
'vcloud_plugin_common.VcloudAirClient.get',
mock.MagicMock(return_value=fake_client)
):
public_nat.net_connect_to_nat(ctx=fake_ctx, vca_client=None)
fake_client._vdc_gateway.add_nat_rule.assert_called_with(
'DNAT', '10.18.1.1', 'any', '127.1.1.100 - 127.1.1.200',
'any', 'any'
)
# retry check
with mock.patch(
'vcloud_plugin_common.VcloudAirClient.get',
mock.MagicMock(return_value=fake_client)
):
self.prepere_gatway_busy_retry(fake_client, fake_ctx)
public_nat.net_connect_to_nat(ctx=fake_ctx, vca_client=None)
self.check_retry_realy_called(fake_ctx)
def test_net_connect_to_nat_preconfigure(self):
fake_client, fake_ctx = self.generate_client_and_context_network()
fake_ctx._target.node.properties = {
'nat': {
'edge_gateway': 'gateway'
},
'rules': [{
'type': 'DNAT'
}]
}
with mock.patch(
'vcloud_plugin_common.VcloudAirClient.get',
mock.MagicMock(return_value=fake_client)
):
with self.assertRaises(cfy_exc.NonRecoverableError):
public_nat.net_connect_to_nat_preconfigure(ctx=fake_ctx,
vca_client=None)
fake_client, fake_ctx = self.generate_client_and_context_network()
fake_ctx._target.node.properties = {
'nat': {
'edge_gateway': 'gateway'
},
'rules': [{
'type': 'SNAT'
}]
}
with mock.patch(
'vcloud_plugin_common.VcloudAirClient.get',
mock.MagicMock(return_value=fake_client)
):
public_nat.net_connect_to_nat_preconfigure(ctx=fake_ctx,
vca_client=None)
# empty rules
fake_ctx._target.node.properties.update({'rules': []})
with mock.patch(
'vcloud_plugin_common.VcloudAirClient.get',
mock.MagicMock(return_value=fake_client)
):
with self.assertRaises(cfy_exc.NonRecoverableError):
public_nat.net_connect_to_nat_preconfigure(ctx=fake_ctx,
vca_client=None)
if __name__ == '__main__':
unittest.main()
Remaining column values for this record (avg_line_length through hits, in schema order):
| 36.919748
| 79
| 0.542248
| 4,799
| 46,925
| 4.926026
| 0.055845
| 0.062775
| 0.044924
| 0.032953
| 0.880711
| 0.857064
| 0.835321
| 0.823646
| 0.793866
| 0.770178
| 0
| 0.024595
| 0.36228
| 46,925
| 1,270
| 80
| 36.948819
| 0.76538
| 0.046947
| 0
| 0.673759
| 0
| 0
| 0.11188
| 0.030419
| 0
| 0
| 0
| 0
| 0.062057
| 1
| 0.022163
| false
| 0
| 0.007979
| 0
| 0.037234
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
hexsha: cbe97f3cb389489740f1e42249ec7c347020db47 | size: 30 | ext: py | lang: Python
max_stars_repo_path: otscrape/core/extractor/nested/__init__.py | max_stars_repo_name: SSripilaipong/otscrape | max_stars_repo_head_hexsha: 73ad2ea3d20841cf5d81b37180a1f21c48e87480 | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: otscrape/core/extractor/nested/__init__.py | max_issues_repo_name: SSripilaipong/otscrape | max_issues_repo_head_hexsha: 73ad2ea3d20841cf5d81b37180a1f21c48e87480 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: otscrape/core/extractor/nested/__init__.py | max_forks_repo_name: SSripilaipong/otscrape | max_forks_repo_head_hexsha: 73ad2ea3d20841cf5d81b37180a1f21c48e87480 | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
from .zip_dict import ZipDict
Remaining column values for this record (avg_line_length through hits, in schema order):
| 15
| 29
| 0.833333
| 5
| 30
| 4.8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.133333
| 30
| 1
| 30
| 30
| 0.923077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
hexsha: cbf7a1ce96364e36588a482e13d4799ada06f5db | size: 16,642 | ext: py | lang: Python
max_stars_repo_path: src/speech/deep_model.py | max_stars_repo_name: dem123456789/Speech-Emotion-Recognition-with-Dual-Sequence-LSTM-Architecture | max_stars_repo_head_hexsha: a072cb940201bbcdb2d0f4d0dfa1dde478fa4464 | max_stars_repo_licenses: ["MIT"] | max_stars_count: 6 | max_stars_repo_stars_event_min_datetime: 2020-08-03T03:13:25.000Z | max_stars_repo_stars_event_max_datetime: 2022-02-11T08:32:10.000Z
max_issues_repo_path: src/speech/deep_model.py | max_issues_repo_name: dem123456789/Speech-Emotion-Recognition-with-Dual-Sequence-LSTM-Architecture | max_issues_repo_head_hexsha: a072cb940201bbcdb2d0f4d0dfa1dde478fa4464 | max_issues_repo_licenses: ["MIT"] | max_issues_count: 1 | max_issues_repo_issues_event_min_datetime: 2020-09-08T16:10:38.000Z | max_issues_repo_issues_event_max_datetime: 2020-09-08T16:10:38.000Z
max_forks_repo_path: src/speech/deep_model.py | max_forks_repo_name: dem123456789/Speech-Emotion-Recognition-with-Dual-Sequence-LSTM-Architecture | max_forks_repo_head_hexsha: a072cb940201bbcdb2d0f4d0dfa1dde478fa4464 | max_forks_repo_licenses: ["MIT"] | max_forks_count: 2 | max_forks_repo_forks_event_min_datetime: 2020-08-03T21:37:21.000Z | max_forks_repo_forks_event_max_datetime: 2021-03-26T02:19:17.000Z
content:
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pad_packed_sequence
import pdb
import math
torch.manual_seed(1)
class GRUAudio(nn.Module):
def __init__(self, num_features, hidden_dim, num_layers, dropout_rate, num_labels, batch_size, bidirectional=False):
super(GRUAudio, self).__init__()
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self.num_features = num_features
self.hidden_dim = hidden_dim
self.num_layers = num_layers
self.dropout_rate = dropout_rate
self.num_labels = num_labels
self.batch_size = batch_size
self.bidirectional = bidirectional
self.num_directions = 1 + self.bidirectional
self.gru = nn.GRU(self.num_features, self.hidden_dim, self.num_layers, batch_first=True,
dropout=self.dropout_rate, bidirectional=self.bidirectional).to(self.device)
self.classification = nn.Linear(self.hidden_dim * self.num_layers * self.num_directions, self.num_labels).to(
self.device)
# self.softmax = nn.Softmax()
def forward(self, input, target, train=True, seq_length=False):
input = input.to(self.device)
target = target.to(self.device)
hidden = torch.randn(self.num_layers * self.num_directions, self.batch_size, self.hidden_dim)
hidden = hidden.to(self.device)
out, hn = self.gru(input, hidden)
# print(out, out.shape)
# if train:
# hn, _ = pad_packed_sequence(hn, batch_first=True)
hn = hn.permute([1, 0, 2])
hn = hn.reshape(hn.shape[0], -1)
# pdb.set_trace()
out = self.classification(hn)
# out = self.softmax(out)
# pdb.set_trace()
loss = F.cross_entropy(out, torch.max(target, 1)[1])
return out, loss
class AttGRU(nn.Module):
def __init__(self, num_features, hidden_dim, num_layers, dropout_rate, num_labels, batch_size, bidirectional=False):
super(AttGRU, self).__init__()
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self.num_features = num_features
self.hidden_dim = hidden_dim
self.num_layers = num_layers
self.dropout_rate = dropout_rate
self.num_labels = num_labels
self.batch_size = batch_size
self.bidirectional = bidirectional
self.num_directions = 1 + self.bidirectional
self.u = nn.Parameter(torch.zeros((self.num_directions * self.hidden_dim)), requires_grad=True)
self.gru = nn.GRU(self.num_features, self.hidden_dim, self.num_layers, batch_first=True,
dropout=self.dropout_rate, bidirectional=self.bidirectional).to(self.device)
self.classification = nn.Linear(self.hidden_dim * self.num_directions, self.num_labels).to(self.device)
def forward(self, input, target, train=True, seq_length=False):
input = input.to(self.device)
target = target.to(self.device)
hidden = torch.zeros(self.num_layers * self.num_directions, self.batch_size, self.hidden_dim)
hidden = hidden.to(self.device)
out, hn = self.gru(input, hidden)
out, _ = pad_packed_sequence(out, batch_first=True)
mask = []
# pdb.set_trace()
for i in range(len(seq_length)):
mask.append([0] * int(seq_length[i].item()) + [1] * int(out.shape[1] - seq_length[i].item()))
mask = torch.ByteTensor(mask)
mask = mask.to(self.device)
x = torch.matmul(out, self.u)
x = x.masked_fill_(mask, -1e18)
alpha = F.softmax(x, dim=1)
input_linear = torch.sum(torch.matmul(alpha, out), dim=1)
out = self.classification(input_linear)
loss = F.cross_entropy(out, torch.max(target, 1)[1])
# print(self.u[10])
return out, loss
class MeanPool(nn.Module):
def __init__(self, num_features, hidden_dim, num_layers, dropout_rate, num_labels, batch_size, bidirectional=False):
super(MeanPool, self).__init__()
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self.num_features = num_features
self.hidden_dim = hidden_dim
self.num_layers = num_layers
self.dropout_rate = dropout_rate
self.num_labels = num_labels
self.batch_size = batch_size
self.bidirectional = bidirectional
self.num_directions = 1 + self.bidirectional
# self.u=nn.Parameter(torch.randn(self.num_directions*self.hidden_dim)).to(self.device)
self.gru = nn.GRU(self.num_features, self.hidden_dim, self.num_layers, batch_first=True,
dropout=self.dropout_rate, bidirectional=self.bidirectional).to(self.device)
self.classification = nn.Linear(self.hidden_dim * self.num_directions, self.num_labels).to(self.device)
def forward(self, input, target, train=True, seq_length=False):
input = input.to(self.device)
target = target.to(self.device)
hidden = torch.zeros(self.num_layers * self.num_directions, self.batch_size, self.hidden_dim)
hidden = hidden.to(self.device)
out, hn = self.gru(input, hidden)
out, _ = pad_packed_sequence(out, batch_first=True)
out = torch.mean(out, dim=1)
# pdb.set_trace()
out = self.classification(out)
loss = F.cross_entropy(out, torch.max(target, 1)[1])
return out, loss
class LSTM_Audio(nn.Module):
def __init__(self, num_features, hidden_dim, num_layers, dropout_rate, num_labels, batch_size, bidirectional=False):
super(LSTM_Audio, self).__init__()
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self.num_features = num_features
self.hidden_dim = hidden_dim
self.num_layers = num_layers
self.dropout_rate = dropout_rate
self.num_labels = num_labels
self.batch_size = batch_size
self.bidirectional = bidirectional
self.num_directions = 1 + self.bidirectional
# self.u=nn.Parameter(torch.randn(self.num_directions*self.hidden_dim)).to(self.device)
self.lstm = nn.LSTM(self.num_features, self.hidden_dim, self.num_layers, batch_first=True,
dropout=self.dropout_rate, bidirectional=self.bidirectional).to(self.device)
self.classification = nn.Linear(self.hidden_dim * self.num_directions, self.num_labels).to(self.device)
def forward(self, input, target, seq_length, train=True):
input = input.to(self.device)
target = target.to(self.device)
#hidden = torch.zeros(self.num_layers * self.num_directions, self.batch_size, self.hidden_dim)
#hidden = hidden.to(self.device)
# pdb.set_trace()
out, hn = self.lstm(input)
out, _ = pad_packed_sequence(out, batch_first=True)
out = torch.mean(out, dim=1)
# pdb.set_trace()
out = self.classification(out)
loss = F.cross_entropy(out, torch.max(target, 1)[1])
return out, loss
class ATT(nn.Module):
def __init__(self, num_features, hidden_dim, num_layers, dropout_rate, num_labels, batch_size, bidirectional=False):
super(ATT, self).__init__()
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self.num_features = num_features
self.hidden_dim = hidden_dim
self.num_layers = num_layers
self.dropout_rate = dropout_rate
self.num_labels = num_labels
self.batch_size = batch_size
self.bidirectional = bidirectional
self.num_directions = 1 + self.bidirectional
self.attn = nn.Linear(self.hidden_dim * self.num_directions, hidden_dim)
self.u=nn.Parameter(torch.randn(self.hidden_dim))
stdv = 1. / math.sqrt(self.u.shape[0])
self.u.data.normal_(mean=0, std=stdv)
self.lstm = nn.LSTM(self.num_features, self.hidden_dim, self.num_layers, batch_first=True, dropout=self.dropout_rate, bidirectional=self.bidirectional).to(self.device)
self.fc1 = nn.Linear(self.hidden_dim * self.num_directions, self.hidden_dim).to(self.device)
self.batch1=nn.BatchNorm1d(self.hidden_dim)
self.fc2=nn.Linear(self.hidden_dim,self.num_labels).to(self.device)
self.batch2=nn.BatchNorm1d(self.num_labels)
self.batchatt=nn.BatchNorm1d(self.hidden_dim * self.num_directions)
def forward(self, input, target, seq_length, train=True):
input = input.to(self.device)
target = target.to(self.device)
out, hn = self.lstm(input)
out , _ =pad_packed_sequence(out,batch_first=True)
mask=[]
# pdb.set_trace()
for i in range(len(seq_length)):
mask.append([0]*int(seq_length[i].item())+[1]*int(out.shape[1]-seq_length[i].item()))
mask=torch.ByteTensor(mask)
mask=mask.to(self.device)
out_att=torch.tanh(self.attn(out))
x=torch.matmul(out_att,self.u)
x=x.masked_fill_(mask,-1e18)
alpha=F.softmax(x,dim=1)
input_linear=torch.sum(torch.matmul(alpha,out),dim=1)
input_linear_normalized=self.batchatt(input_linear)
out_1 = self.fc1(input_linear_normalized)
out_1_normalized=self.batch1(out_1)
out_2=self.fc2(out_1_normalized)
out_2_normalized=self.batch2(out_2)
loss = F.cross_entropy(out_2_normalized, torch.max(target, 1)[1])
# print(self.u[10])
return out_2, loss
class Mean_Pool_2(nn.Module):
def __init__(self, num_features, hidden_dim, num_layers, dropout_rate, num_labels, batch_size, bidirectional=False):
super(Mean_Pool_2, self).__init__()
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self.num_features = num_features
self.hidden_dim = hidden_dim
self.num_layers = num_layers
self.dropout_rate = dropout_rate
self.num_labels = num_labels
self.batch_size = batch_size
self.bidirectional = bidirectional
self.num_directions = 1 + self.bidirectional
#self.attn = nn.Linear(self.hidden_dim * self.num_directions, hidden_dim)
#self.u=nn.Parameter(torch.randn(self.hidden_dim))
#stdv = 1. / math.sqrt(self.u.shape[0])
#self.u.data.normal_(mean=0, std=stdv)
self.lstm = nn.LSTM(self.num_features, self.hidden_dim, self.num_layers, batch_first=True, dropout=self.dropout_rate, bidirectional=self.bidirectional).to(self.device)
self.fc1 = nn.Linear(self.hidden_dim * self.num_directions, self.hidden_dim).to(self.device)
self.batch1=nn.BatchNorm1d(self.hidden_dim)
self.fc2=nn.Linear(self.hidden_dim,self.num_labels).to(self.device)
self.batch2=nn.BatchNorm1d(self.num_labels)
self.batchatt=nn.BatchNorm1d(self.hidden_dim * self.num_directions)
def forward(self, input, target, seq_length, train=True):
input = input.to(self.device)
target = target.to(self.device)
out, hn = self.lstm(input)
out , _ =pad_packed_sequence(out,batch_first=True)
x=torch.mean(out,dim=1)
input_linear_normalized=self.batchatt(x)
out_1 = self.fc1(input_linear_normalized)
out_1_normalized=self.batch1(out_1)
out_2=self.fc2(out_1_normalized)
out_2_normalized=self.batch2(out_2)
loss = F.cross_entropy(out_2_normalized, torch.max(target, 1)[1])
# print(self.u[10])
return out_2, loss
class ConvLSTMCell(nn.Module):
    def __init__(self, input_channels, hidden_channels, kernel_size, kernel_size_pool=8, stride_pool=4):
        super(ConvLSTMCell, self).__init__()
        assert hidden_channels % 2 == 0
        self.input_channels = input_channels
        self.hidden_channels = hidden_channels
        self.kernel_size = kernel_size
        self.stride = 1
        self.padding = int((kernel_size - 1) / 2)
        self.kernel_size_pool = kernel_size_pool
        self.stride_pool = stride_pool
        # one input->hidden and one hidden->hidden convolution per gate (input, forget, cell, output)
        self.Wxi = nn.Conv1d(self.input_channels, self.hidden_channels, self.kernel_size, self.stride, self.padding, bias=True)
        self.Whi = nn.Conv1d(self.hidden_channels, self.hidden_channels, self.kernel_size, self.stride, self.padding, bias=False)
        self.Wxf = nn.Conv1d(self.input_channels, self.hidden_channels, self.kernel_size, self.stride, self.padding, bias=True)
        self.Whf = nn.Conv1d(self.hidden_channels, self.hidden_channels, self.kernel_size, self.stride, self.padding, bias=False)
        self.Wxc = nn.Conv1d(self.input_channels, self.hidden_channels, self.kernel_size, self.stride, self.padding, bias=True)
        self.Whc = nn.Conv1d(self.hidden_channels, self.hidden_channels, self.kernel_size, self.stride, self.padding, bias=False)
        self.Wxo = nn.Conv1d(self.input_channels, self.hidden_channels, self.kernel_size, self.stride, self.padding, bias=True)
        self.Who = nn.Conv1d(self.hidden_channels, self.hidden_channels, self.kernel_size, self.stride, self.padding, bias=False)
        self.max_pool = nn.MaxPool1d(self.kernel_size_pool, stride=self.stride_pool)
        self.batch = nn.BatchNorm1d(self.hidden_channels)
        # peephole weights; created lazily in init_hidden once the sequence length is known
        self.Wci = None
        self.Wcf = None
        self.Wco = None

    def forward(self, x, h, c):
        ci = torch.sigmoid(self.Wxi(x) + self.Whi(h) + c * self.Wci)   # input gate
        cf = torch.sigmoid(self.Wxf(x) + self.Whf(h) + c * self.Wcf)   # forget gate
        cc = cf * c + ci * torch.tanh(self.Wxc(x) + self.Whc(h))       # new cell state
        co = torch.sigmoid(self.Wxo(x) + self.Who(h) + cc * self.Wco)  # output gate
        ch = co * torch.tanh(cc)                                       # new hidden state
        ch_pool = self.batch(self.max_pool(ch))
        return ch_pool, ch, cc

    def init_hidden(self, batch_size, hidden, shape):
        # the state tensors and peephole weights are created directly on the GPU,
        # so this cell assumes CUDA is available
        if self.Wci is None:
            self.Wci = nn.Parameter(torch.zeros(1, hidden, shape)).cuda()
            self.Wcf = nn.Parameter(torch.zeros(1, hidden, shape)).cuda()
            self.Wco = nn.Parameter(torch.zeros(1, hidden, shape)).cuda()
        return (nn.Parameter(torch.zeros(batch_size, hidden, shape)).cuda(),
                nn.Parameter(torch.zeros(batch_size, hidden, shape)).cuda())
class ConvLSTM(nn.Module):
    # input_channels corresponds to the first input feature map
    # hidden state is a list of succeeding lstm layers.
    # kernel size is also a list, same length as hidden_channels
    def __init__(self, input_channels, hidden_channels, kernel_size, step):
        super(ConvLSTM, self).__init__()
        assert len(hidden_channels) == len(kernel_size), "size mismatch"
        self.input_channels = [input_channels] + hidden_channels
        self.hidden_channels = hidden_channels
        self.kernel_size = kernel_size
        self.num_layers = len(hidden_channels)
        self.step = step
        self._all_layers = []
        self.num_labels = 4
        self.linear_dim = 16 * 18
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.classification = nn.Linear(self.linear_dim, self.num_labels)
        for i in range(self.num_layers):
            name = 'cell{}'.format(i)
            cell = ConvLSTMCell(self.input_channels[i], self.hidden_channels[i], self.kernel_size[i])
            setattr(self, name, cell)
            self._all_layers.append(cell)

    def forward(self, input, target):
        # input should be a list of inputs, like a time stamp, maybe 1280 for 100 times.
        internal_state = []
        outputs = []
        for step in range(self.step):
            x = input[step]
            for i in range(self.num_layers):
                name = 'cell{}'.format(i)
                if step == 0:
                    bsize, _, shape = x.size()
                    (h, c) = getattr(self, name).init_hidden(batch_size=bsize, hidden=self.hidden_channels[i],
                                                             shape=shape)
                    internal_state.append((h, c))
                # do forward
                (h, c) = internal_state[i]
                x, new_h, new_c = getattr(self, name)(x, h, c)
                internal_state[i] = (new_h, new_c)
            outputs.append(x)
        ## mean pooling and loss function
        out = [torch.unsqueeze(o, dim=3) for o in outputs]
        out = torch.flatten(torch.mean(torch.cat(out, dim=3), dim=3), start_dim=1)
        out = self.classification(out)
        loss = F.cross_entropy(out, torch.max(target, 1)[1].to(self.device))
        return torch.unsqueeze(out, dim=0), torch.unsqueeze(loss, dim=0)
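A hypothetical usage sketch for the ConvLSTM above (not from the original file). The channel counts and sequence length are chosen so the final pooled feature map is 16 channels by 18 samples, matching self.linear_dim = 16 * 18; a CUDA device is assumed because init_hidden places the states on the GPU.

import torch

model = ConvLSTM(input_channels=1, hidden_channels=[32, 16], kernel_size=[5, 5], step=10).cuda()
# forward expects a list of `step` tensors shaped (batch, channels, length) plus one-hot targets
seq = [torch.randn(8, 1, 308).cuda() for _ in range(model.step)]        # 308 -> 76 -> 18 after the two pools
target = torch.eye(4)[torch.randint(0, 4, (8,))].cuda()                 # one-hot labels for 4 classes
logits, loss = model(seq, target)                                       # shapes: (1, 8, 4) and (1,)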
| 42.671795
| 175
| 0.651785
| 2,288
| 16,642
| 4.531031
| 0.08479
| 0.051992
| 0.046397
| 0.037041
| 0.80573
| 0.788078
| 0.785184
| 0.781036
| 0.770425
| 0.740523
| 0
| 0.010782
| 0.230922
| 16,642
| 390
| 176
| 42.671795
| 0.799203
| 0.070124
| 0
| 0.608059
| 0
| 0
| 0.004793
| 0
| 0
| 0
| 0
| 0
| 0.007326
| 1
| 0.062271
| false
| 0
| 0.021978
| 0
| 0.14652
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
cbf8a1ef0f33878d804eb957ddcbefc421928a1b
| 40
|
py
|
Python
|
problem/01000~09999/09498/9498.py3.py
|
njw1204/BOJ-AC
|
1de41685725ae4657a7ff94e413febd97a888567
|
[
"MIT"
] | 1
|
2019-04-19T16:37:44.000Z
|
2019-04-19T16:37:44.000Z
|
problem/01000~09999/09498/9498.py3.py
|
njw1204/BOJ-AC
|
1de41685725ae4657a7ff94e413febd97a888567
|
[
"MIT"
] | 1
|
2019-04-20T11:42:44.000Z
|
2019-04-20T11:42:44.000Z
|
problem/01000~09999/09498/9498.py3.py
|
njw1204/BOJ-AC
|
1de41685725ae4657a7ff94e413febd97a888567
|
[
"MIT"
] | 3
|
2019-04-19T16:37:47.000Z
|
2021-10-25T00:45:00.000Z
|
print(("F"*6+"DCBAA")[int(input())//10])
| 40
| 40
| 0.55
| 7
| 40
| 3.142857
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.075
| 0
| 40
| 1
| 40
| 40
| 0.475
| 0
| 0
| 0
| 0
| 0
| 0.146341
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
0212be2b426e881f46ce9b5faa0a4d6cd2b0e659
| 11
|
py
|
Python
|
py2codes/py2_exec.py
|
rhabacker/lib2to3import
|
36102fa844bf18234053d96f6b9b90f5c6068e87
|
[
"MIT"
] | null | null | null |
py2codes/py2_exec.py
|
rhabacker/lib2to3import
|
36102fa844bf18234053d96f6b9b90f5c6068e87
|
[
"MIT"
] | 1
|
2020-11-14T01:39:18.000Z
|
2020-11-17T07:54:28.000Z
|
py2codes/py2_exec.py
|
rhabacker/lib2to3import
|
36102fa844bf18234053d96f6b9b90f5c6068e87
|
[
"MIT"
] | 2
|
2019-08-12T09:58:05.000Z
|
2021-03-18T17:13:06.000Z
|
exec "123"
| 5.5
| 10
| 0.636364
| 2
| 11
| 3.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 0.181818
| 11
| 1
| 11
| 11
| 0.444444
| 0
| 0
| 0
| 0
| 0
| 0.272727
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
028456bd34d14ef1d7f23ca7f443c4b9f0404a35
| 4,071
|
py
|
Python
|
waferscreen/inst_control/inactive/agilent_34970A.py
|
chw3k5/WaferScreen
|
c0ca7fe939fe7cd0b722b7d6129b148c03a7505c
|
[
"Apache-2.0"
] | 1
|
2021-07-30T19:06:07.000Z
|
2021-07-30T19:06:07.000Z
|
waferscreen/inst_control/inactive/agilent_34970A.py
|
chw3k5/WaferScreen
|
c0ca7fe939fe7cd0b722b7d6129b148c03a7505c
|
[
"Apache-2.0"
] | 8
|
2021-04-22T20:47:48.000Z
|
2021-07-30T19:06:01.000Z
|
waferscreen/inst_control/inactive/agilent_34970A.py
|
chw3k5/WaferScreen
|
c0ca7fe939fe7cd0b722b7d6129b148c03a7505c
|
[
"Apache-2.0"
] | null | null | null |
import serial


class Agilent34970A:
    def __init__(self):
        self.timeout = 10
        self.baudrate = 4800
        self.bytesize = serial.EIGHTBITS
        self.parity = serial.PARITY_NONE
        self.stopbits = serial.STOPBITS_ONE
        xonxoff = True
        self.s = serial.Serial(port='/dev/ttyUSB3', timeout=self.timeout, baudrate=self.baudrate,
                               bytesize=self.bytesize, parity=self.parity, stopbits=self.stopbits, xonxoff=True)

    def reset(self):
        self.s.write('*RST\n')

    def closeSwitch(self, board, switch):
        self.s.write('ROUT:CLOS (@' + str(board) + str(switch).zfill(2) + ')\n')

    def checkClosed(self, board, switch):
        self.s.write('ROUT:CLOS? (@' + str(board) + str(switch).zfill(2) + ')\n')
        sto = self.s.readline()
        if int(sto) == 0:
            print 'Switch open'
        elif int(sto) == 1:
            print 'Switch closed'

    def measureResistance(self, board, switch, Range="AUTO", Resolution="AUTO"):
        if Resolution == "AUTO":
            self.s.write(
                'MEAS:RES? ' + str(Range) + ',(@' + str(board) + str(switch).zfill(2) + ')\n')
        else:
            self.s.write(
                'MEAS:RES? ' + str(Range) + ',' + str(Resolution) + ',(@' + str(board) + str(switch).zfill(2) + ')\n')
        return float(self.s.readline())

    def measureFrequency(self, board, switch, Range="AUTO", Resolution="AUTO"):
        if Resolution == "AUTO":
            self.s.write(
                'MEAS:FREQ? ' + str(Range) + ',(@' + str(board) + str(switch).zfill(2) + ')\n')
        else:
            self.s.write(
                'MEAS:FREQ? ' + str(Range) + ',' + str(Resolution) + ',(@' + str(board) + str(switch).zfill(2) + ')\n')
        return float(self.s.readline())

    def measurePeriod(self, board, switch, Range="AUTO", Resolution="AUTO"):
        if Resolution == "AUTO":
            self.s.write(
                'MEAS:PER? ' + str(Range) + ',(@' + str(board) + str(switch).zfill(2) + ')\n')
        else:
            self.s.write(
                'MEAS:PER? ' + str(Range) + ',' + str(Resolution) + ',(@' + str(board) + str(switch).zfill(2) + ')\n')
        return float(self.s.readline())

    def measureACCurrent(self, board, switch, Range="AUTO", Resolution="AUTO"):
        if Resolution == "AUTO":
            self.s.write(
                'MEAS:CURR:AC? ' + str(Range) + ',(@' + str(board) + str(switch).zfill(2) + ')\n')
        else:
            self.s.write(
                'MEAS:CURR:AC? ' +
                str(Range) + ',' + str(Resolution) + ',(@' + str(board) + str(switch).zfill(2) + ')\n')
        return float(self.s.readline())

    def measureDCCurrent(self, board, switch, Range="AUTO", Resolution="AUTO"):
        if Resolution == "AUTO":
            self.s.write(
                'MEAS:CURR:DC? ' + str(Range) + ',(@' + str(board) + str(switch).zfill(2) + ')\n')
        else:
            self.s.write(
                'MEAS:CURR:DC? ' +
                str(Range) + ',' + str(Resolution) + ',(@' + str(board) + str(switch).zfill(2) + ')\n')
        return float(self.s.readline())

    def measureACVoltage(self, board, switch, Range="AUTO", Resolution="AUTO"):
        if Resolution == "AUTO":
            self.s.write(
                'MEAS:VOLT:AC? ' + str(Range) + ',(@' + str(board) + str(switch).zfill(2) + ')\n')
        else:
            self.s.write(
                'MEAS:VOLT:AC? ' +
                str(Range) + ',' + str(Resolution) + ',(@' + str(board) + str(switch).zfill(2) + ')\n')
        return float(self.s.readline())

    def measureDCVoltage(self, board, switch, Range="AUTO", Resolution="AUTO"):
        if Resolution == "AUTO":
            self.s.write(
                'MEAS:VOLT:DC? ' + str(Range) + ',(@' + str(board) + str(switch).zfill(2) + ')\n')
        else:
            self.s.write(
                'MEAS:VOLT:DC? ' +
                str(Range) + ',' + str(Resolution) + ',(@' + str(board) + str(switch).zfill(2) + ')\n')
        return float(self.s.readline())
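A brief usage sketch under the driver's own assumptions (Python 2, the instrument on /dev/ttyUSB3, and a multiplexer card in slot 1); the channel numbers are illustrative only:

dvm = Agilent34970A()
dvm.reset()
dvm.closeSwitch(1, 3)                           # closes channel 103 (slot 1, channel 03)
resistance_ohms = dvm.measureResistance(1, 3)   # sends 'MEAS:RES? AUTO,(@103)'
dc_volts = dvm.measureDCVoltage(1, 4)           # sends 'MEAS:VOLT:DC? AUTO,(@104)'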
| 39.911765
| 119
| 0.503316
| 457
| 4,071
| 4.47046
| 0.146608
| 0.063632
| 0.083211
| 0.133138
| 0.73862
| 0.73862
| 0.73862
| 0.73862
| 0.72883
| 0.72883
| 0
| 0.010537
| 0.300663
| 4,071
| 101
| 120
| 40.306931
| 0.70706
| 0
| 0
| 0.47561
| 0
| 0
| 0.10366
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.012195
| null | null | 0.02439
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
5a3c1f4058904f112a823d0ce1fa4d2ba743c174
| 6,151
|
py
|
Python
|
models/grammateus.py
|
monotasker/Online-Critical-Pseudepigrapha
|
456ef828834aeaedda8204a6107729f277063b9f
|
[
"W3C"
] | 1
|
2017-09-03T12:59:19.000Z
|
2017-09-03T12:59:19.000Z
|
models/grammateus.py
|
OnlineCriticalPseudepigrapha/Online-Critical-Pseudepigrapha
|
456ef828834aeaedda8204a6107729f277063b9f
|
[
"W3C"
] | 18
|
2018-05-11T17:08:48.000Z
|
2018-06-29T20:15:37.000Z
|
models/grammateus.py
|
monotasker/Online-Critical-Pseudepigrapha
|
456ef828834aeaedda8204a6107729f277063b9f
|
[
"W3C"
] | 1
|
2017-09-17T16:13:45.000Z
|
2017-09-17T16:13:45.000Z
|
#! /usr/bin/python2.7
# -*- coding: utf8 -*-
import datetime
# from plugin_ajaxselect import AjaxSelect
if 0:
    from gluon import db, Field, auth, IS_EMPTY_OR, IS_IN_DB, current, URL
    response = current.response

response.files.insert(5, URL('static',
                             'plugin_ajaxselect/plugin_ajaxselect.js'))
#response.files.append(URL('static', 'plugin_ajaxselect/plugin_ajaxselect.css'))
response.files.append(URL('static', 'plugin_listandedit/plugin_listandedit.css'))
db.define_table('genres',
Field('genre', 'string'),
format='%(genre)s')
db.define_table('biblical_figures',
Field('figure', 'string'),
format='%(figure)s')
db.define_table('draftdocs',
Field('name'),
Field('filename'),
Field('editor', db.auth_user),
Field('editor2', db.auth_user),
Field('editor3', db.auth_user),
Field('editor4', db.auth_user),
Field('assistant_editor', db.auth_user),
Field('assistant_editor2', db.auth_user),
Field('assistant_editor3', db.auth_user),
Field('proofreader', db.auth_user),
Field('proofreader2', db.auth_user),
Field('proofreader3', db.auth_user),
Field('version', 'double'),
Field('introduction', 'text'),
Field('provenance', 'text'),
Field('themes', 'text'),
Field('status', 'text'),
Field('manuscripts', 'text'),
Field('bibliography', 'text'),
Field('corrections', 'text'),
Field('sigla', 'text'),
Field('copyright', 'text'),
Field('citation_format', 'text'),
Field('genres', 'list:reference genres'),
Field('figures', 'list:reference biblical_figures'),
format='%(name)s')
db.draftdocs.editor.requires = IS_EMPTY_OR(IS_IN_DB(db, 'auth_user.id', db.auth_user._format))
db.draftdocs.editor2.requires = IS_EMPTY_OR(IS_IN_DB(db, 'auth_user.id', db.auth_user._format))
db.draftdocs.editor3.requires = IS_EMPTY_OR(IS_IN_DB(db, 'auth_user.id', db.auth_user._format))
db.draftdocs.editor4.requires = IS_EMPTY_OR(IS_IN_DB(db, 'auth_user.id', db.auth_user._format))
db.draftdocs.assistant_editor.requires = IS_EMPTY_OR(IS_IN_DB(db, 'auth_user.id', db.auth_user._format))
db.draftdocs.assistant_editor2.requires = IS_EMPTY_OR(IS_IN_DB(db, 'auth_user.id', db.auth_user._format))
db.draftdocs.assistant_editor3.requires = IS_EMPTY_OR(IS_IN_DB(db, 'auth_user.id', db.auth_user._format))
db.draftdocs.proofreader.requires = IS_EMPTY_OR(IS_IN_DB(db, 'auth_user.id', db.auth_user._format))
db.draftdocs.proofreader2.requires = IS_EMPTY_OR(IS_IN_DB(db, 'auth_user.id', db.auth_user._format))
db.draftdocs.proofreader3.requires = IS_EMPTY_OR(IS_IN_DB(db, 'auth_user.id', db.auth_user._format))
db.draftdocs.genres.requires = IS_EMPTY_OR(IS_IN_DB(db, 'genres.id',
db.genres._format,
multiple=True))
db.draftdocs.figures.requires = IS_EMPTY_OR(IS_IN_DB(db, 'biblical_figures.id',
db.biblical_figures._format,
multiple=True))
db.define_table('docs',
Field('name'),
Field('filename'),
Field('editor', db.auth_user),
Field('editor2', db.auth_user),
Field('editor3', db.auth_user),
Field('editor4', db.auth_user),
Field('assistant_editor', db.auth_user),
Field('assistant_editor2', db.auth_user),
Field('assistant_editor3', db.auth_user),
Field('proofreader', db.auth_user),
Field('proofreader2', db.auth_user),
Field('proofreader3', db.auth_user),
Field('version', 'double'),
Field('introduction', 'text'),
Field('provenance', 'text'),
Field('themes', 'text'),
Field('status', 'text'),
Field('manuscripts', 'text'),
Field('bibliography', 'text'),
Field('corrections', 'text'),
Field('sigla', 'text'),
Field('copyright', 'text'),
Field('citation_format', 'text'),
Field('genres', 'list:reference genres'),
Field('figures', 'list:reference biblical_figures'),
format='%(name)s')
db.docs.editor.requires = IS_EMPTY_OR(IS_IN_DB(db, 'auth_user.id', db.auth_user._format))
db.docs.editor2.requires = IS_EMPTY_OR(IS_IN_DB(db, 'auth_user.id', db.auth_user._format))
db.docs.editor3.requires = IS_EMPTY_OR(IS_IN_DB(db, 'auth_user.id', db.auth_user._format))
db.docs.editor4.requires = IS_EMPTY_OR(IS_IN_DB(db, 'auth_user.id', db.auth_user._format))
db.docs.assistant_editor.requires = IS_EMPTY_OR(IS_IN_DB(db, 'auth_user.id', db.auth_user._format))
db.docs.assistant_editor2.requires = IS_EMPTY_OR(IS_IN_DB(db, 'auth_user.id', db.auth_user._format))
db.docs.assistant_editor3.requires = IS_EMPTY_OR(IS_IN_DB(db, 'auth_user.id', db.auth_user._format))
db.docs.proofreader.requires = IS_EMPTY_OR(IS_IN_DB(db, 'auth_user.id', db.auth_user._format))
db.docs.proofreader2.requires = IS_EMPTY_OR(IS_IN_DB(db, 'auth_user.id', db.auth_user._format))
db.docs.proofreader3.requires = IS_EMPTY_OR(IS_IN_DB(db, 'auth_user.id', db.auth_user._format))
db.docs.genres.requires = IS_EMPTY_OR(IS_IN_DB(db, 'genres.id',
db.genres._format,
multiple=True))
db.docs.figures.requires = IS_EMPTY_OR(IS_IN_DB(db, 'biblical_figures.id',
db.biblical_figures._format,
multiple=True))
db.define_table('biblio',
Field('record'),
format='%(record)s')
db.define_table('pages',
Field('page_label', 'string'),
Field('title', 'string'),
Field('body', 'text'),
Field('poster', db.auth_user, default=auth.user_id),
Field('post_date', 'datetime', default=datetime.datetime.utcnow()),
format='%(title)s')
db.define_table('news',
Field('news_token', 'string'),
Field('title', 'string'),
Field('body', 'text'),
Field('poster', db.auth_user, default=auth.user_id),
Field('post_date', 'datetime', default=datetime.datetime.utcnow()),
format='%(title)s')
db.define_table('bugs',
Field('title'),
Field('body', 'text'),
Field('poster', db.auth_user, default=auth.user_id),
Field('post_date', 'datetime'),
format='%(title)s')
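A sketch of how these table definitions (web2py's DAL, imported from gluon above) are typically used from a controller. The values are hypothetical and only the standard insert/select calls are assumed:

doc_id = db.docs.insert(name='1 Enoch', filename='1_Enoch', version=1.0)
row = db.docs(doc_id)                                                    # fetch the record back by id
drafts = db(db.draftdocs.version >= 1.0).select(db.draftdocs.name,
                                                db.draftdocs.status)     # query the draft documents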
| 44.572464
| 105
| 0.662656
| 846
| 6,151
| 4.544917
| 0.105201
| 0.137321
| 0.163849
| 0.071521
| 0.881665
| 0.881665
| 0.842653
| 0.842653
| 0.842653
| 0.842653
| 0
| 0.006439
| 0.166802
| 6,151
| 137
| 106
| 44.89781
| 0.743805
| 0.026175
| 0
| 0.598361
| 0
| 0
| 0.229201
| 0.013197
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.016393
| 0
| 0.016393
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
5a44e929a11797422604acb7129e5a00747b908f
| 2,350
|
py
|
Python
|
gb/tests/test_gibbs_sampler.py
|
myozka/granger-busca
|
e6922f85aa58ab0809951ec4d60b5df43d6c74e8
|
[
"BSD-3-Clause"
] | 5
|
2018-09-06T13:37:04.000Z
|
2019-12-16T13:53:26.000Z
|
gb/tests/test_gibbs_sampler.py
|
myozka/granger-busca
|
e6922f85aa58ab0809951ec4d60b5df43d6c74e8
|
[
"BSD-3-Clause"
] | 1
|
2021-06-09T06:08:25.000Z
|
2021-07-13T18:10:09.000Z
|
gb/tests/test_gibbs_sampler.py
|
myozka/granger-busca
|
e6922f85aa58ab0809951ec4d60b5df43d6c74e8
|
[
"BSD-3-Clause"
] | 4
|
2020-03-30T14:54:27.000Z
|
2021-09-23T18:48:14.000Z
|
# -*- coding: utf8
from gb.randomkit.random import RNG
from gb.samplers import BaseSampler
from gb.samplers import CollapsedGibbsSampler
from gb.stamps import Timestamps
from gb.sloppy import SloppyCounter
from numpy.testing import assert_equal
import numpy as np
def test_get_probability():
d = {}
d[0] = [1, 2, 3, 4, 5, 6, 7]
d[1] = [11, 12, 13]
stamps = Timestamps(d)
causes = stamps._get_causes(0)
causes[0] = 0
causes[1] = 0
causes[2] = 0
causes[3] = 1
causes[4] = 1
causes[5] = 1
causes[6] = 1
causes = stamps._get_causes(1)
causes[0] = 0
causes[1] = 0
causes[2] = 1
nb = np.array([5, 5], dtype='uint64')
init_state = np.array([[5, 5]], dtype='uint64')
id_ = 0
sloppy = SloppyCounter(1, 9999, nb, init_state)
sampler = CollapsedGibbsSampler(BaseSampler(stamps, sloppy, id_, 0.1,
RNG()), 2)
sampler._set_current_process(0)
assert_equal(0.5961538461538461, sampler._get_probability(0))
assert_equal(0.7884615384615383, sampler._get_probability(1))
sampler._set_current_process(1)
assert_equal(0.40384615384615385, sampler._get_probability(0))
assert_equal(0.21153846153846154, sampler._get_probability(1))
def test_inc_dec():
d = {}
d[0] = [1, 2, 3, 4, 5, 6, 7]
d[1] = [11, 12, 13]
stamps = Timestamps(d)
causes = stamps._get_causes(0)
causes[0] = 0
causes[1] = 0
causes[2] = 0
causes[3] = 1
causes[4] = 1
causes[5] = 1
causes[6] = 1
causes = stamps._get_causes(1)
causes[0] = 0
causes[1] = 0
causes[2] = 1
nb = np.array([5, 5], dtype='uint64')
init_state = np.array([[5, 5]], dtype='uint64')
id_ = 0
sloppy = SloppyCounter(1, 9999, nb, init_state)
sampler = CollapsedGibbsSampler(BaseSampler(stamps, sloppy, id_, 0.1,
RNG()), 2)
sampler._set_current_process(0)
assert_equal(0.5961538461538461, sampler._get_probability(0))
assert_equal(0.7884615384615383, sampler._get_probability(1))
sampler._inc_one(0)
assert_equal(0.6612903225806451, sampler._get_probability(0))
assert_equal(0.7884615384615383, sampler._get_probability(1))
sampler._dec_one(0)
assert_equal(0.5961538461538461, sampler._get_probability(0))
| 28.313253
| 73
| 0.631064
| 325
| 2,350
| 4.375385
| 0.178462
| 0.059072
| 0.075949
| 0.073136
| 0.746132
| 0.732771
| 0.732771
| 0.708861
| 0.708861
| 0.672996
| 0
| 0.155221
| 0.237872
| 2,350
| 82
| 74
| 28.658537
| 0.638749
| 0.006809
| 0
| 0.776119
| 0
| 0
| 0.010292
| 0
| 0
| 0
| 0
| 0
| 0.149254
| 1
| 0.029851
| false
| 0
| 0.104478
| 0
| 0.134328
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
5a4e07f2b94ab476e5ae09d4fd2d5f84fb6f63e2
| 72
|
py
|
Python
|
__init__.py
|
VASemenov/Genetica
|
5f51159e182a628c2d33c8a401719924b3611df5
|
[
"MIT"
] | null | null | null |
__init__.py
|
VASemenov/Genetica
|
5f51159e182a628c2d33c8a401719924b3611df5
|
[
"MIT"
] | null | null | null |
__init__.py
|
VASemenov/Genetica
|
5f51159e182a628c2d33c8a401719924b3611df5
|
[
"MIT"
] | null | null | null |
from genetica.dna import DNA, genify
from genetica.model import Genetica
| 36
| 36
| 0.847222
| 11
| 72
| 5.545455
| 0.545455
| 0.393443
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 72
| 2
| 37
| 36
| 0.953125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5a6c3376aee63cfa4176eec2e2221796087f1da4
| 55
|
py
|
Python
|
app/cli/plugin/__init__.py
|
lonless0/flask_project
|
f5d6c5c7655e54d95069b469e3d470eda7a05cb7
|
[
"MIT"
] | 786
|
2019-01-15T14:30:37.000Z
|
2022-03-28T08:53:39.000Z
|
app/cli/plugin/__init__.py
|
lonless0/flask_project
|
f5d6c5c7655e54d95069b469e3d470eda7a05cb7
|
[
"MIT"
] | 107
|
2019-01-18T05:15:16.000Z
|
2022-03-16T07:13:05.000Z
|
app/cli/plugin/__init__.py
|
lonless0/flask_project
|
f5d6c5c7655e54d95069b469e3d470eda7a05cb7
|
[
"MIT"
] | 222
|
2019-01-16T14:44:23.000Z
|
2022-03-23T11:33:00.000Z
|
from .generator import generate
from .init import init
| 18.333333
| 31
| 0.818182
| 8
| 55
| 5.625
| 0.625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.145455
| 55
| 2
| 32
| 27.5
| 0.957447
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ce517a5ddc247572eac79c178a88597e1d88b706
| 43
|
py
|
Python
|
models/__init__.py
|
salesforce/DataHardness
|
18b9231f8d08f35b2452e6357b7d6b31f21c695c
|
[
"BSD-3-Clause"
] | 3
|
2021-11-18T22:48:28.000Z
|
2022-01-08T08:02:31.000Z
|
models/__init__.py
|
salesforce/DataHardness
|
18b9231f8d08f35b2452e6357b7d6b31f21c695c
|
[
"BSD-3-Clause"
] | null | null | null |
models/__init__.py
|
salesforce/DataHardness
|
18b9231f8d08f35b2452e6357b7d6b31f21c695c
|
[
"BSD-3-Clause"
] | 1
|
2021-11-18T22:48:32.000Z
|
2021-11-18T22:48:32.000Z
|
from models.glow import Glow, GlowAdditive
| 21.5
| 42
| 0.837209
| 6
| 43
| 6
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.116279
| 43
| 1
| 43
| 43
| 0.947368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ce8e42b2a35ed5fd98c1fefc1db9f29031a082bc
| 2,270
|
py
|
Python
|
migrations/versions/2019_03_04_optional_chart_and_table_classifications.py.py
|
AlexKouzy/ethnicity-facts-and-figures-publisher
|
18ab2495a8633f585e18e607c7f75daa564a053d
|
[
"MIT"
] | 1
|
2021-10-06T13:48:36.000Z
|
2021-10-06T13:48:36.000Z
|
migrations/versions/2019_03_04_optional_chart_and_table_classifications.py.py
|
AlexKouzy/ethnicity-facts-and-figures-publisher
|
18ab2495a8633f585e18e607c7f75daa564a053d
|
[
"MIT"
] | 116
|
2018-11-02T17:20:47.000Z
|
2022-02-09T11:06:22.000Z
|
migrations/versions/2019_03_04_optional_chart_and_table_classifications.py.py
|
racedisparityaudit/rd_cms
|
a12f0e3f5461cc41eed0077ed02e11efafc5dd76
|
[
"MIT"
] | 2
|
2018-11-09T16:47:35.000Z
|
2020-04-09T13:06:48.000Z
|
"""Make some fields on Chart and Table nullable
We want to copy chart and table data across to these tables but have no way to add a
classification for each one, so we'll have to live with some nulls in here.
Revision ID: 2019_03_04_make_fields_nullable
Revises: 2019_03_04_chart_table_settings
Create Date: 2019-03-05 16:38:12.835894
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "2019_03_04_make_fields_nullable"
down_revision = "2019_03_04_chart_table_settings"
branch_labels = None
depends_on = None
def upgrade():
    op.alter_column("dimension_chart", "classification_id", existing_type=sa.VARCHAR(length=255), nullable=True)
    op.alter_column("dimension_chart", "includes_all", existing_type=sa.BOOLEAN(), nullable=True)
    op.alter_column("dimension_chart", "includes_parents", existing_type=sa.BOOLEAN(), nullable=True)
    op.alter_column("dimension_chart", "includes_unknown", existing_type=sa.BOOLEAN(), nullable=True)
    op.alter_column("dimension_table", "classification_id", existing_type=sa.VARCHAR(length=255), nullable=True)
    op.alter_column("dimension_table", "includes_all", existing_type=sa.BOOLEAN(), nullable=True)
    op.alter_column("dimension_table", "includes_parents", existing_type=sa.BOOLEAN(), nullable=True)
    op.alter_column("dimension_table", "includes_unknown", existing_type=sa.BOOLEAN(), nullable=True)


def downgrade():
    op.alter_column("dimension_table", "includes_unknown", existing_type=sa.BOOLEAN(), nullable=False)
    op.alter_column("dimension_table", "includes_parents", existing_type=sa.BOOLEAN(), nullable=False)
    op.alter_column("dimension_table", "includes_all", existing_type=sa.BOOLEAN(), nullable=False)
    op.alter_column("dimension_table", "classification_id", existing_type=sa.VARCHAR(length=255), nullable=False)
    op.alter_column("dimension_chart", "includes_unknown", existing_type=sa.BOOLEAN(), nullable=False)
    op.alter_column("dimension_chart", "includes_parents", existing_type=sa.BOOLEAN(), nullable=False)
    op.alter_column("dimension_chart", "includes_all", existing_type=sa.BOOLEAN(), nullable=False)
    op.alter_column("dimension_chart", "classification_id", existing_type=sa.VARCHAR(length=255), nullable=False)
| 54.047619
| 113
| 0.781498
| 320
| 2,270
| 5.271875
| 0.25
| 0.06639
| 0.123296
| 0.208654
| 0.791938
| 0.791938
| 0.73029
| 0.73029
| 0.727919
| 0.727919
| 0
| 0.031189
| 0.096035
| 2,270
| 41
| 114
| 55.365854
| 0.790936
| 0.164317
| 0
| 0
| 0
| 0
| 0.289042
| 0.032822
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.083333
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
cea540d8d7c6742e25322196c14ce8e5fffdddeb
| 57,014
|
py
|
Python
|
src/ralph_assets/migrations/0012_auto__add_transitionshistory__add_attachment__add_coaoemos__add_action.py
|
xliiv/ralph_assets
|
73e5e46db380c9a8dafb9ca1bd5abe47d5733385
|
[
"Apache-2.0"
] | null | null | null |
src/ralph_assets/migrations/0012_auto__add_transitionshistory__add_attachment__add_coaoemos__add_action.py
|
xliiv/ralph_assets
|
73e5e46db380c9a8dafb9ca1bd5abe47d5733385
|
[
"Apache-2.0"
] | null | null | null |
src/ralph_assets/migrations/0012_auto__add_transitionshistory__add_attachment__add_coaoemos__add_action.py
|
xliiv/ralph_assets
|
73e5e46db380c9a8dafb9ca1bd5abe47d5733385
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Removing unique constraint on 'Licence', fields ['sn']
db.delete_unique('ralph_assets_licence', ['sn'])
# Adding model 'TransitionsHistory'
db.create_table('ralph_assets_transitionshistory', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('modified', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('cache_version', self.gf('django.db.models.fields.PositiveIntegerField')(default=0)),
('transition', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['ralph_assets.Transition'])),
('logged_user', self.gf('django.db.models.fields.related.ForeignKey')(related_name=u'logged user', to=orm['auth.User'])),
('affected_user', self.gf('django.db.models.fields.related.ForeignKey')(related_name=u'affected user', to=orm['auth.User'])),
('report_filename', self.gf('django.db.models.fields.CharField')(max_length=256, null=True, blank=True)),
('uid', self.gf('django.db.models.fields.CharField')(max_length=36)),
('report_file', self.gf('django.db.models.fields.files.FileField')(max_length=100)),
))
db.send_create_signal('ralph_assets', ['TransitionsHistory'])
# Adding M2M table for field assets on 'TransitionsHistory'
db.create_table('ralph_assets_transitionshistory_assets', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('transitionshistory', models.ForeignKey(orm['ralph_assets.transitionshistory'], null=False)),
('asset', models.ForeignKey(orm['ralph_assets.asset'], null=False))
))
db.create_unique('ralph_assets_transitionshistory_assets', ['transitionshistory_id', 'asset_id'])
# Adding model 'Attachment'
db.create_table('ralph_assets_attachment', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('modified', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('cache_version', self.gf('django.db.models.fields.PositiveIntegerField')(default=0)),
('original_filename', self.gf('django.db.models.fields.CharField')(max_length=255)),
('file', self.gf('django.db.models.fields.files.FileField')(max_length=100, null=True)),
('uploaded_by', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True, blank=True)),
))
db.send_create_signal('ralph_assets', ['Attachment'])
# Adding model 'CoaOemOs'
db.create_table('ralph_assets_coaoemos', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=75, db_index=True)),
))
db.send_create_signal('ralph_assets', ['CoaOemOs'])
# Adding model 'Action'
db.create_table('ralph_assets_action', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=75, db_index=True)),
))
db.send_create_signal('ralph_assets', ['Action'])
# Adding model 'ReportOdtSource'
db.create_table('ralph_assets_reportodtsource', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=75, db_index=True)),
('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('modified', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('cache_version', self.gf('django.db.models.fields.PositiveIntegerField')(default=0)),
('slug', self.gf('django.db.models.fields.SlugField')(unique=True, max_length=100)),
('template', self.gf('django.db.models.fields.files.FileField')(max_length=100)),
))
db.send_create_signal('ralph_assets', ['ReportOdtSource'])
# Adding model 'Service'
db.create_table('ralph_assets_service', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=75, db_index=True)),
('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('modified', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('cache_version', self.gf('django.db.models.fields.PositiveIntegerField')(default=0)),
('profit_center', self.gf('django.db.models.fields.CharField')(max_length=1024, blank=True)),
('cost_center', self.gf('django.db.models.fields.CharField')(max_length=1024, blank=True)),
))
db.send_create_signal('ralph_assets', ['Service'])
# Adding model 'Transition'
db.create_table('ralph_assets_transition', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=75, db_index=True)),
('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('modified', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('cache_version', self.gf('django.db.models.fields.PositiveIntegerField')(default=0)),
('slug', self.gf('django.db.models.fields.SlugField')(unique=True, max_length=100)),
('from_status', self.gf('django.db.models.fields.PositiveSmallIntegerField')(null=True, blank=True)),
('to_status', self.gf('django.db.models.fields.PositiveSmallIntegerField')()),
))
db.send_create_signal('ralph_assets', ['Transition'])
# Adding M2M table for field actions on 'Transition'
db.create_table('ralph_assets_transition_actions', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('transition', models.ForeignKey(orm['ralph_assets.transition'], null=False)),
('action', models.ForeignKey(orm['ralph_assets.action'], null=False))
))
db.create_unique('ralph_assets_transition_actions', ['transition_id', 'action_id'])
# Adding model 'LicenceHistoryChange'
db.create_table('ralph_assets_licencehistorychange', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('date', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('licence', self.gf('django.db.models.fields.related.ForeignKey')(default=None, to=orm['ralph_assets.Licence'], null=True, on_delete=models.SET_NULL, blank=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(default=None, to=orm['auth.User'], null=True, on_delete=models.SET_NULL, blank=True)),
('field_name', self.gf('django.db.models.fields.CharField')(default=u'', max_length=64)),
('old_value', self.gf('django.db.models.fields.CharField')(default=u'', max_length=255)),
('new_value', self.gf('django.db.models.fields.CharField')(default=u'', max_length=255)),
))
db.send_create_signal('ralph_assets', ['LicenceHistoryChange'])
# Adding model 'ImportProblem'
db.create_table('ralph_assets_importproblem', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('content_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['contenttypes.ContentType'])),
('object_id', self.gf('django.db.models.fields.PositiveIntegerField')()),
('severity', self.gf('django.db.models.fields.PositiveSmallIntegerField')()),
('message', self.gf('django.db.models.fields.TextField')()),
))
db.send_create_signal('ralph_assets', ['ImportProblem'])
# Deleting field 'Licence.bought_date'
db.delete_column('ralph_assets_licence', 'bought_date')
# Deleting field 'Licence.used'
db.delete_column('ralph_assets_licence', 'used')
# Adding field 'Licence.invoice_date'
db.add_column('ralph_assets_licence', 'invoice_date',
self.gf('django.db.models.fields.DateField')(null=True, blank=True),
keep_default=False)
# Adding field 'Licence.provider'
db.add_column('ralph_assets_licence', 'provider',
self.gf('django.db.models.fields.CharField')(max_length=100, null=True, blank=True),
keep_default=False)
# Adding field 'Licence.invoice_no'
db.add_column('ralph_assets_licence', 'invoice_no',
self.gf('django.db.models.fields.CharField')(db_index=True, max_length=128, null=True, blank=True),
keep_default=False)
# Adding M2M table for field assets on 'Licence'
db.create_table('ralph_assets_licence_assets', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('licence', models.ForeignKey(orm['ralph_assets.licence'], null=False)),
('asset', models.ForeignKey(orm['ralph_assets.asset'], null=False))
))
db.create_unique('ralph_assets_licence_assets', ['licence_id', 'asset_id'])
# Adding M2M table for field users on 'Licence'
db.create_table('ralph_assets_licence_users', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('licence', models.ForeignKey(orm['ralph_assets.licence'], null=False)),
('user', models.ForeignKey(orm['auth.user'], null=False))
))
db.create_unique('ralph_assets_licence_users', ['licence_id', 'user_id'])
# Adding M2M table for field attachments on 'Licence'
db.create_table('ralph_assets_licence_attachments', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('licence', models.ForeignKey(orm['ralph_assets.licence'], null=False)),
('attachment', models.ForeignKey(orm['ralph_assets.attachment'], null=False))
))
db.create_unique('ralph_assets_licence_attachments', ['licence_id', 'attachment_id'])
# Changing field 'Licence.niw'
db.alter_column('ralph_assets_licence', 'niw', self.gf('django.db.models.fields.CharField')(default='N/A', unique=True, max_length=50))
# Adding unique constraint on 'Licence', fields ['niw']
db.create_unique('ralph_assets_licence', ['niw'])
# Changing field 'Licence.price'
db.alter_column('ralph_assets_licence', 'price', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=10, decimal_places=2))
# Changing field 'Licence.sn'
db.alter_column('ralph_assets_licence', 'sn', self.gf('django.db.models.fields.TextField')(null=True))
# Deleting field 'Asset.category'
db.delete_column('ralph_assets_asset', 'category_id')
# Adding field 'Asset.location'
db.add_column('ralph_assets_asset', 'location',
self.gf('django.db.models.fields.CharField')(max_length=128, null=True, blank=True),
keep_default=False)
# Adding field 'Asset.service_name'
db.add_column('ralph_assets_asset', 'service_name',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['ralph_assets.Service'], null=True, blank=True),
keep_default=False)
# Adding field 'Asset.loan_end_date'
db.add_column('ralph_assets_asset', 'loan_end_date',
self.gf('django.db.models.fields.DateField')(default=None, null=True, blank=True),
keep_default=False)
# Adding field 'Asset.note'
db.add_column('ralph_assets_asset', 'note',
self.gf('django.db.models.fields.CharField')(default='', max_length=1024, blank=True),
keep_default=False)
# Adding M2M table for field attachments on 'Asset'
db.create_table('ralph_assets_asset_attachments', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('asset', models.ForeignKey(orm['ralph_assets.asset'], null=False)),
('attachment', models.ForeignKey(orm['ralph_assets.attachment'], null=False))
))
db.create_unique('ralph_assets_asset_attachments', ['asset_id', 'attachment_id'])
# Changing field 'Asset.support_period'
db.alter_column('ralph_assets_asset', 'support_period', self.gf('django.db.models.fields.PositiveSmallIntegerField')(null=True))
# Changing field 'Asset.source'
db.alter_column('ralph_assets_asset', 'source', self.gf('django.db.models.fields.PositiveIntegerField')(null=True))
# Changing field 'Asset.status'
db.alter_column('ralph_assets_asset', 'status', self.gf('django.db.models.fields.PositiveSmallIntegerField')(null=True))
# Changing field 'Asset.price'
db.alter_column('ralph_assets_asset', 'price', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=10, decimal_places=2))
# Changing field 'Asset.niw'
db.alter_column('ralph_assets_asset', 'niw', self.gf('django.db.models.fields.CharField')(max_length=200, null=True))
# Deleting field 'OfficeInfo.version'
db.delete_column('ralph_assets_officeinfo', 'version')
# Deleting field 'OfficeInfo.last_logged_user'
db.delete_column('ralph_assets_officeinfo', 'last_logged_user')
# Deleting field 'OfficeInfo.date_of_last_inventory'
db.delete_column('ralph_assets_officeinfo', 'date_of_last_inventory')
# Deleting field 'OfficeInfo.attachment'
db.delete_column('ralph_assets_officeinfo', 'attachment')
# Deleting field 'OfficeInfo.license_type'
db.delete_column('ralph_assets_officeinfo', 'license_type')
# Adding field 'OfficeInfo.coa_number'
db.add_column('ralph_assets_officeinfo', 'coa_number',
self.gf('django.db.models.fields.CharField')(max_length=256, null=True, blank=True),
keep_default=False)
# Adding field 'OfficeInfo.coa_oem_os'
db.add_column('ralph_assets_officeinfo', 'coa_oem_os',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['ralph_assets.CoaOemOs'], null=True, blank=True),
keep_default=False)
# Adding field 'OfficeInfo.imei'
db.add_column('ralph_assets_officeinfo', 'imei',
self.gf('django.db.models.fields.CharField')(max_length=18, unique=True, null=True, blank=True),
keep_default=False)
# Adding field 'OfficeInfo.purpose'
db.add_column('ralph_assets_officeinfo', 'purpose',
self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=None, null=True, blank=True),
keep_default=False)
# Changing field 'OfficeInfo.license_key'
db.alter_column('ralph_assets_officeinfo', 'license_key', self.gf('django.db.models.fields.TextField')(null=True))
def backwards(self, orm):
# Removing unique constraint on 'Licence', fields ['niw']
db.delete_unique('ralph_assets_licence', ['niw'])
# Deleting model 'TransitionsHistory'
db.delete_table('ralph_assets_transitionshistory')
# Removing M2M table for field assets on 'TransitionsHistory'
db.delete_table('ralph_assets_transitionshistory_assets')
# Deleting model 'Attachment'
db.delete_table('ralph_assets_attachment')
# Deleting model 'CoaOemOs'
db.delete_table('ralph_assets_coaoemos')
# Deleting model 'Action'
db.delete_table('ralph_assets_action')
# Deleting model 'ReportOdtSource'
db.delete_table('ralph_assets_reportodtsource')
# Deleting model 'Service'
db.delete_table('ralph_assets_service')
# Deleting model 'Transition'
db.delete_table('ralph_assets_transition')
# Removing M2M table for field actions on 'Transition'
db.delete_table('ralph_assets_transition_actions')
# Deleting model 'LicenceHistoryChange'
db.delete_table('ralph_assets_licencehistorychange')
# Deleting model 'ImportProblem'
db.delete_table('ralph_assets_importproblem')
# Adding field 'Licence.bought_date'
db.add_column('ralph_assets_licence', 'bought_date',
self.gf('django.db.models.fields.DateField')(default=None),
keep_default=False)
# Adding field 'Licence.used'
db.add_column('ralph_assets_licence', 'used',
self.gf('django.db.models.fields.IntegerField')(default=0),
keep_default=False)
# Deleting field 'Licence.invoice_date'
db.delete_column('ralph_assets_licence', 'invoice_date')
# Deleting field 'Licence.provider'
db.delete_column('ralph_assets_licence', 'provider')
# Deleting field 'Licence.invoice_no'
db.delete_column('ralph_assets_licence', 'invoice_no')
# Removing M2M table for field assets on 'Licence'
db.delete_table('ralph_assets_licence_assets')
# Removing M2M table for field users on 'Licence'
db.delete_table('ralph_assets_licence_users')
# Removing M2M table for field attachments on 'Licence'
db.delete_table('ralph_assets_licence_attachments')
# Changing field 'Licence.niw'
db.alter_column('ralph_assets_licence', 'niw', self.gf('django.db.models.fields.CharField')(max_length=50, null=True))
# Changing field 'Licence.price'
db.alter_column('ralph_assets_licence', 'price', self.gf('django.db.models.fields.DecimalField')(max_digits=10, decimal_places=2))
# Changing field 'Licence.sn'
db.alter_column('ralph_assets_licence', 'sn', self.gf('django.db.models.fields.CharField')(unique=True, max_length=200, null=True))
# Adding unique constraint on 'Licence', fields ['sn']
db.create_unique('ralph_assets_licence', ['sn'])
# Adding field 'Asset.category'
db.add_column('ralph_assets_asset', 'category',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['ralph_assets.AssetCategory'], null=True, blank=True),
keep_default=False)
# Deleting field 'Asset.location'
db.delete_column('ralph_assets_asset', 'location')
# Deleting field 'Asset.service_name'
db.delete_column('ralph_assets_asset', 'service_name_id')
# Deleting field 'Asset.loan_end_date'
db.delete_column('ralph_assets_asset', 'loan_end_date')
# Deleting field 'Asset.note'
db.delete_column('ralph_assets_asset', 'note')
# Removing M2M table for field attachments on 'Asset'
db.delete_table('ralph_assets_asset_attachments')
# Changing field 'Asset.support_period'
db.alter_column('ralph_assets_asset', 'support_period', self.gf('django.db.models.fields.PositiveSmallIntegerField')())
# Changing field 'Asset.source'
db.alter_column('ralph_assets_asset', 'source', self.gf('django.db.models.fields.PositiveIntegerField')(default=None))
# Changing field 'Asset.status'
db.alter_column('ralph_assets_asset', 'status', self.gf('django.db.models.fields.PositiveSmallIntegerField')())
# Changing field 'Asset.price'
db.alter_column('ralph_assets_asset', 'price', self.gf('django.db.models.fields.DecimalField')(max_digits=10, decimal_places=2))
# Changing field 'Asset.niw'
db.alter_column('ralph_assets_asset', 'niw', self.gf('django.db.models.fields.CharField')(max_length=50, null=True))
# Adding field 'OfficeInfo.version'
db.add_column('ralph_assets_officeinfo', 'version',
self.gf('django.db.models.fields.CharField')(default='', max_length=50, blank=True),
keep_default=False)
# Adding field 'OfficeInfo.last_logged_user'
db.add_column('ralph_assets_officeinfo', 'last_logged_user',
self.gf('django.db.models.fields.CharField')(max_length=100, null=True, blank=True),
keep_default=False)
# Adding field 'OfficeInfo.date_of_last_inventory'
db.add_column('ralph_assets_officeinfo', 'date_of_last_inventory',
self.gf('django.db.models.fields.DateField')(null=True, blank=True),
keep_default=False)
# Adding field 'OfficeInfo.attachment'
db.add_column('ralph_assets_officeinfo', 'attachment',
self.gf('django.db.models.fields.files.FileField')(default=None, max_length=100, blank=True),
keep_default=False)
# Adding field 'OfficeInfo.license_type'
db.add_column('ralph_assets_officeinfo', 'license_type',
self.gf('django.db.models.fields.IntegerField')(null=True, blank=True),
keep_default=False)
# Deleting field 'OfficeInfo.coa_number'
db.delete_column('ralph_assets_officeinfo', 'coa_number')
# Deleting field 'OfficeInfo.coa_oem_os'
db.delete_column('ralph_assets_officeinfo', 'coa_oem_os_id')
# Deleting field 'OfficeInfo.imei'
db.delete_column('ralph_assets_officeinfo', 'imei')
# Deleting field 'OfficeInfo.purpose'
db.delete_column('ralph_assets_officeinfo', 'purpose')
# Changing field 'OfficeInfo.license_key'
db.alter_column('ralph_assets_officeinfo', 'license_key', self.gf('django.db.models.fields.CharField')(default='', max_length=255))
models = {
'account.profile': {
'Meta': {'object_name': 'Profile'},
'activation_token': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '40', 'blank': 'True'}),
'birth_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'company': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'cost_center': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'country': ('django.db.models.fields.PositiveIntegerField', [], {'default': '153'}),
'department': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'employee_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'gender': ('django.db.models.fields.PositiveIntegerField', [], {'default': '2'}),
'home_page': (u'dj.choices.fields.ChoiceField', [], {'unique': 'False', 'primary_key': 'False', 'db_column': 'None', 'blank': 'False', u'default': '1', 'null': 'False', '_in_south': 'True', 'db_index': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_active': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'manager': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'nick': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '30', 'blank': 'True'}),
'profit_center': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'time_zone': ('django.db.models.fields.FloatField', [], {'default': '1.0'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'ralph_assets.action': {
'Meta': {'object_name': 'Action'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75', 'db_index': 'True'})
},
'ralph_assets.asset': {
'Meta': {'object_name': 'Asset'},
'attachments': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['ralph_assets.Attachment']", 'null': 'True', 'blank': 'True'}),
'barcode': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '200', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'+'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['account.Profile']", 'blank': 'True', 'null': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'delivery_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'deprecation_rate': ('django.db.models.fields.DecimalField', [], {'default': '25', 'max_digits': '5', 'decimal_places': '2', 'blank': 'True'}),
'device_info': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['ralph_assets.DeviceInfo']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'force_deprecation': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invoice_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'invoice_no': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'loan_end_date': ('django.db.models.fields.DateField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'model': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['ralph_assets.AssetModel']", 'on_delete': 'models.PROTECT'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'+'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['account.Profile']", 'blank': 'True', 'null': 'True'}),
'niw': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'note': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'office_info': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['ralph_assets.OfficeInfo']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'order_no': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'owner'", 'null': 'True', 'to': "orm['auth.User']"}),
'part_info': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['ralph_assets.PartInfo']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'price': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'production_use_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'production_year': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'property_of': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['ralph_assets.AssetOwner']", 'null': 'True', 'on_delete': 'models.PROTECT', 'blank': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'provider_order_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'remarks': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'request_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'service_name': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['ralph_assets.Service']", 'null': 'True', 'blank': 'True'}),
'slots': ('django.db.models.fields.FloatField', [], {'default': '0', 'max_length': '64'}),
'sn': ('django.db.models.fields.CharField', [], {'max_length': '200', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'source': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1', 'null': 'True', 'blank': 'True'}),
'support_period': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'support_price': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'support_type': ('django.db.models.fields.CharField', [], {'max_length': '150', 'blank': 'True'}),
'support_void_reporting': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'task_url': ('django.db.models.fields.URLField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'user'", 'null': 'True', 'to': "orm['auth.User']"}),
'warehouse': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['ralph_assets.Warehouse']", 'on_delete': 'models.PROTECT'})
},
'ralph_assets.assetcategory': {
'Meta': {'object_name': 'AssetCategory'},
'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'+'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['account.Profile']", 'blank': 'True', 'null': 'True'}),
'is_blade': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'+'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['account.Profile']", 'blank': 'True', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "u'children'", 'null': 'True', 'to': "orm['ralph_assets.AssetCategory']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'primary_key': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'type': ('django.db.models.fields.PositiveIntegerField', [], {})
},
'ralph_assets.assethistorychange': {
'Meta': {'object_name': 'AssetHistoryChange'},
'asset': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['ralph_assets.Asset']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'comment': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'device_info': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['ralph_assets.DeviceInfo']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'field_name': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '64'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'new_value': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255'}),
'office_info': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['ralph_assets.OfficeInfo']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'old_value': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255'}),
'part_info': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['ralph_assets.PartInfo']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['auth.User']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'})
},
'ralph_assets.assetmanufacturer': {
'Meta': {'object_name': 'AssetManufacturer'},
'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'+'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['account.Profile']", 'blank': 'True', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'+'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['account.Profile']", 'blank': 'True', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75', 'db_index': 'True'})
},
'ralph_assets.assetmodel': {
'Meta': {'object_name': 'AssetModel'},
'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['ralph_assets.AssetCategory']", 'null': 'True', 'blank': 'True'}),
'cores_count': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'+'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['account.Profile']", 'blank': 'True', 'null': 'True'}),
'height_of_device': ('django.db.models.fields.FloatField', [], {'default': '0', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'manufacturer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['ralph_assets.AssetManufacturer']", 'null': 'True', 'on_delete': 'models.PROTECT', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'+'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['account.Profile']", 'blank': 'True', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75', 'db_index': 'True'}),
'power_consumption': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'type': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'})
},
'ralph_assets.assetowner': {
'Meta': {'object_name': 'AssetOwner'},
'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75', 'db_index': 'True'})
},
'ralph_assets.attachment': {
'Meta': {'object_name': 'Attachment'},
'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'uploaded_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'ralph_assets.coaoemos': {
'Meta': {'object_name': 'CoaOemOs'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75', 'db_index': 'True'})
},
'ralph_assets.deviceinfo': {
'Meta': {'object_name': 'DeviceInfo'},
'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'rack': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'ralph_device_id': ('django.db.models.fields.IntegerField', [], {'default': 'None', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'u_height': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'u_level': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'})
},
'ralph_assets.importproblem': {
'Meta': {'object_name': 'ImportProblem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'severity': ('django.db.models.fields.PositiveSmallIntegerField', [], {})
},
'ralph_assets.licence': {
'Meta': {'object_name': 'Licence'},
'accounting_id': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'asset_type': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'assets': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['ralph_assets.Asset']", 'symmetrical': 'False'}),
'attachments': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['ralph_assets.Attachment']", 'null': 'True', 'blank': 'True'}),
'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invoice_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'invoice_no': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'licence_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['ralph_assets.LicenceType']", 'on_delete': 'models.PROTECT'}),
'manufacturer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['ralph_assets.AssetManufacturer']", 'null': 'True', 'on_delete': 'models.PROTECT', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'niw': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
'number_bought': ('django.db.models.fields.IntegerField', [], {}),
'order_no': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "u'children'", 'null': 'True', 'to': "orm['ralph_assets.Licence']"}),
'price': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'property_of': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['ralph_assets.AssetOwner']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'sn': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'software_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['ralph_assets.SoftwareCategory']", 'on_delete': 'models.PROTECT'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'symmetrical': 'False'}),
'valid_thru': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
},
'ralph_assets.licencehistorychange': {
'Meta': {'object_name': 'LicenceHistoryChange'},
'date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'field_name': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '64'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'licence': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['ralph_assets.Licence']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'new_value': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255'}),
'old_value': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['auth.User']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'})
},
'ralph_assets.licencetype': {
'Meta': {'object_name': 'LicenceType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75', 'db_index': 'True'})
},
'ralph_assets.officeinfo': {
'Meta': {'object_name': 'OfficeInfo'},
'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'coa_number': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'coa_oem_os': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['ralph_assets.CoaOemOs']", 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'imei': ('django.db.models.fields.CharField', [], {'max_length': '18', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'license_key': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'purpose': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'})
},
'ralph_assets.partinfo': {
'Meta': {'object_name': 'PartInfo'},
'barcode_salvaged': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'device': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'device'", 'null': 'True', 'to': "orm['ralph_assets.Asset']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'source_device': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'source_device'", 'null': 'True', 'to': "orm['ralph_assets.Asset']"})
},
'ralph_assets.reportodtsource': {
'Meta': {'object_name': 'ReportOdtSource'},
'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75', 'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'}),
'template': ('django.db.models.fields.files.FileField', [], {'max_length': '100'})
},
'ralph_assets.service': {
'Meta': {'object_name': 'Service'},
'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'cost_center': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75', 'db_index': 'True'}),
'profit_center': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'})
},
'ralph_assets.softwarecategory': {
'Meta': {'object_name': 'SoftwareCategory'},
'asset_type': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75', 'db_index': 'True'})
},
'ralph_assets.transition': {
'Meta': {'object_name': 'Transition'},
'actions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['ralph_assets.Action']", 'symmetrical': 'False'}),
'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'from_status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75', 'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'}),
'to_status': ('django.db.models.fields.PositiveSmallIntegerField', [], {})
},
'ralph_assets.transitionshistory': {
'Meta': {'ordering': "[u'-created']", 'object_name': 'TransitionsHistory'},
'affected_user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'affected user'", 'to': "orm['auth.User']"}),
'assets': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['ralph_assets.Asset']", 'symmetrical': 'False'}),
'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'logged_user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'logged user'", 'to': "orm['auth.User']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'report_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'report_filename': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'transition': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['ralph_assets.Transition']"}),
'uid': ('django.db.models.fields.CharField', [], {'max_length': '36'})
},
'ralph_assets.warehouse': {
'Meta': {'object_name': 'Warehouse'},
'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'+'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['account.Profile']", 'blank': 'True', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'+'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['account.Profile']", 'blank': 'True', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75', 'db_index': 'True'})
}
}
complete_apps = ['ralph_assets']
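Each entry in the frozen models dict above is a triple of a dotted field-class path, positional args, and keyword args, with the keyword values stored as repr() strings. A minimal sketch of reading one such entry back into Python values; only plain literals are handled here, and values such as 'datetime.datetime.now' or "orm['auth.User']" are left to the migration framework itself.
from ast import literal_eval

# One entry copied from the dict above ('slots' on ralph_assets.asset).
frozen = ('django.db.models.fields.FloatField', [], {'default': '0', 'max_length': '64'})

field_path, positional_args, keyword_args = frozen
field_name = field_path.rsplit('.', 1)[1]
# literal_eval only covers plain literals; non-literal values need the ORM freezer.
kwargs = {key: literal_eval(value) for key, value in keyword_args.items()}
print(field_name, positional_args, kwargs)
# FloatField [] {'default': 0, 'max_length': 64}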
| 74.236979
| 224
| 0.60522
| 6,156
| 57,014
| 5.44883
| 0.045971
| 0.083475
| 0.145664
| 0.208091
| 0.880273
| 0.842024
| 0.787109
| 0.722804
| 0.674478
| 0.630922
| 0
| 0.007668
| 0.183411
| 57,014
| 767
| 225
| 74.333768
| 0.712804
| 0.057933
| 0
| 0.378007
| 0
| 0
| 0.534773
| 0.322815
| 0
| 0
| 0
| 0
| 0
| 1
| 0.003436
| false
| 0.001718
| 0.015464
| 0
| 0.024055
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
cead398064b594593f3430fbc788b9476bf86da6
| 150
|
py
|
Python
|
venv/Lib/site-packages/clyent/errors.py
|
GiovanniConserva/TestDeploy
|
7a8242df6fe996b1029497d2d87295d1531b6139
|
[
"BSD-3-Clause"
] | null | null | null |
venv/Lib/site-packages/clyent/errors.py
|
GiovanniConserva/TestDeploy
|
7a8242df6fe996b1029497d2d87295d1531b6139
|
[
"BSD-3-Clause"
] | null | null | null |
venv/Lib/site-packages/clyent/errors.py
|
GiovanniConserva/TestDeploy
|
7a8242df6fe996b1029497d2d87295d1531b6139
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import absolute_import, print_function, unicode_literals
class ShowHelp(Exception):
pass
class ClyentError(Exception):
pass
| 18.75
| 72
| 0.8
| 17
| 150
| 6.647059
| 0.764706
| 0.230089
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.146667
| 150
| 7
| 73
| 21.428571
| 0.882813
| 0
| 0
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.4
| 0.2
| 0
| 0.6
| 0.2
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
|
0
| 6
|
cebb72569f74c340b49b55e56cd5cfb94ded36d4
| 229
|
py
|
Python
|
test/webdnn_test/graph_test/operators_test/sigmoid_test.py
|
steerapi/webdnn
|
1df51cc094e5a528cfd3452c264905708eadb491
|
[
"MIT"
] | 1
|
2021-04-09T15:55:35.000Z
|
2021-04-09T15:55:35.000Z
|
test/webdnn_test/graph_test/operators_test/sigmoid_test.py
|
steerapi/webdnn
|
1df51cc094e5a528cfd3452c264905708eadb491
|
[
"MIT"
] | null | null | null |
test/webdnn_test/graph_test/operators_test/sigmoid_test.py
|
steerapi/webdnn
|
1df51cc094e5a528cfd3452c264905708eadb491
|
[
"MIT"
] | null | null | null |
from test.webdnn_test.graph_test.operators_test.util import template_test_unary_operator
from webdnn.graph.operators.sigmoid import Sigmoid
def template():
template_test_unary_operator(Sigmoid)
def test():
template()
| 20.818182
| 88
| 0.812227
| 31
| 229
| 5.709677
| 0.387097
| 0.135593
| 0.19209
| 0.282486
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.113537
| 229
| 10
| 89
| 22.9
| 0.871921
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
cebdd561ae5cf73cc61b02a50a7e42a495c58927
| 67
|
py
|
Python
|
deem/pytorch/layers/__init__.py
|
xxaxtt/TwoTowers
|
206c6b38a2f72486906d391c5176e4508036aac0
|
[
"Apache-2.0"
] | 14
|
2021-09-22T02:24:16.000Z
|
2021-12-11T11:59:02.000Z
|
deem/pytorch/layers/__init__.py
|
xxaxtt/TwoTowers
|
206c6b38a2f72486906d391c5176e4508036aac0
|
[
"Apache-2.0"
] | 2
|
2021-10-16T04:39:21.000Z
|
2021-12-01T08:04:46.000Z
|
deem/pytorch/layers/__init__.py
|
xxaxtt/TwoTowers
|
206c6b38a2f72486906d391c5176e4508036aac0
|
[
"Apache-2.0"
] | 5
|
2021-10-09T11:47:53.000Z
|
2021-11-25T04:41:24.000Z
|
from .embedding import *
from .sequence import *
from .mlp import *
| 22.333333
| 24
| 0.746269
| 9
| 67
| 5.555556
| 0.555556
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.164179
| 67
| 3
| 25
| 22.333333
| 0.892857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0c7a5957fe9864225fb891e5477469385f447a91
| 3,456
|
py
|
Python
|
complex_venv/lib/python3.7/site-packages/test/test_graph_list_of_file_inputs.py
|
lubianat/complex_bot
|
e0ddabcc0487c52b14fb94950c5a812f0bdb2283
|
[
"MIT"
] | 1
|
2021-10-06T00:21:10.000Z
|
2021-10-06T00:21:10.000Z
|
complex_venv/lib/python3.7/site-packages/test/test_graph_list_of_file_inputs.py
|
lubianat/complex_bot
|
e0ddabcc0487c52b14fb94950c5a812f0bdb2283
|
[
"MIT"
] | 14
|
2021-01-15T21:51:38.000Z
|
2021-11-10T10:08:22.000Z
|
complex_venv/lib/python3.7/site-packages/test/test_graph_list_of_file_inputs.py
|
lubianat/complex_bot
|
e0ddabcc0487c52b14fb94950c5a812f0bdb2283
|
[
"MIT"
] | 1
|
2021-01-18T10:32:56.000Z
|
2021-01-18T10:32:56.000Z
|
import unittest
from shexer.shaper import Shaper
from test.const import G1, BASE_FILES, G1_NT, default_namespaces, BASE_FILES_GENERAL
from test.t_utils import file_vs_str_tunned_comparison
import os.path as pth
from shexer.consts import NT, TURTLE
_BASE_DIR = BASE_FILES + "graph_list_of_files_input" + pth.sep
class TestGraphListOfFilesInput(unittest.TestCase):
def test_one_turtle(self):
shaper = Shaper(target_classes=["http://xmlns.com/foaf/0.1/Person",
"http://xmlns.com/foaf/0.1/Document"],
graph_list_of_files_input=[G1],
namespaces_dict=default_namespaces(),
all_classes_mode=False,
input_format=TURTLE,
disable_comments=True)
str_result = shaper.shex_graph(string_output=True)
self.assertTrue(file_vs_str_tunned_comparison(file_path=BASE_FILES_GENERAL + "g1_all_classes_no_comments.shex",
str_target=str_result))
def test_one_nt(self): # Should be nt
shaper = Shaper(target_classes=["http://xmlns.com/foaf/0.1/Person",
"http://xmlns.com/foaf/0.1/Document"],
graph_list_of_files_input=[G1_NT],
namespaces_dict=default_namespaces(),
input_format=NT,
all_classes_mode=False,
disable_comments=True)
str_result = shaper.shex_graph(string_output=True)
self.assertTrue(file_vs_str_tunned_comparison(file_path=BASE_FILES_GENERAL + "g1_all_classes_no_comments.shex",
str_target=str_result))
def test_several_nt(self): # Should be nt
shaper = Shaper(target_classes=["http://xmlns.com/foaf/0.1/Person",
"http://xmlns.com/foaf/0.1/Document"],
graph_list_of_files_input=[_BASE_DIR + "g1_p1.nt",
_BASE_DIR + "g1_p2.nt"],
namespaces_dict=default_namespaces(),
input_format=NT,
all_classes_mode=False,
disable_comments=True)
str_result = shaper.shex_graph(string_output=True)
self.assertTrue(file_vs_str_tunned_comparison(file_path=BASE_FILES_GENERAL + "g1_all_classes_no_comments.shex",
str_target=str_result))
def test_several_turtle(self):
shaper = Shaper(target_classes=["http://xmlns.com/foaf/0.1/Person",
"http://xmlns.com/foaf/0.1/Document"],
graph_list_of_files_input=[_BASE_DIR + "g1_p1.ttl",
_BASE_DIR + "g1_p2.ttl"],
namespaces_dict=default_namespaces(),
all_classes_mode=False,
input_format=TURTLE,
disable_comments=True)
str_result = shaper.shex_graph(string_output=True)
self.assertTrue(file_vs_str_tunned_comparison(file_path=BASE_FILES_GENERAL + "g1_all_classes_no_comments.shex",
str_target=str_result))
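The four tests above follow one pattern: build a Shaper over one or more graph files and compare the serialized ShEx against a reference file. A standalone sketch of that pattern outside the unittest harness, using only the constructor arguments and the shex_graph(string_output=True) call exercised above; the file paths are placeholders and namespaces_dict is omitted on the assumption that it is optional.
from shexer.shaper import Shaper
from shexer.consts import TURTLE

shaper = Shaper(
    target_classes=["http://xmlns.com/foaf/0.1/Person",
                    "http://xmlns.com/foaf/0.1/Document"],
    graph_list_of_files_input=["g1_p1.ttl", "g1_p2.ttl"],  # placeholder paths
    input_format=TURTLE,
    all_classes_mode=False,
    disable_comments=True,
)
shex_text = shaper.shex_graph(string_output=True)
with open("g1_shapes.shex", "w") as handle:
    handle.write(shex_text)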
| 54.857143
| 120
| 0.550926
| 374
| 3,456
| 4.700535
| 0.184492
| 0.040956
| 0.054608
| 0.07281
| 0.836746
| 0.81058
| 0.81058
| 0.81058
| 0.81058
| 0.81058
| 0
| 0.014625
| 0.366898
| 3,456
| 62
| 121
| 55.741935
| 0.788848
| 0.007234
| 0
| 0.666667
| 0
| 0
| 0.132799
| 0.044266
| 0
| 0
| 0
| 0
| 0.074074
| 1
| 0.074074
| false
| 0
| 0.111111
| 0
| 0.203704
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0c8aee4b13af709adea28d410ab48e9fcca43ac4
| 83
|
py
|
Python
|
pyqt_horizontal_selection_square_graphics_view/__init__.py
|
berty-2007/pyqt-horizontal-selection-square-graphics-view
|
29d3d6f63a2d464b0c4b1d64c451439de6f1eded
|
[
"MIT"
] | 1
|
2021-12-23T14:44:07.000Z
|
2021-12-23T14:44:07.000Z
|
pyqt_horizontal_selection_square_graphics_view/__init__.py
|
berty-2007/pyqt-horizontal-selection-square-graphics-view
|
29d3d6f63a2d464b0c4b1d64c451439de6f1eded
|
[
"MIT"
] | null | null | null |
pyqt_horizontal_selection_square_graphics_view/__init__.py
|
berty-2007/pyqt-horizontal-selection-square-graphics-view
|
29d3d6f63a2d464b0c4b1d64c451439de6f1eded
|
[
"MIT"
] | null | null | null |
from .horizontalSelectionSquareGraphicsView import *
from .selectionSquare import *
| 41.5
| 52
| 0.86747
| 6
| 83
| 12
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.084337
| 83
| 2
| 53
| 41.5
| 0.947368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0cb626407dc59dff1be601a5e0499c7a012ea0ad
| 75
|
py
|
Python
|
app/database/base.py
|
CabetoDP/fastapi-crud
|
bbeef58b74b7a010037ca8503a7f05f8b4db2ab4
|
[
"MIT"
] | null | null | null |
app/database/base.py
|
CabetoDP/fastapi-crud
|
bbeef58b74b7a010037ca8503a7f05f8b4db2ab4
|
[
"MIT"
] | null | null | null |
app/database/base.py
|
CabetoDP/fastapi-crud
|
bbeef58b74b7a010037ca8503a7f05f8b4db2ab4
|
[
"MIT"
] | null | null | null |
from app.database.base_class import Base
from app.models.place import Place
| 37.5
| 40
| 0.853333
| 13
| 75
| 4.846154
| 0.615385
| 0.222222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.093333
| 75
| 2
| 41
| 37.5
| 0.926471
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0cdf83ec2ee6735ac3ecbd989380ce0f87917a5d
| 102
|
py
|
Python
|
api/queries/models.py
|
django-doctor/lite-api
|
1ba278ba22ebcbb977dd7c31dd3701151cd036bf
|
[
"MIT"
] | 3
|
2019-05-15T09:30:39.000Z
|
2020-04-22T16:14:23.000Z
|
api/queries/models.py
|
django-doctor/lite-api
|
1ba278ba22ebcbb977dd7c31dd3701151cd036bf
|
[
"MIT"
] | 85
|
2019-04-24T10:39:35.000Z
|
2022-03-21T14:52:12.000Z
|
api/queries/models.py
|
django-doctor/lite-api
|
1ba278ba22ebcbb977dd7c31dd3701151cd036bf
|
[
"MIT"
] | 1
|
2021-01-17T11:12:19.000Z
|
2021-01-17T11:12:19.000Z
|
from api.cases.models import Case
class Query(Case):
"""
Base query class
"""
pass
| 10.2
| 33
| 0.588235
| 13
| 102
| 4.615385
| 0.769231
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.303922
| 102
| 9
| 34
| 11.333333
| 0.84507
| 0.156863
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
0bb5af4cb0e1469e03fc6ee0d14c4d8bfb035eff
| 18,592
|
py
|
Python
|
autoarray/structures/grids/grid_decorators.py
|
jonathanfrawley/PyAutoArray_copy
|
c21e8859bdb20737352147b9904797ac99985b73
|
[
"MIT"
] | null | null | null |
autoarray/structures/grids/grid_decorators.py
|
jonathanfrawley/PyAutoArray_copy
|
c21e8859bdb20737352147b9904797ac99985b73
|
[
"MIT"
] | null | null | null |
autoarray/structures/grids/grid_decorators.py
|
jonathanfrawley/PyAutoArray_copy
|
c21e8859bdb20737352147b9904797ac99985b73
|
[
"MIT"
] | null | null | null |
import numpy as np
from functools import wraps
from autoconf import conf
from autoarray.structures.grids.one_d import abstract_grid_1d
from autoarray.structures.grids.two_d import grid_2d
from autoarray.structures.grids.two_d import grid_2d_interpolate
from autoarray.structures.grids.two_d import grid_2d_iterate
from autoarray.structures.grids.two_d import grid_2d_irregular
from autoarray.structures.arrays.one_d import array_1d
from autoarray.structures.arrays import values
from autoarray import exc
from typing import Union
def grid_1d_to_structure(func):
"""
Homogenize the inputs and outputs of functions that take 2D grids of (y,x) coordinates that return the results
as a NumPy array.
Parameters
----------
func : (obj, grid, *args, **kwargs) -> Object
A function which computes a set of values from a 2D grid of (y,x) coordinates.
Returns
-------
    A function that can accept cartesian or transformed coordinates
"""
@wraps(func)
def wrapper(
obj, grid, *args, **kwargs
) -> Union[array_1d.Array1D, values.ValuesIrregular]:
"""
This decorator homogenizes the input of a "grid_like" 2D structure (`Grid2D`, `Grid2DIterate`,
`Grid2DInterpolate`, `Grid2DIrregular` or `AbstractGrid1D`) into a function. It allows these classes to be
interchangeably input into a function, such that the grid is used to evaluate the function at every (y,x)
coordinates of the grid using specific functionality of the input grid.
The grid_like objects `Grid2D` and `Grid2DIrregular` are input into the function as a slimmed 2D NumPy array
of shape [total_coordinates, 2] where the second dimension stores the (y,x) values. If a `Grid2DIterate` is
input, the function is evaluated using the appropriate iterated_*_from_func* function.
The outputs of the function are converted from a 1D or 2D NumPy Array2D to an `Array2D`, `Grid2D`,
`ValuesIrregular` or `Grid2DIrregular` objects, whichever is applicable as follows:
- If the function returns (y,x) coordinates at every input point, the returned results are a `Grid2D`
or `Grid2DIrregular` structure, the same structure as the input.
- If the function returns scalar values at every input point and a `Grid2D` is input, the returned results are
an `Array2D` structure which uses the same dimensions and mask as the `Grid2D`.
- If the function returns scalar values at every input point and `Grid2DIrregular` are input, the returned
results are a `ValuesIrregular` object with structure resembling that of the `Grid2DIrregular`.
If the input array is not a `Grid2D` structure (e.g. it is a 2D NumPy array) the output is a NumPy array.
Parameters
----------
obj : object
An object whose function uses grid_like inputs to compute quantities at every coordinate on the grid.
grid : Grid2D or Grid2DIrregular
A grid_like object of (y,x) coordinates on which the function values are evaluated.
Returns
-------
The function values evaluated on the grid with the same structure as the input grid_like object.
"""
centre = (0.0, 0.0)
if hasattr(obj, "centre"):
if obj.centre is not None:
centre = obj.centre
angle = 0.0
if hasattr(obj, "angle"):
if obj.angle is not None:
angle = obj.angle + 90.0
if (
isinstance(grid, grid_2d.Grid2D)
or isinstance(grid, grid_2d_iterate.Grid2DIterate)
or isinstance(grid, grid_2d_interpolate.Grid2DInterpolate)
):
grid_2d_projected = grid.grid_2d_radial_projected_from(
centre=centre, angle=angle
)
result = func(obj, grid_2d_projected, *args, **kwargs)
return array_1d.Array1D.manual_slim(
array=result, pixel_scales=grid.pixel_scale
)
elif isinstance(grid, grid_2d_irregular.Grid2DIrregular):
result = func(obj, grid, *args, **kwargs)
return grid.structure_2d_from_result(result=result)
elif isinstance(grid, abstract_grid_1d.AbstractGrid1D):
grid_2d_radial = grid.project_to_radial_grid_2d(angle=angle)
result = func(obj, grid_2d_radial, *args, **kwargs)
return array_1d.Array1D.manual_slim(
array=result, pixel_scales=grid.pixel_scale
)
raise exc.GridException(
"You cannot input a NumPy array to a `quantity_1d_from_grid` method."
)
return wrapper
def grid_1d_output_structure(func):
"""
Homogenize the inputs and outputs of functions that take 2D grids of (y,x) coordinates that return the results
as a NumPy array.
Parameters
----------
func : (obj, grid, *args, **kwargs) -> Object
A function which computes a set of values from a 2D grid of (y,x) coordinates.
Returns
-------
    A function that can accept cartesian or transformed coordinates
"""
@wraps(func)
def wrapper(
obj, grid, *args, **kwargs
) -> Union[array_1d.Array1D, values.ValuesIrregular]:
"""
This decorator homogenizes the input of a "grid_like" 2D structure (`Grid2D`, `Grid2DIterate`,
`Grid2DInterpolate`, `Grid2DIrregular` or `AbstractGrid1D`) into a function. It allows these classes to be
interchangeably input into a function, such that the grid is used to evaluate the function at every (y,x)
coordinates of the grid using specific functionality of the input grid.
The grid_like objects `Grid2D` and `Grid2DIrregular` are input into the function as a slimmed 2D NumPy array
of shape [total_coordinates, 2] where the second dimension stores the (y,x) values. If a `Grid2DIterate` is
input, the function is evaluated using the appropriate iterated_*_from_func* function.
The outputs of the function are converted from a 1D or 2D NumPy Array2D to an `Array2D`, `Grid2D`,
`ValuesIrregular` or `Grid2DIrregular` objects, whichever is applicable as follows:
- If the function returns (y,x) coordinates at every input point, the returned results are a `Grid2D`
or `Grid2DIrregular` structure, the same structure as the input.
- If the function returns scalar values at every input point and a `Grid2D` is input, the returned results are
an `Array2D` structure which uses the same dimensions and mask as the `Grid2D`.
- If the function returns scalar values at every input point and `Grid2DIrregular` are input, the returned
results are a `ValuesIrregular` object with structure resembling that of the `Grid2DIrregular`.
If the input array is not a `Grid2D` structure (e.g. it is a 2D NumPy array) the output is a NumPy array.
Parameters
----------
obj : object
An object whose function uses grid_like inputs to compute quantities at every coordinate on the grid.
grid : Grid2D or Grid2DIrregular
A grid_like object of (y,x) coordinates on which the function values are evaluated.
Returns
-------
The function values evaluated on the grid with the same structure as the input grid_like object.
"""
result = func(obj, grid, *args, **kwargs)
if (
isinstance(grid, grid_2d.Grid2D)
or isinstance(grid, grid_2d_iterate.Grid2DIterate)
or isinstance(grid, grid_2d_interpolate.Grid2DInterpolate)
):
return array_1d.Array1D.manual_slim(
array=result, pixel_scales=grid.pixel_scale
)
elif isinstance(grid, grid_2d_irregular.Grid2DIrregular):
return grid.structure_2d_from_result(result=result)
elif isinstance(grid, abstract_grid_1d.AbstractGrid1D):
return array_1d.Array1D.manual_slim(
array=result, pixel_scales=grid.pixel_scale
)
raise exc.GridException(
"You cannot input a NumPy array to a `quantity_1d_from_grid` method."
)
return wrapper
def grid_2d_to_structure(func):
"""
Homogenize the inputs and outputs of functions that take 2D grids of (y,x) coordinates that return the results
as a NumPy array.
Parameters
----------
func : (obj, grid, *args, **kwargs) -> Object
A function which computes a set of values from a 2D grid of (y,x) coordinates.
Returns
-------
    A function that can accept cartesian or transformed coordinates
"""
@wraps(func)
def wrapper(obj, grid, *args, **kwargs):
"""
This decorator homogenizes the input of a "grid_like" 2D structure (`Grid2D`, `Grid2DIterate`,
`Grid2DInterpolate`, `Grid2DIrregular` or `AbstractGrid1D`) into a function. It allows these classes to be
interchangeably input into a function, such that the grid is used to evaluate the function at every (y,x)
coordinates of the grid using specific functionality of the input grid.
The grid_like objects `Grid2D` and `Grid2DIrregular` are input into the function as a slimmed 2D NumPy array
of shape [total_coordinates, 2] where the second dimension stores the (y,x) values. If a `Grid2DIterate` is
input, the function is evaluated using the appropriate iterated_*_from_func* function.
The outputs of the function are converted from a 1D or 2D NumPy Array2D to an `Array2D`, `Grid2D`,
`ValuesIrregular` or `Grid2DIrregular` objects, whichever is applicable as follows:
- If the function returns (y,x) coordinates at every input point, the returned results are a `Grid2D`
or `Grid2DIrregular` structure, the same structure as the input.
- If the function returns scalar values at every input point and a `Grid2D` is input, the returned results are
an `Array2D` structure which uses the same dimensions and mask as the `Grid2D`.
- If the function returns scalar values at every input point and `Grid2DIrregular` are input, the returned
results are a `ValuesIrregular` object with structure resembling that of the `Grid2DIrregular`.
If the input array is not a `Grid2D` structure (e.g. it is a 2D NumPy array) the output is a NumPy array.
Parameters
----------
obj : object
An object whose function uses grid_like inputs to compute quantities at every coordinate on the grid.
grid : Grid2D or Grid2DIrregular
A grid_like object of (y,x) coordinates on which the function values are evaluated.
Returns
-------
The function values evaluated on the grid with the same structure as the input grid_like object.
"""
if isinstance(grid, grid_2d_iterate.Grid2DIterate):
return grid.iterated_result_from_func(func=func, cls=obj)
elif isinstance(grid, grid_2d_interpolate.Grid2DInterpolate):
return grid.result_from_func(func=func, cls=obj)
elif isinstance(grid, grid_2d_irregular.Grid2DIrregular):
result = func(obj, grid, *args, **kwargs)
return grid.structure_2d_from_result(result=result)
elif isinstance(grid, grid_2d.Grid2D):
result = func(obj, grid, *args, **kwargs)
return grid.structure_2d_from_result(result=result)
elif isinstance(grid, abstract_grid_1d.AbstractGrid1D):
grid_2d_radial = grid.project_to_radial_grid_2d()
result = func(obj, grid_2d_radial, *args, **kwargs)
return grid.structure_2d_from_result(result=result)
if not isinstance(grid, grid_2d_irregular.Grid2DIrregular) and not isinstance(
grid, grid_2d.Grid2D
):
return func(obj, grid, *args, **kwargs)
return wrapper
def grid_2d_to_structure_list(func):
"""
Homogenize the inputs and outputs of functions that take 2D grids of (y,x) coordinates and return the results as
a list of NumPy arrays.
Parameters
----------
func : (obj, grid, *args, **kwargs) -> Object
A function which computes a set of values from a 2D grid of (y,x) coordinates.
Returns
-------
    A function that can accept cartesian or transformed coordinates
"""
@wraps(func)
def wrapper(obj, grid, *args, **kwargs):
"""
This decorator serves the same purpose as the `grid_2d_to_structure` decorator, but it deals with functions whose
output is a list of results as opposed to a single NumPy array. It simply iterates over these lists to perform
the same conversions as `grid_2d_to_structure`.
Parameters
----------
obj : object
An object whose function uses grid_like inputs to compute quantities at every coordinate on the grid.
grid : Grid2D or Grid2DIrregular
A grid_like object of (y,x) coordinates on which the function values are evaluated.
Returns
-------
The function values evaluated on the grid with the same structure as the input grid_like object in a list
of NumPy arrays.
"""
if isinstance(grid, grid_2d_iterate.Grid2DIterate):
mask = grid.mask.mask_new_sub_size_from(
mask=grid.mask, sub_size=max(grid.sub_steps)
)
grid_compute = grid_2d.Grid2D.from_mask(mask=mask)
result_list = func(obj, grid_compute, *args, **kwargs)
result_list = [
grid_compute.structure_2d_from_result(result=result)
for result in result_list
]
result_list = [result.binned for result in result_list]
return grid.grid.structure_2d_list_from_result_list(result_list=result_list)
elif isinstance(grid, grid_2d_interpolate.Grid2DInterpolate):
return func(obj, grid, *args, **kwargs)
elif isinstance(grid, grid_2d_irregular.Grid2DIrregular):
result_list = func(obj, grid, *args, **kwargs)
return grid.structure_2d_list_from_result_list(result_list=result_list)
elif isinstance(grid, grid_2d.Grid2D):
result_list = func(obj, grid, *args, **kwargs)
return grid.structure_2d_list_from_result_list(result_list=result_list)
elif isinstance(grid, abstract_grid_1d.AbstractGrid1D):
grid_2d_radial = grid.project_to_radial_grid_2d()
result_list = func(obj, grid_2d_radial, *args, **kwargs)
return grid.structure_2d_list_from_result_list(result_list=result_list)
if not isinstance(grid, grid_2d_irregular.Grid2DIrregular) and not isinstance(
grid, grid_2d.Grid2D
):
return func(obj, grid, *args, **kwargs)
return wrapper
def transform(func):
"""Checks whether the input Grid2D of (y,x) coordinates have previously been transformed. If they have not \
been transformed then they are transformed.
Parameters
----------
func : (profile, grid *args, **kwargs) -> Object
A function where the input grid is the grid whose coordinates are transformed.
Returns
-------
    A function that can accept cartesian or transformed coordinates
"""
@wraps(func)
def wrapper(cls, grid, *args, **kwargs):
"""
Parameters
----------
cls : Profile
The class that owns the function.
grid : grid_like
The (y, x) coordinates in the original reference frame of the grid.
Returns
-------
A grid_like object whose coordinates may be transformed.
"""
if not isinstance(
grid,
(
grid_2d.Grid2DTransformed,
grid_2d.Grid2DTransformedNumpy,
grid_2d_irregular.Grid2DIrregularTransformed,
),
):
result = func(
cls, cls.transform_grid_to_reference_frame(grid), *args, **kwargs
)
return result
else:
return func(cls, grid, *args, **kwargs)
return wrapper
def relocate_to_radial_minimum(func):
""" Checks whether any coordinates in the grid are radially near (0.0, 0.0), which can lead to numerical faults in \
the evaluation of a function (e.g. numerical integration reaching a singularity at (0.0, 0.0)). If any coordinates
    are radially within the radial minimum threshold, their (y,x) coordinates are shifted to that value to ensure
they are evaluated at that coordinate.
The value the (y,x) coordinates are rounded to is set in the 'radial_min.ini' config.
Parameters
----------
func : (profile, *args, **kwargs) -> Object
A function that takes a grid of coordinates which may have a singularity as (0.0, 0.0)
Returns
-------
    A function that can accept cartesian or transformed coordinates
"""
@wraps(func)
def wrapper(cls, grid, *args, **kwargs):
"""
Parameters
----------
cls : Profile
The class that owns the function.
grid : grid_like
The (y, x) coordinates which are to be radially moved from (0.0, 0.0).
Returns
-------
The grid_like object whose coordinates are radially moved from (0.0, 0.0).
"""
grid_radial_minimum = conf.instance["grids"]["radial_minimum"][
"radial_minimum"
][cls.__class__.__name__]
with np.errstate(all="ignore"): # Division by zero fixed via isnan
grid_radii = cls.grid_to_grid_radii(grid=grid)
grid_radial_scale = np.where(
grid_radii < grid_radial_minimum, grid_radial_minimum / grid_radii, 1.0
)
grid = np.multiply(grid, grid_radial_scale[:, None])
grid[np.isnan(grid)] = grid_radial_minimum
return func(cls, grid, *args, **kwargs)
return wrapper
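The docstrings above describe a type-dispatch pattern: the decorator inspects the class of the incoming grid, evaluates the wrapped function on a plain array, and re-wraps the result in a structure matching the input. The sketch below is a generic, dependency-free illustration of that pattern; GridLike, to_structure and Profile are stand-ins, not autoarray classes.
from functools import wraps
import numpy as np

class GridLike:
    """Stand-in for a structured grid that knows how to re-wrap raw results."""
    def __init__(self, array):
        self.array = np.asarray(array, dtype=float)
    def structure_from_result(self, result):
        return GridLike(result)

def to_structure(func):
    @wraps(func)
    def wrapper(obj, grid, *args, **kwargs):
        # Structured grids get their result re-wrapped; plain arrays pass through.
        if isinstance(grid, GridLike):
            result = func(obj, grid.array, *args, **kwargs)
            return grid.structure_from_result(result)
        return func(obj, np.asarray(grid, dtype=float), *args, **kwargs)
    return wrapper

class Profile:
    @to_structure
    def radius_from(self, grid):
        # grid has shape [total_coordinates, 2] holding (y, x) pairs.
        return np.sqrt((grid ** 2).sum(axis=-1))

profile = Profile()
print(profile.radius_from(GridLike([[3.0, 4.0]])).array)  # [5.]
print(profile.radius_from([[3.0, 4.0]]))                  # [5.]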
| 42.254545
| 122
| 0.64119
| 2,353
| 18,592
| 4.948576
| 0.098173
| 0.022673
| 0.025678
| 0.03607
| 0.824287
| 0.801185
| 0.789334
| 0.771986
| 0.755153
| 0.737891
| 0
| 0.017292
| 0.284585
| 18,592
| 439
| 123
| 42.350797
| 0.858131
| 0.5142
| 0
| 0.546584
| 0
| 0
| 0.024963
| 0.006241
| 0
| 0
| 0
| 0
| 0
| 1
| 0.074534
| false
| 0
| 0.074534
| 0
| 0.31677
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f01546244daef76f91454218d243e57cff9b2fef
| 113
|
py
|
Python
|
feast/DetectionModules/__init__.py
|
ChandlerKemp/FEAST_PtE
|
9551824932379149dd6bc9135cfac6edf60c40c8
|
[
"MIT"
] | 3
|
2020-04-21T18:59:01.000Z
|
2021-01-14T22:56:17.000Z
|
feast/DetectionModules/__init__.py
|
ChandlerKemp/FEAST_PtE
|
9551824932379149dd6bc9135cfac6edf60c40c8
|
[
"MIT"
] | null | null | null |
feast/DetectionModules/__init__.py
|
ChandlerKemp/FEAST_PtE
|
9551824932379149dd6bc9135cfac6edf60c40c8
|
[
"MIT"
] | null | null | null |
from . import null
from . import abstract_detection_method
from . import tech_detect
from . import tiered_detect
| 22.6
| 39
| 0.823009
| 16
| 113
| 5.5625
| 0.5625
| 0.449438
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.141593
| 113
| 4
| 40
| 28.25
| 0.917526
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f03b44b7bd155b16cda8a428f739db53cb3a8257
| 138
|
py
|
Python
|
helios/workflows/__init__.py
|
thiagosfs/helios-server
|
1616f742c0d3ab8833aab4cfbcc45d9818c68716
|
[
"Apache-2.0"
] | 525
|
2015-01-04T11:51:26.000Z
|
2022-03-31T17:15:20.000Z
|
helios/workflows/__init__.py
|
thiagosfs/helios-server
|
1616f742c0d3ab8833aab4cfbcc45d9818c68716
|
[
"Apache-2.0"
] | 238
|
2015-01-02T17:50:37.000Z
|
2022-02-09T16:39:49.000Z
|
helios/workflows/__init__.py
|
thiagosfs/helios-server
|
1616f742c0d3ab8833aab4cfbcc45d9818c68716
|
[
"Apache-2.0"
] | 238
|
2015-01-05T23:09:20.000Z
|
2022-03-21T16:47:33.000Z
|
"""
Helios Election Workflows
"""
from helios.datatypes import LDObjectContainer
class WorkflowObject(LDObjectContainer):
pass
| 13.8
| 46
| 0.76087
| 12
| 138
| 8.75
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 138
| 9
| 47
| 15.333333
| 0.913043
| 0.181159
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
f052b9fc28af42e699049bdfe2b0ac01d467c316
| 187
|
py
|
Python
|
user_details/give_default.py
|
Shreyanshsachan/College-Predictor
|
87068aa1d1a889ced586ff155bc2b5d9a78340f7
|
[
"MIT"
] | null | null | null |
user_details/give_default.py
|
Shreyanshsachan/College-Predictor
|
87068aa1d1a889ced586ff155bc2b5d9a78340f7
|
[
"MIT"
] | null | null | null |
user_details/give_default.py
|
Shreyanshsachan/College-Predictor
|
87068aa1d1a889ced586ff155bc2b5d9a78340f7
|
[
"MIT"
] | null | null | null |
# Module-level store for the user's preference list.
preference_list_of_user = []

def give(def_list):
    # Remember the supplied preference list and hand it back to the caller.
    global preference_list_of_user
    preference_list_of_user = def_list
    return preference_list_of_user

def give_to_model():
    # Expose the stored preference list to the model.
    return preference_list_of_user
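A small usage sketch, assuming the module is importable from the path shown above; the preference values are placeholders.
from user_details.give_default import give, give_to_model

give(["college_a", "college_b"])   # placeholder preferences
print(give_to_model())             # ['college_a', 'college_b']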
| 20.777778
| 31
| 0.84492
| 32
| 187
| 4.4375
| 0.3125
| 0.394366
| 0.450704
| 0.56338
| 0.323944
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.096257
| 187
| 9
| 32
| 20.777778
| 0.840237
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0
| 0.125
| 0.5
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 6
|
b2ef1e91c18ddeb4d7361450a7de67ebdb4b2e6e
| 1,588
|
py
|
Python
|
triplinker/tests/test_views/test_non_dynamic_urls/test_accounts_views/tests.py
|
GonnaFlyMethod/triplinker
|
f4189e499ad48fd9102dd2211a8884078136eae9
|
[
"MIT"
] | null | null | null |
triplinker/tests/test_views/test_non_dynamic_urls/test_accounts_views/tests.py
|
GonnaFlyMethod/triplinker
|
f4189e499ad48fd9102dd2211a8884078136eae9
|
[
"MIT"
] | null | null | null |
triplinker/tests/test_views/test_non_dynamic_urls/test_accounts_views/tests.py
|
GonnaFlyMethod/triplinker
|
f4189e499ad48fd9102dd2211a8884078136eae9
|
[
"MIT"
] | null | null | null |
# Python modules.
import pytest
# Django modules.
from django.urls import reverse
from django.test import TestCase
# !Triplinker modules:
from tests.helpers.create_user import new_user
@pytest.mark.django_db
def test_signup_view(client):
url = reverse('accounts:signup')
response = client.get(url)
assert response.status_code == 200
@pytest.mark.django_db
def test_signin_view(client):
url = reverse('accounts:login')
response = client.get(url)
assert response.status_code == 200
@pytest.mark.django_db
def test_profile_view(client):
response = new_user()['client']
url = reverse('accounts:profile')
response = response.get(url)
assert response.status_code == 200
@pytest.mark.django_db
def test_profile_edit_view(client):
response = new_user()['client']
url = reverse('accounts:profile_edit')
response = response.get(url)
assert response.status_code == 200
@pytest.mark.django_db
def test_feed_view(client):
response = new_user()['client']
url = reverse('accounts:feed')
response = response.get(url)
assert response.status_code == 200
@pytest.mark.django_db
def test_all_users_list_view(client):
response = new_user()['client']
url = reverse('accounts:all_users_list')
response = response.get(url)
assert response.status_code == 200
@pytest.mark.django_db
def test_logout_view(client):
response = new_user()['client']
url = reverse('accounts:logout')
response = response.get(url)
assert response.status_code == 200
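Every logged-in check above repeats the same four lines with a different URL name. One way the repetition could be collapsed, using pytest.mark.parametrize and the same new_user helper imported at the top of the file; this is a sketch, not part of the original test module.
import pytest
from django.urls import reverse
from tests.helpers.create_user import new_user

@pytest.mark.django_db
@pytest.mark.parametrize("url_name", [
    "accounts:profile",
    "accounts:profile_edit",
    "accounts:feed",
    "accounts:all_users_list",
    "accounts:logout",
])
def test_logged_in_view_returns_200(url_name):
    client = new_user()["client"]
    response = client.get(reverse(url_name))
    assert response.status_code == 200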
| 25.206349
| 47
| 0.691436
| 204
| 1,588
| 5.191176
| 0.191176
| 0.0661
| 0.10576
| 0.11898
| 0.803588
| 0.750708
| 0.727101
| 0.727101
| 0.727101
| 0.634561
| 0
| 0.016445
| 0.195844
| 1,588
| 62
| 48
| 25.612903
| 0.812843
| 0.032746
| 0
| 0.636364
| 0
| 0
| 0.1
| 0.029932
| 0
| 0
| 0
| 0
| 0.159091
| 1
| 0.159091
| false
| 0
| 0.090909
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b2f17c3de89d94e2aba8cc14a42ef09cd569851a
| 41
|
py
|
Python
|
tests/test_vec/__init__.py
|
karin0018/EduNLP
|
172c36a2cd2c41a1f1c5833d7b1abcbc5e3bbd5f
|
[
"Apache-2.0"
] | 18
|
2021-02-15T13:10:42.000Z
|
2022-03-17T12:57:34.000Z
|
tests/test_vec/__init__.py
|
karin0018/EduNLP
|
172c36a2cd2c41a1f1c5833d7b1abcbc5e3bbd5f
|
[
"Apache-2.0"
] | 81
|
2021-06-02T07:45:20.000Z
|
2022-03-29T15:21:32.000Z
|
tests/test_vec/__init__.py
|
karin0018/EduNLP
|
172c36a2cd2c41a1f1c5833d7b1abcbc5e3bbd5f
|
[
"Apache-2.0"
] | 29
|
2021-05-18T08:34:58.000Z
|
2022-03-12T00:19:09.000Z
|
# coding: utf-8
# 2021/5/30 @ tongshiwei
| 13.666667
| 24
| 0.658537
| 7
| 41
| 3.857143
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.235294
| 0.170732
| 41
| 2
| 25
| 20.5
| 0.558824
| 0.878049
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e8ed68a76b6810bfc7416102a15fd740faaea0ec
| 4,699
|
py
|
Python
|
program.py
|
jaesik817/programmable-agents_tensorflow
|
b64d1774803c585e87aa9769beadde31e18f8ea4
|
[
"MIT"
] | 39
|
2017-09-25T02:01:18.000Z
|
2019-06-18T15:17:53.000Z
|
program.py
|
jsikyoon/programmable-agents_tensorflow
|
b64d1774803c585e87aa9769beadde31e18f8ea4
|
[
"MIT"
] | 5
|
2017-09-22T00:40:09.000Z
|
2018-05-07T15:11:11.000Z
|
program.py
|
jsikyoon/programmable-agents_tensorflow
|
b64d1774803c585e87aa9769beadde31e18f8ea4
|
[
"MIT"
] | 10
|
2017-09-25T06:49:12.000Z
|
2019-06-18T10:17:03.000Z
|
import tensorflow as tf
import numpy as np
import math
# Parameter
order_num=2;
class Program:
def __init__(self,sess,state_dim,obj_num,fea_size,Theta,program_order,postfix):
self.sess = sess;
self.state_dim = state_dim;
self.fea_size=fea_size;
self.obj_num=obj_num;
self.order_num=order_num;
self.Theta=Theta;
self.program_order=program_order;
self.postfix=postfix;
self.p = self.compile_order();
def compile_order(self):
self.Theta=tf.reshape(self.Theta,[-1,self.obj_num,6]);
self.Theta=tf.transpose(self.Theta,perm=[0,2,1]);
self.Theta=tf.unstack(self.Theta,6,1);
# temporary ordering
p_1=tf.multiply(self.Theta[0],self.Theta[3]);
p_1=p_1+self.Theta[5];
p_2=tf.multiply(self.Theta[1],self.Theta[3]);
p_2=p_2+self.Theta[5];
p_3=tf.multiply(self.Theta[0],self.Theta[4]);
p_3=p_3+self.Theta[5];
p_4=tf.multiply(self.Theta[1],self.Theta[4]);
p_4=p_4+self.Theta[5];
program_order2=tf.unstack(self.program_order,(self.obj_num-1),1);
p=tf.multiply(tf.stack([program_order2[0]]*(self.obj_num),1),p_1)+tf.multiply(tf.stack([program_order2[1]]*(self.obj_num),1),p_2)+tf.multiply(tf.stack([program_order2[2]]*(self.obj_num),1),p_3)+tf.multiply(tf.stack([program_order2[3]]*(self.obj_num),1),p_4);
# Currently tf.cond makes problems
"""
program_order2=tf.unstack(self.program_order,self.order_num,1);
for i in range(self.order_num):
program_order2[i]=tf.unstack(program_order2[i],3,1);
for i in range(self.order_num):
for k in range(9):
for l in range(k+1,9):
# not=1, and=2, or=3
p=tf.cond(tf.equal(program_order2[i][0],1)&tf.equal(program_order2[i][1],k),lambda:1-self.Theta[k],lambda:p);
p=tf.cond(tf.equal(program_order2[i][0],1)&tf.equal(program_order2[i][1],-1),lambda:1-p,lambda:p);
p=tf.cond(tf.equal(program_order2[i][0],2)&tf.equal(program_order2[i][1],k)&tf.equal(program_order2[i][2],l),lambda:tf.multiply(self.Theta[k],self.Theta[l]),lambda:p);
p=tf.cond(tf.equal(program_order2[i][0],2)&tf.equal(program_order2[i][1],k)&tf.equal(program_order2[i][2],-1),lambda:tf.multiply(self.Theta[k],p),lambda:p);
p=tf.cond(tf.equal(program_order2[i][0],3)&tf.equal(program_order2[i][1],k)&tf.equal(program_order2[i][2],l),lambda:self.Theta[k]+self.Theta[l]-tf.multiply(self.Theta[k],self.Theta[l]),lambda:p);
p=tf.cond(tf.equal(program_order2[i][0],3)&tf.equal(program_order2[i][1],k)&tf.equal(program_order2[i][2],l),lambda:self.Theta[k]+p-tf.multiply(self.Theta[k],p),lambda:p);
"""
return p;
def run_target_nets(self,Theta,program_order):
Theta=tf.reshape(Theta,[-1,self.obj_num,6]);
Theta=tf.transpose(Theta,perm=[0,2,1]);
Theta=tf.unstack(Theta,6,1);
# temporary ordering
p_1=tf.multiply(Theta[0],Theta[3]);
p_1=p_1+Theta[5];
p_2=tf.multiply(Theta[1],Theta[3]);
p_2=p_2+Theta[5];
p_3=tf.multiply(Theta[0],Theta[4]);
p_3=p_3+Theta[5];
p_4=tf.multiply(Theta[1],Theta[4]);
p_4=p_4+Theta[5];
program_order2=tf.unstack(program_order,(self.obj_num-1),1);
p=tf.multiply(tf.stack([program_order2[0]]*(self.obj_num),1),p_1)+tf.multiply(tf.stack([program_order2[1]]*(self.obj_num),1),p_2)+tf.multiply(tf.stack([program_order2[2]]*(self.obj_num),1),p_3)+tf.multiply(tf.stack([program_order2[3]]*(self.obj_num),1),p_4);
# Currently tf.cond makes problems
"""
# Currently tf.cond makes problems
program_order2=tf.unstack(program_order,self.order_num,1);
for i in range(self.order_num):
program_order2[i]=tf.unstack(program_order2[i],3,1);
for i in range(self.order_num):
for k in range(9):
for l in range(k+1,9):
# not=1, and=2, or=3
p=tf.cond(tf.equal(program_order2[i][0],1)&tf.equal(program_order2[i][1],k),lambda:1-Theta[k],lambda:p);
p=tf.cond(tf.equal(program_order2[i][0],1)&tf.equal(program_order2[i][1],-1),lambda:1-p,lambda:p);
p=tf.cond(tf.equal(program_order2[i][0],2)&tf.equal(program_order2[i][1],k)&tf.equal(program_order2[i][2],l),lambda:tf.multiply(Theta[k],Theta[l]),lambda:p);
p=tf.cond(tf.equal(program_order2[i][0],2)&tf.equal(program_order2[i][1],k)&tf.equal(program_order2[i][2],-1),lambda:tf.multiply(Theta[k],p),lambda:p);
p=tf.cond(tf.equal(program_order2[i][0],3)&tf.equal(program_order2[i][1],k)&tf.equal(program_order2[i][2],l),lambda:Theta[k]+Theta[l]-tf.multiply(Theta[k],Theta[l]),lambda:p);
p=tf.cond(tf.equal(program_order2[i][0],3)&tf.equal(program_order2[i][1],k)&tf.equal(program_order2[i][2],l),lambda:Theta[k]+p-tf.multiply(Theta[k],p),lambda:p);
"""
return p;
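# The commented-out tf.cond block above encodes soft logic over predicate
# probabilities: not(p) = 1 - p, and(p, q) = p * q, or(p, q) = p + q - p * q.
# Below is a minimal NumPy sketch of that composition, independent of the
# disabled TensorFlow code (function names here are illustrative, not from the source):
import numpy as np

def soft_not(p):
    # probabilistic negation
    return 1.0 - p

def soft_and(p, q):
    # probabilistic conjunction assuming independence
    return p * q

def soft_or(p, q):
    # probabilistic disjunction assuming independence
    return p + q - p * q

theta = np.array([0.9, 0.2, 0.6])
print(soft_not(theta[0]))             # ~0.1
print(soft_and(theta[0], theta[1]))   # ~0.18
print(soft_or(theta[0], theta[2]))    # ~0.96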
| 53.397727
| 262
| 0.668227
| 888
| 4,699
| 3.394144
| 0.068694
| 0.207034
| 0.16722
| 0.212342
| 0.835435
| 0.818845
| 0.754479
| 0.699403
| 0.678832
| 0.631055
| 0
| 0.050507
| 0.119387
| 4,699
| 87
| 263
| 54.011494
| 0.677864
| 0.02426
| 0
| 0.088889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.066667
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e8eedc51b24c6143d7853efa95a31479c5ffbbd9
| 2,645
|
py
|
Python
|
tests/commands/test_generate.py
|
pedrovelho/camp
|
98105c9054b8db3377cb6a06e7b5451b97c6c285
|
[
"MIT"
] | null | null | null |
tests/commands/test_generate.py
|
pedrovelho/camp
|
98105c9054b8db3377cb6a06e7b5451b97c6c285
|
[
"MIT"
] | null | null | null |
tests/commands/test_generate.py
|
pedrovelho/camp
|
98105c9054b8db3377cb6a06e7b5451b97c6c285
|
[
"MIT"
] | 1
|
2019-02-05T08:49:41.000Z
|
2019-02-05T08:49:41.000Z
|
#
# CAMP
#
# Copyright (C) 2017, 2018 SINTEF Digital
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the MIT license. See the LICENSE file for details.
#
from unittest import TestCase
from camp.commands import Command, Generate
class DefaultValuesAreCorrect(TestCase):
def test_given_no_working_directory(self):
command_line = "generate --all"
command = Command.extract_from(command_line.split())
self.assertIsInstance(command, Generate)
self.assertEqual(command.working_directory,
Generate.DEFAULT_WORKING_DIRECTORY)
def test_given_no_coverage_option(self):
command_line = "generate -d my/directory"
command = Command.extract_from(command_line.split())
self.assertIsInstance(command, Generate)
self.assertEqual(command.only_coverage,
Generate.DEFAULT_COVERAGE)
class ShortOptionsAreAccepted(TestCase):
def test_given_working_directory(self):
command_line = "generate --d my/test/directory"
command = Command.extract_from(command_line.split())
self.assertIsInstance(command, Generate)
self.assertEqual(command.working_directory,
"my/test/directory")
def test_given_only_coverage(self):
command_line = "generate --c"
command = Command.extract_from(command_line.split())
self.assertIsInstance(command, Generate)
self.assertTrue(command.only_coverage)
def test_given_all_configurations(self):
command_line = "generate --a"
command = Command.extract_from(command_line.split())
self.assertIsInstance(command, Generate)
self.assertFalse(command.only_coverage)
class LongOptionsAreAccepted(TestCase):
def test_given_working_directory(self):
command_line = "generate --directory my/test/directory"
command = Command.extract_from(command_line.split())
self.assertIsInstance(command, Generate)
self.assertEqual(command.working_directory,
"my/test/directory")
def test_given_only_coverage(self):
command_line = "generate --coverage"
command = Command.extract_from(command_line.split())
self.assertIsInstance(command, Generate)
self.assertTrue(command.only_coverage)
def test_given_all_configurations(self):
command_line = "generate --all"
command = Command.extract_from(command_line.split())
self.assertIsInstance(command, Generate)
self.assertFalse(command.only_coverage)
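# The tests above exercise a "generate" sub-command with -d/--directory,
# -c/--coverage and -a/--all options. Below is a minimal argparse sketch that
# is merely consistent with those options; the real wiring and defaults live
# on camp.commands.Generate and are not reproduced here.
from argparse import ArgumentParser

parser = ArgumentParser(prog="camp")
subparsers = parser.add_subparsers(dest="command")
generate = subparsers.add_parser("generate")
generate.add_argument("-d", "--directory", dest="working_directory", default=".")
generate.add_argument("-c", "--coverage", dest="only_coverage", action="store_true", default=False)
generate.add_argument("-a", "--all", dest="only_coverage", action="store_false")

args = parser.parse_args("generate --directory my/test/directory".split())
print(args.working_directory)                                            # my/test/directory
print(parser.parse_args("generate --coverage".split()).only_coverage)    # True
print(parser.parse_args("generate --all".split()).only_coverage)         # False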
| 25.190476
| 63
| 0.689981
| 280
| 2,645
| 6.3
| 0.210714
| 0.099773
| 0.054422
| 0.104308
| 0.781746
| 0.781746
| 0.781746
| 0.781746
| 0.779478
| 0.779478
| 0
| 0.003897
| 0.223819
| 2,645
| 104
| 64
| 25.432692
| 0.855334
| 0.068809
| 0
| 0.714286
| 0
| 0
| 0.08031
| 0
| 0
| 0
| 0
| 0
| 0.326531
| 1
| 0.163265
| false
| 0
| 0.040816
| 0
| 0.265306
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e8f4b23ed19c18a99bdef84f2585aee923db8769
| 3,306
|
py
|
Python
|
pyathena/util/rebin.py
|
changgoo/pyathena-1
|
c461ac3390d773537ce52393e3ebf68a3282aa46
|
[
"MIT"
] | 1
|
2019-10-03T13:59:14.000Z
|
2019-10-03T13:59:14.000Z
|
pyathena/util/rebin.py
|
changgoo/pyathena-1
|
c461ac3390d773537ce52393e3ebf68a3282aa46
|
[
"MIT"
] | 3
|
2020-09-23T23:36:17.000Z
|
2022-01-11T06:16:56.000Z
|
pyathena/util/rebin.py
|
changgoo/pyathena-1
|
c461ac3390d773537ce52393e3ebf68a3282aa46
|
[
"MIT"
] | 2
|
2019-06-10T04:26:16.000Z
|
2019-12-04T22:27:02.000Z
|
from __future__ import print_function
import numpy as np
def rebin_xyz(arr, bin_factor, fill_value=None):
"""
Function to rebin masked 3d array.
Parameters
----------
arr : ndarray
Masked or unmasked 3d numpy array. Shape is assumed to be (nz, ny, nx).
bin_factor : int
binning factor
fill_value: float
If arr is a masked array, fill masked elements with fill_value.
If *None*, masked elements will be neglected in calculating average.
Default value is *None*.
Return
------
arr_rebin: ndarray
Smaller size, (averaged) 3d array. Shape is assumed to be
(nz//bin_factor, ny//bin_factor, nx//bin_factor)
"""
if bin_factor == 1:
return arr
# number of cells in the z-direction and xy-direction
nz0 = arr.shape[0]
ny0 = arr.shape[1]
nx0 = arr.shape[2]
# size of binned array
nz1 = nz0 // bin_factor
ny1 = ny0 // bin_factor
nx1 = nx0 // bin_factor
if np.ma.is_masked(arr) and fill_value is not None:
np.ma.set_fill_value(arr, fill_value)
arr = arr.filled()
# See
# https://stackoverflow.com/questions/4624112/grouping-2d-numpy-array-in-average/4624923#4624923
return arr.reshape([nz1, nz0//nz1, ny1, ny0//ny1, nx1, nx0//nx1]).mean(axis=-1).mean(axis=3).mean(axis=1)
def rebin_xy(arr, bin_factor, fill_value=None):
"""
Function to rebin masked 3d array in the x-y dimension.
Parameters
----------
arr : ndarray
Masked or unmasked 3d numpy array. Shape is assumed to be (nz, ny, nx).
bin_factor : int
binning factor
fill_value: float
If arr is a masked array, fill masked elements with fill_value.
If *None*, masked elements will be neglected in calculating average.
Default value is *None*.
Return
------
arr_rebin: ndarray
Smaller size, (averaged) 3d array. Shape is assumed to be
(nz, ny//bin_factor, nx//bin_factor)
"""
if bin_factor == 1:
return arr
# number of cells in the z-direction and xy-direction
nz = arr.shape[0]
ny0 = arr.shape[1]
nx0 = arr.shape[2]
# size of binned array
ny1 = ny0 // bin_factor
nx1 = nx0 // bin_factor
if np.ma.is_masked(arr) and fill_value is not None:
np.ma.set_fill_value(arr, fill_value)
arr = arr.filled()
# See
# https://stackoverflow.com/questions/4624112/grouping-2d-numpy-array-in-average/4624923#4624923
return arr.reshape([nz, ny1, ny0//ny1, nx1, nx0//nx1]).mean(axis=-1).mean(axis=2)
if __name__ == '__main__':
# Test of rebin_xy
mask = True
# Define test data
big = np.ma.array([[5, 5, 1, 2],
[5, 5, 2, 1],
[2, 1, 1, 1],
[2, 1, 1, 1]])
if mask:
big.mask = [[1, 1, 0, 0],
[0, 1, 1, 1],
[1, 0, 1, 0],
[1, 1, 1, 0]]
big = np.tile(big, (1, 1, 1))
small1 = rebin_xy(big, 2, fill_value=0.0)
small2 = rebin_xy(big, 2, fill_value=None)
print('Original array\n', big)
print('With fill value 0.0\n', small1)
print('Without fill value\n', small2)
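# The rebinning above uses the reshape-then-mean trick referenced in the
# comments: each axis is split into (new_size, bin_factor) blocks and averaged
# within each block. A minimal 2-D sketch of the same idea (array and factor
# chosen purely for illustration):
import numpy as np

a = np.arange(16, dtype=float).reshape(4, 4)
bin_factor = 2
ny0, nx0 = a.shape
ny1, nx1 = ny0 // bin_factor, nx0 // bin_factor
rebinned = a.reshape(ny1, ny0 // ny1, nx1, nx0 // nx1).mean(axis=-1).mean(axis=1)
print(rebinned)   # 2x2 array holding the mean of each 2x2 block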
| 28.747826
| 109
| 0.578947
| 482
| 3,306
| 3.854772
| 0.211618
| 0.077503
| 0.009688
| 0.040904
| 0.819699
| 0.814855
| 0.814855
| 0.786868
| 0.786868
| 0.786868
| 0
| 0.059792
| 0.301875
| 3,306
| 114
| 110
| 29
| 0.745234
| 0.434059
| 0
| 0.409091
| 0
| 0
| 0.038393
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045455
| false
| 0
| 0.045455
| 0
| 0.181818
| 0.090909
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
68943b195cd6c1b43741489cbceaa66e3ae51918
| 3,542
|
py
|
Python
|
tests/test_utilities/test_manifest_parser.py
|
QualiSystems/DevBox
|
9a1807006bc93727970068d586764e9dccda94ec
|
[
"Apache-1.1"
] | null | null | null |
tests/test_utilities/test_manifest_parser.py
|
QualiSystems/DevBox
|
9a1807006bc93727970068d586764e9dccda94ec
|
[
"Apache-1.1"
] | null | null | null |
tests/test_utilities/test_manifest_parser.py
|
QualiSystems/DevBox
|
9a1807006bc93727970068d586764e9dccda94ec
|
[
"Apache-1.1"
] | null | null | null |
import os
from pyfakefs import fake_filesystem_unittest
from devbox.utilities.manifest_parser import ManifestParser
class TestManifestParser(fake_filesystem_unittest.TestCase):
def setUp(self):
self.setUpPyfakefs()
def test_manifest_parser(self):
# Arrange
self.fs.CreateFile('my-app/devbox.yaml', contents="""
tosca_definitions_version: tosca_simple_yaml_1_0
topology_template:
node_templates:
python_server1:
type: tosca.nodes.Python
properties:
ports_bindings:
type: string
default: "{1234:80}"
artifacts:
binaries:
file: binaries.zip
python_client1:
type: tosca.nodes.Python
node_types:
tosca.nodes.Python:
derived_from: tosca.nodes.SoftwareComponent
properties:
deployment_image:
type: string
default: rastasheep/ubuntu-sshd
deployment_command:
type: string
default: /bin/sh
deployment_ports:
type: list
default: [22, 1234]
ports_bindings:
type: string
required: false
provisioning_instruction:
type: string
default: playbook.yaml
""")
nodes = ManifestParser().parse('my-app/devbox.yaml')
self.assertEqual(nodes[0].properties['deployment_ports'], [22, 1234])
self.assertEqual(nodes[0].properties['ports_bindings'], "{1234:80}")
self.assertTrue('ports_bindings' not in nodes[1].properties)
def test_manifest_parser_deployment_path(self):
# Arrange
self.fs.CreateFile('my-app/devbox.yaml', contents="""
tosca_definitions_version: tosca_simple_yaml_1_0
topology_template:
node_templates:
python_server1:
type: tosca.nodes.Python
properties:
ports_bindings:
type: string
default: "{1234:80}"
execution_command:
type: string
default: "abcd"
artifacts:
binaries:
artifacts_path: /home/user/myappfolder
deploy_path: mybin
python_client1:
type: tosca.nodes.Python
node_types:
tosca.nodes.Python:
derived_from: tosca.nodes.SoftwareComponent
properties:
deployment_image:
type: string
default: rastasheep/ubuntu-sshd
deployment_command:
type: string
default: /bin/sh
deployment_ports:
type: list
default: [22, 1234]
ports_bindings:
type: string
required: false
provisioning_instruction:
type: string
default: playbook.yaml
execution_command:
type: string
default: ""
""")
nodes = ManifestParser().parse('my-app/devbox.yaml')
self.assertEqual(nodes[0].properties['deployment_ports'], [22, 1234])
self.assertEqual(nodes[0].properties['ports_bindings'], "{1234:80}")
self.assertEqual(nodes[0].artifacts['binaries']['deploy_path'], "mybin")
self.assertEqual(nodes[0].artifacts['binaries']['artifacts_path'], "/home/user/myappfolder")
self.assertEqual(nodes[0].properties['execution_command'], "abcd")
self.assertEqual(nodes[1].properties['execution_command'], "")
self.assertTrue('ports_bindings' not in nodes[1].properties)
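# The assertions above expect property defaults declared on a node type (e.g.
# deployment_ports) to be merged into each node template's properties, with
# template-level values taking precedence and non-required properties without a
# default being omitted. Below is a minimal PyYAML sketch of that merge rule;
# the helper name is illustrative and is not ManifestParser's actual API.
import yaml

def merge_node_properties(manifest_text):
    doc = yaml.safe_load(manifest_text)
    node_types = doc.get("node_types", {})
    merged = {}
    for name, template in doc["topology_template"]["node_templates"].items():
        type_props = node_types.get(template["type"], {}).get("properties", {})
        # start from the node type's defaults ...
        props = {k: v["default"] for k, v in type_props.items() if "default" in v}
        # ... then overlay whatever the template itself declares
        for k, v in (template.get("properties") or {}).items():
            props[k] = v.get("default", v) if isinstance(v, dict) else v
        merged[name] = props
    return merged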
| 30.016949
| 100
| 0.593732
| 341
| 3,542
| 5.98827
| 0.246334
| 0.058766
| 0.083252
| 0.071988
| 0.817826
| 0.770323
| 0.743389
| 0.695397
| 0.695397
| 0.650343
| 0
| 0.027038
| 0.310841
| 3,542
| 117
| 101
| 30.273504
| 0.809504
| 0.004235
| 0
| 0.824742
| 0
| 0
| 0.690233
| 0.08234
| 0
| 0
| 0
| 0
| 0.103093
| 1
| 0.030928
| false
| 0
| 0.030928
| 0
| 0.072165
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
6899ddd456696f5f8cbff28853c9ebc19f43f8ce
| 18,361
|
py
|
Python
|
monitoring/tests/unit/gapic/v3/test_service_monitoring_service_client_v3.py
|
q-logic/google-cloud-python
|
a65065c89c059bc564bbdd79288a48970907c399
|
[
"Apache-2.0"
] | null | null | null |
monitoring/tests/unit/gapic/v3/test_service_monitoring_service_client_v3.py
|
q-logic/google-cloud-python
|
a65065c89c059bc564bbdd79288a48970907c399
|
[
"Apache-2.0"
] | 40
|
2019-07-16T10:04:48.000Z
|
2020-01-20T09:04:59.000Z
|
monitoring/tests/unit/gapic/v3/test_service_monitoring_service_client_v3.py
|
q-logic/google-cloud-python
|
a65065c89c059bc564bbdd79288a48970907c399
|
[
"Apache-2.0"
] | 2
|
2019-07-18T00:05:31.000Z
|
2019-11-27T14:17:22.000Z
|
# -*- coding: utf-8 -*-
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests."""
import mock
import pytest
from google.cloud import monitoring_v3
from google.cloud.monitoring_v3.proto import service_pb2
from google.cloud.monitoring_v3.proto import service_service_pb2
from google.protobuf import empty_pb2
class MultiCallableStub(object):
"""Stub for the grpc.UnaryUnaryMultiCallable interface."""
def __init__(self, method, channel_stub):
self.method = method
self.channel_stub = channel_stub
def __call__(self, request, timeout=None, metadata=None, credentials=None):
self.channel_stub.requests.append((self.method, request))
response = None
if self.channel_stub.responses:
response = self.channel_stub.responses.pop()
if isinstance(response, Exception):
raise response
if response:
return response
class ChannelStub(object):
"""Stub for the grpc.Channel interface."""
def __init__(self, responses=[]):
self.responses = responses
self.requests = []
def unary_unary(self, method, request_serializer=None, response_deserializer=None):
return MultiCallableStub(method, self)
class CustomException(Exception):
pass
class TestServiceMonitoringServiceClient(object):
def test_create_service(self):
# Setup Expected Response
name = "name3373707"
display_name = "displayName1615086568"
expected_response = {"name": name, "display_name": display_name}
expected_response = service_pb2.Service(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.ServiceMonitoringServiceClient()
# Setup Request
parent = client.project_path("[PROJECT]")
service = {}
response = client.create_service(parent, service)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = service_service_pb2.CreateServiceRequest(
parent=parent, service=service
)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_create_service_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.ServiceMonitoringServiceClient()
# Setup request
parent = client.project_path("[PROJECT]")
service = {}
with pytest.raises(CustomException):
client.create_service(parent, service)
def test_get_service(self):
# Setup Expected Response
name_2 = "name2-1052831874"
display_name = "displayName1615086568"
expected_response = {"name": name_2, "display_name": display_name}
expected_response = service_pb2.Service(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.ServiceMonitoringServiceClient()
# Setup Request
name = client.service_path("[PROJECT]", "[SERVICE]")
response = client.get_service(name)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = service_service_pb2.GetServiceRequest(name=name)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_get_service_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.ServiceMonitoringServiceClient()
# Setup request
name = client.service_path("[PROJECT]", "[SERVICE]")
with pytest.raises(CustomException):
client.get_service(name)
def test_list_services(self):
# Setup Expected Response
next_page_token = ""
services_element = {}
services = [services_element]
expected_response = {"next_page_token": next_page_token, "services": services}
expected_response = service_service_pb2.ListServicesResponse(
**expected_response
)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.ServiceMonitoringServiceClient()
# Setup Request
parent = client.project_path("[PROJECT]")
paged_list_response = client.list_services(parent)
resources = list(paged_list_response)
assert len(resources) == 1
assert expected_response.services[0] == resources[0]
assert len(channel.requests) == 1
expected_request = service_service_pb2.ListServicesRequest(parent=parent)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_list_services_exception(self):
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.ServiceMonitoringServiceClient()
# Setup request
parent = client.project_path("[PROJECT]")
paged_list_response = client.list_services(parent)
with pytest.raises(CustomException):
list(paged_list_response)
def test_update_service(self):
# Setup Expected Response
name = "name3373707"
display_name = "displayName1615086568"
expected_response = {"name": name, "display_name": display_name}
expected_response = service_pb2.Service(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.ServiceMonitoringServiceClient()
# Setup Request
service = {}
response = client.update_service(service)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = service_service_pb2.UpdateServiceRequest(service=service)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_update_service_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.ServiceMonitoringServiceClient()
# Setup request
service = {}
with pytest.raises(CustomException):
client.update_service(service)
def test_delete_service(self):
channel = ChannelStub()
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.ServiceMonitoringServiceClient()
# Setup Request
name = client.service_path("[PROJECT]", "[SERVICE]")
client.delete_service(name)
assert len(channel.requests) == 1
expected_request = service_service_pb2.DeleteServiceRequest(name=name)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_delete_service_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.ServiceMonitoringServiceClient()
# Setup request
name = client.service_path("[PROJECT]", "[SERVICE]")
with pytest.raises(CustomException):
client.delete_service(name)
def test_create_service_level_objective(self):
# Setup Expected Response
name = "name3373707"
display_name = "displayName1615086568"
goal = 317825.0
expected_response = {"name": name, "display_name": display_name, "goal": goal}
expected_response = service_pb2.ServiceLevelObjective(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.ServiceMonitoringServiceClient()
# Setup Request
parent = client.service_path("[PROJECT]", "[SERVICE]")
service_level_objective = {}
response = client.create_service_level_objective(
parent, service_level_objective
)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = service_service_pb2.CreateServiceLevelObjectiveRequest(
parent=parent, service_level_objective=service_level_objective
)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_create_service_level_objective_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.ServiceMonitoringServiceClient()
# Setup request
parent = client.service_path("[PROJECT]", "[SERVICE]")
service_level_objective = {}
with pytest.raises(CustomException):
client.create_service_level_objective(parent, service_level_objective)
def test_get_service_level_objective(self):
# Setup Expected Response
name_2 = "name2-1052831874"
display_name = "displayName1615086568"
goal = 317825.0
expected_response = {"name": name_2, "display_name": display_name, "goal": goal}
expected_response = service_pb2.ServiceLevelObjective(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.ServiceMonitoringServiceClient()
# Setup Request
name = client.service_level_objective_path(
"[PROJECT]", "[SERVICE]", "[SERVICE_LEVEL_OBJECTIVE]"
)
response = client.get_service_level_objective(name)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = service_service_pb2.GetServiceLevelObjectiveRequest(
name=name
)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_get_service_level_objective_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.ServiceMonitoringServiceClient()
# Setup request
name = client.service_level_objective_path(
"[PROJECT]", "[SERVICE]", "[SERVICE_LEVEL_OBJECTIVE]"
)
with pytest.raises(CustomException):
client.get_service_level_objective(name)
def test_list_service_level_objectives(self):
# Setup Expected Response
next_page_token = ""
service_level_objectives_element = {}
service_level_objectives = [service_level_objectives_element]
expected_response = {
"next_page_token": next_page_token,
"service_level_objectives": service_level_objectives,
}
expected_response = service_service_pb2.ListServiceLevelObjectivesResponse(
**expected_response
)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.ServiceMonitoringServiceClient()
# Setup Request
parent = client.service_path("[PROJECT]", "[SERVICE]")
paged_list_response = client.list_service_level_objectives(parent)
resources = list(paged_list_response)
assert len(resources) == 1
assert expected_response.service_level_objectives[0] == resources[0]
assert len(channel.requests) == 1
expected_request = service_service_pb2.ListServiceLevelObjectivesRequest(
parent=parent
)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_list_service_level_objectives_exception(self):
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.ServiceMonitoringServiceClient()
# Setup request
parent = client.service_path("[PROJECT]", "[SERVICE]")
paged_list_response = client.list_service_level_objectives(parent)
with pytest.raises(CustomException):
list(paged_list_response)
def test_update_service_level_objective(self):
# Setup Expected Response
name = "name3373707"
display_name = "displayName1615086568"
goal = 317825.0
expected_response = {"name": name, "display_name": display_name, "goal": goal}
expected_response = service_pb2.ServiceLevelObjective(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.ServiceMonitoringServiceClient()
# Setup Request
service_level_objective = {}
response = client.update_service_level_objective(service_level_objective)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = service_service_pb2.UpdateServiceLevelObjectiveRequest(
service_level_objective=service_level_objective
)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_update_service_level_objective_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.ServiceMonitoringServiceClient()
# Setup request
service_level_objective = {}
with pytest.raises(CustomException):
client.update_service_level_objective(service_level_objective)
def test_delete_service_level_objective(self):
channel = ChannelStub()
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.ServiceMonitoringServiceClient()
# Setup Request
name = client.service_level_objective_path(
"[PROJECT]", "[SERVICE]", "[SERVICE_LEVEL_OBJECTIVE]"
)
client.delete_service_level_objective(name)
assert len(channel.requests) == 1
expected_request = service_service_pb2.DeleteServiceLevelObjectiveRequest(
name=name
)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_delete_service_level_objective_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.ServiceMonitoringServiceClient()
# Setup request
name = client.service_level_objective_path(
"[PROJECT]", "[SERVICE]", "[SERVICE_LEVEL_OBJECTIVE]"
)
with pytest.raises(CustomException):
client.delete_service_level_objective(name)
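# ChannelStub/MultiCallableStub above implement a common gRPC test pattern:
# canned responses are popped per call, exceptions placed in the response list
# are raised, and (method, request) pairs are recorded for later assertions.
# A standalone illustration of the same record-and-replay idea (names below
# are illustrative and independent of the classes defined in this file):
class RecordingStub(object):
    def __init__(self, responses):
        self.responses = list(responses)
        self.requests = []

    def __call__(self, request):
        # record the call, then replay the next canned response
        self.requests.append(request)
        response = self.responses.pop()
        if isinstance(response, Exception):
            raise response
        return response

stub = RecordingStub(responses=["pong"])
assert stub("ping") == "pong"
assert stub.requests == ["ping"]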
| 38.093361
| 88
| 0.680954
| 1,875
| 18,361
| 6.396267
| 0.090133
| 0.065038
| 0.063037
| 0.033353
| 0.853831
| 0.839323
| 0.812474
| 0.797048
| 0.773034
| 0.75494
| 0
| 0.016006
| 0.237787
| 18,361
| 481
| 89
| 38.172557
| 0.840943
| 0.081532
| 0
| 0.680124
| 0
| 0
| 0.094342
| 0.066111
| 0
| 0
| 0
| 0
| 0.093168
| 1
| 0.074534
| false
| 0.003106
| 0.018634
| 0.003106
| 0.111801
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d79a6a449f7e90756102071f66e54f7f316037df
| 13,393
|
py
|
Python
|
realworld_benchmark/nets/eig_layer.py
|
DomInvivo/pna
|
1a7d8ae645d093ebedeffcf148a98f6061957a23
|
[
"MIT"
] | null | null | null |
realworld_benchmark/nets/eig_layer.py
|
DomInvivo/pna
|
1a7d8ae645d093ebedeffcf148a98f6061957a23
|
[
"MIT"
] | null | null | null |
realworld_benchmark/nets/eig_layer.py
|
DomInvivo/pna
|
1a7d8ae645d093ebedeffcf148a98f6061957a23
|
[
"MIT"
] | 2
|
2020-11-05T15:34:23.000Z
|
2020-12-17T17:44:48.000Z
|
EPS = 1e-5
import threading
import torch
import torch.nn as nn
import torch.nn.functional as F
from .aggregators import AGGREGATORS
from .layers import MLP, FCLayer
from .scalers import SCALERS
class EIGLayerComplex(nn.Module):
def __init__(self, in_dim, out_dim, dropout, graph_norm, batch_norm, aggregators, scalers, avg_d, residual,
edge_features, edge_dim, pretrans_layers=1, posttrans_layers=1):
super().__init__()
# retrieve the aggregators and scalers functions
aggregators = [AGGREGATORS[aggr] for aggr in aggregators.split()]
scalers = [SCALERS[scale] for scale in scalers.split()]
self.dropout = dropout
self.graph_norm = graph_norm
self.batch_norm = batch_norm
self.edge_features = edge_features
self.residual = residual
self.aggregators = aggregators
self.scalers = scalers
self.batchnorm_h = nn.BatchNorm1d(out_dim)
self.pretrans = MLP(in_size=2 * in_dim + (edge_dim if edge_features else 0), hidden_size=in_dim,
out_size=in_dim, layers=pretrans_layers, mid_activation='relu', last_activation='none')
self.posttrans = MLP(in_size=(len(aggregators) * len(scalers) + 1) * in_dim, hidden_size=out_dim,
out_size=out_dim, layers=posttrans_layers, mid_activation='relu', last_activation='none')
self.avg_d = avg_d
if in_dim != out_dim:
self.residual = False
def pretrans_edges(self, edges):
if self.edge_features:
z2 = torch.cat([edges.src['h'], edges.dst['h'], edges.data['ef']], dim=1)
else:
z2 = torch.cat([edges.src['h'], edges.dst['h']], dim=1)
return {'e': self.pretrans(z2), 'eig_s': edges.src['eig'], 'eig_d': edges.dst['eig']}
def message_func(self, edges):
return {'e': edges.data['e'], 'eig_s': edges.data['eig_s'].to('cuda' if torch.cuda.is_available() else 'cpu'), 'eig_d': edges.data['eig_d'].to('cuda' if torch.cuda.is_available() else 'cpu')}
def reduce_func(self, nodes):
h_in = nodes.data['h']
h = nodes.mailbox['e']
eig_s = nodes.mailbox['eig_s']
eig_d = nodes.mailbox['eig_d']
D = h.shape[-2]
to_cat = []
for aggregate in self.aggregators:
try:
to_cat.append(aggregate(self, h, eig_s, eig_d))
except:
to_cat.append(aggregate(self, h, eig_s, eig_d, h_in))
h = torch.cat(to_cat, dim=1)
if len(self.scalers) > 1:
h = torch.cat([scale(h, D=D, avg_d=self.avg_d) for scale in self.scalers], dim=1)
return {'h': h}
def posttrans_nodes(self, nodes):
return self.posttrans(nodes.data['h'])
def forward(self, g, h, e, snorm_n):
h_in = h
g.ndata['h'] = h
if self.edge_features: # add the edges information only if edge_features = True
g.edata['ef'] = e
# pretransformation
g.apply_edges(self.pretrans_edges)
# aggregation
g.update_all(self.message_func, self.reduce_func)
h = torch.cat([h, g.ndata['h']], dim=1)
# posttransformation
h = self.posttrans(h)
# graph and batch normalization and residual
if self.graph_norm:
h = h * snorm_n
if self.batch_norm:
h = self.batchnorm_h(h)
h = F.relu(h)
if self.residual:
h = h_in + h
h = F.dropout(h, self.dropout, training=self.training)
return h
class EIGLayerSimple(nn.Module):
def __init__(self, in_dim, out_dim, dropout, graph_norm, batch_norm, aggregators, scalers, residual, avg_d,
posttrans_layers=1):
super().__init__()
# retrieve the aggregators and scalers functions
aggregators = [AGGREGATORS[aggr] for aggr in aggregators.split()]
scalers = [SCALERS[scale] for scale in scalers.split()]
self.dropout = dropout
self.graph_norm = graph_norm
self.batch_norm = batch_norm
self.residual = residual
self.aggregators = aggregators
self.scalers = scalers
self.batchnorm_h = nn.BatchNorm1d(out_dim)
self.posttrans = MLP(in_size=(len(aggregators) * len(scalers)) * in_dim, hidden_size=out_dim,
out_size=out_dim, layers=posttrans_layers, mid_activation='relu', last_activation='none')
self.avg_d = avg_d
if in_dim != out_dim:
self.residual = False
def pretrans_edges(self, edges):
return {'e': edges.src['h'], 'eig_s': edges.src['eig'], 'eig_d': edges.dst['eig']}
def message_func(self, edges):
return {'e': edges.data['e'], 'eig_s': edges.data['eig_s'].to('cuda' if torch.cuda.is_available() else 'cpu'),
'eig_d': edges.data['eig_d'].to('cuda' if torch.cuda.is_available() else 'cpu')}
def reduce_func(self, nodes):
h_in = nodes.data['h']
h = nodes.mailbox['e']
eig_s = nodes.mailbox['eig_s']
eig_d = nodes.mailbox['eig_d']
D = h.shape[-2]
to_cat = []
for aggregate in self.aggregators:
try:
to_cat.append(aggregate(self, h, eig_s, eig_d))
except:
to_cat.append(aggregate(self, h, eig_s, eig_d, h_in))
h = torch.cat(to_cat, dim=1)
if len(self.scalers) > 1:
h = torch.cat([scale(h, D=D, avg_d=self.avg_d) for scale in self.scalers], dim=1)
return {'h': h}
def posttrans_nodes(self, nodes):
return self.posttrans(nodes.data['h'])
def forward(self, g, h, e, snorm_n):
h_in = h
g.ndata['h'] = h
g.apply_edges(self.pretrans_edges)
# aggregation
g.update_all(self.message_func, self.reduce_func)
h = g.ndata['h']
# posttransformation
h = self.posttrans(h)
# graph and batch normalization and residual
if self.graph_norm:
h = h * snorm_n
if self.batch_norm:
h = self.batchnorm_h(h)
h = F.relu(h)
if self.residual:
h = h_in + h
h = F.dropout(h, self.dropout, training=self.training)
return h
class EIGTower(nn.Module):
def __init__(self, in_dim, out_dim, dropout, graph_norm, batch_norm, aggregators, scalers, avg_d,
pretrans_layers, posttrans_layers, edge_features, edge_dim):
super().__init__()
self.dropout = dropout
self.graph_norm = graph_norm
self.batch_norm = batch_norm
self.edge_features = edge_features
self.aggregators = aggregators
self.scalers = scalers
self.batchnorm_h = nn.BatchNorm1d(out_dim)
self.pretrans = MLP(in_size=2 * in_dim + (edge_dim if edge_features else 0), hidden_size=in_dim,
out_size=in_dim, layers=pretrans_layers, mid_activation='relu', last_activation='none')
self.posttrans = MLP(in_size=(len(aggregators) * len(scalers) + 1) * in_dim,
hidden_size=out_dim,
out_size=out_dim, layers=posttrans_layers, mid_activation='relu', last_activation='none')
self.avg_d = avg_d
def pretrans_edges(self, edges):
if self.edge_features:
z2 = torch.cat([edges.src['h'], edges.dst['h'], edges.data['ef']], dim=1)
else:
z2 = torch.cat([edges.src['h'], edges.dst['h']], dim=1)
return {'e': self.pretrans(z2), 'eig_s': edges.src['eig'], 'eig_d': edges.dst['eig']}
def message_func(self, edges):
return {'e': edges.data['e'], 'eig_s': edges.data['eig_s'].to('cuda' if torch.cuda.is_available() else 'cpu'), 'eig_d': edges.data['eig_d'].to('cuda' if torch.cuda.is_available() else 'cpu')}
def reduce_func(self, nodes):
h_in = nodes.data['h']
h = nodes.mailbox['e']
eig_s = nodes.mailbox['eig_s']
eig_d = nodes.mailbox['eig_d']
D = h.shape[-2]
to_cat = []
for aggregate in self.aggregators:
try:
to_cat.append(aggregate(self, h, eig_s, eig_d))
except:
to_cat.append(aggregate(self, h, eig_s, eig_d, h_in))
h = torch.cat(to_cat, dim=1)
if len(self.scalers) > 1:
h = torch.cat([scale(h, D=D, avg_d=self.avg_d) for scale in self.scalers], dim=1)
return {'h': h}
def posttrans_nodes(self, nodes):
return self.posttrans(nodes.data['h'])
def forward(self, g, h, e, snorm_n):
g.ndata['h'] = h
if self.edge_features: # add the edges information only if edge_features = True
g.edata['ef'] = e
# pretransformation
g.apply_edges(self.pretrans_edges)
# aggregation
g.update_all(self.message_func, self.reduce_func)
h = torch.cat([h, g.ndata['h']], dim=1)
# posttransformation
h = self.posttrans(h)
# graph and batch normalization
if self.graph_norm:
h = h * snorm_n
if self.batch_norm:
h = self.batchnorm_h(h)
h = F.dropout(h, self.dropout, training=self.training)
return h
class EIGLayerTower(nn.Module):
"""
Param: [in_dim, out_dim, n_heads]
"""
def __init__(self, in_dim, out_dim, aggregators, scalers, avg_d, dropout, graph_norm, batch_norm, towers=5,
pretrans_layers=1, posttrans_layers=1, divide_input=True, residual=False, edge_features=False,
edge_dim=0):
super().__init__()
assert ((
not divide_input) or in_dim % towers == 0), "if divide_input is set the number of towers has to divide in_dim"
assert (out_dim % towers == 0), "the number of towers has to divide the out_dim"
assert avg_d is not None
# retrieve the aggregators and scalers functions
aggregators = [AGGREGATORS[aggr] for aggr in aggregators.split()]
scalers = [SCALERS[scale] for scale in scalers.split()]
self.divide_input = divide_input
self.input_tower = in_dim // towers if divide_input else in_dim
self.output_tower = out_dim // towers
self.in_dim = in_dim
self.out_dim = out_dim
self.edge_features = edge_features
self.residual = residual
if in_dim != out_dim:
self.residual = False
# convolution
self.towers = nn.ModuleList()
for _ in range(towers):
self.towers.append(EIGTower(in_dim=self.input_tower, out_dim=self.output_tower, aggregators=aggregators,
scalers=scalers, avg_d=avg_d, pretrans_layers=pretrans_layers,
posttrans_layers=posttrans_layers, batch_norm=batch_norm, dropout=dropout,
graph_norm=graph_norm, edge_features=edge_features, edge_dim=edge_dim))
# mixing network
self.mixing_network = FCLayer(out_dim, out_dim, activation='LeakyReLU')
def forward(self, g, h, e, snorm_n):
h_in = h # for residual connection
if self.divide_input:
h_cat = torch.cat( [tower(g, h[:, n_tower * self.input_tower: (n_tower + 1) * self.input_tower], e, snorm_n)
for n_tower, tower in enumerate(self.towers)], dim=1)
else:
h_cat = torch.cat([tower(g, h, e, snorm_n) for tower in self.towers], dim=1)
if len(self.towers) > 1:
h_out = self.mixing_network(h_cat)
else:
h_out = h_cat
if self.residual:
h_out = h_in + h_out # residual connection
return h_out
class EIGLayer(nn.Module):
def __init__(self, in_dim, out_dim, dropout, graph_norm, batch_norm, aggregators, scalers, avg_d, type_net, residual, towers=5, divide_input=True,
edge_features=None, edge_dim=None, pretrans_layers=1, posttrans_layers=1,):
super().__init__()
self.type_net = type_net
# keep the dimensions around so __repr__ below can report them
self.in_dim = in_dim
self.out_dim = out_dim
if type_net == 'simple':
self.model = EIGLayerSimple(in_dim=in_dim, out_dim=out_dim, dropout=dropout, graph_norm=graph_norm, batch_norm=batch_norm, residual=residual,
aggregators=aggregators, scalers=scalers, avg_d=avg_d, posttrans_layers=posttrans_layers)
elif type_net == 'complex':
self.model = EIGLayerComplex(in_dim=in_dim, out_dim=out_dim, dropout=dropout, graph_norm=graph_norm, batch_norm=batch_norm, aggregators=aggregators, residual=residual,
scalers=scalers, avg_d=avg_d, edge_features=edge_features, edge_dim=edge_dim, pretrans_layers=pretrans_layers, posttrans_layers=posttrans_layers)
elif type_net == 'towers':
self.model = EIGLayerTower(in_dim=in_dim, out_dim=out_dim, aggregators=aggregators, scalers=scalers, avg_d=avg_d, dropout=dropout, graph_norm=graph_norm,
batch_norm=batch_norm, towers=towers, pretrans_layers=pretrans_layers, posttrans_layers=posttrans_layers, divide_input=divide_input,
residual=residual, edge_features=edge_features, edge_dim=edge_dim)
def __repr__(self):
return '{}(in_channels={}, out_channels={})'.format(self.__class__.__name__, self.in_dim, self.out_dim)
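# reduce_func above concatenates several neighbour aggregations and then applies
# degree-dependent scalers to the result. Below is a minimal tensor-only sketch
# of that aggregate-then-scale composition; the aggregators and scalers are toy
# stand-ins, not the ones imported from .aggregators / .scalers.
import torch

def aggregate_and_scale(messages, avg_d):
    # messages: (num_nodes, num_neighbours, features) neighbour messages per node
    D = messages.shape[-2]
    aggregated = torch.cat([messages.mean(dim=1), messages.max(dim=1).values], dim=1)
    scalers = [
        lambda h: h,                                                 # identity
        lambda h: h * (torch.log(torch.tensor(D + 1.0)) / avg_d),   # degree amplification
    ]
    return torch.cat([scale(aggregated) for scale in scalers], dim=1)

out = aggregate_and_scale(torch.randn(5, 3, 8), avg_d=1.5)
print(out.shape)   # torch.Size([5, 32]): 2 aggregators x 2 scalers x 8 features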
| 37.099723
| 199
| 0.605615
| 1,824
| 13,393
| 4.211075
| 0.077851
| 0.025778
| 0.019919
| 0.017185
| 0.810311
| 0.798985
| 0.787918
| 0.766046
| 0.691186
| 0.685718
| 0
| 0.005266
| 0.276936
| 13,393
| 361
| 200
| 37.099723
| 0.787898
| 0.044874
| 0
| 0.714286
| 0
| 0
| 0.034264
| 0
| 0
| 0
| 0
| 0
| 0.012245
| 1
| 0.089796
| false
| 0
| 0.028571
| 0.032653
| 0.208163
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d7b1f098471299492d3a164aae5bda72d5a2c99e
| 113
|
py
|
Python
|
bitmovin_api_sdk/encoding/infrastructure/kubernetes/configuration/__init__.py
|
hofmannben/bitmovin-api-sdk-python
|
71aae5cd8a31aa0ad54ca07a6f546a624e8686a9
|
[
"MIT"
] | null | null | null |
bitmovin_api_sdk/encoding/infrastructure/kubernetes/configuration/__init__.py
|
hofmannben/bitmovin-api-sdk-python
|
71aae5cd8a31aa0ad54ca07a6f546a624e8686a9
|
[
"MIT"
] | 1
|
2020-07-06T07:13:43.000Z
|
2020-07-06T07:13:43.000Z
|
bitmovin_api_sdk/encoding/infrastructure/kubernetes/configuration/__init__.py
|
hofmannben/bitmovin-api-sdk-python
|
71aae5cd8a31aa0ad54ca07a6f546a624e8686a9
|
[
"MIT"
] | 1
|
2020-07-06T07:07:26.000Z
|
2020-07-06T07:07:26.000Z
|
from bitmovin_api_sdk.encoding.infrastructure.kubernetes.configuration.configuration_api import ConfigurationApi
| 56.5
| 112
| 0.920354
| 12
| 113
| 8.416667
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.035398
| 113
| 1
| 113
| 113
| 0.926606
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d7c96b0c8aeabc0eec3210749c777002aac7b033
| 32,365
|
py
|
Python
|
app/tests/refs/ectyper_dict.py
|
superphy/spfy
|
867e61b32ab00ec536378f96a63f0fb379f47c58
|
[
"Apache-2.0"
] | 2
|
2019-05-22T14:29:37.000Z
|
2020-02-13T11:30:46.000Z
|
app/tests/refs/ectyper_dict.py
|
superphy/backend
|
867e61b32ab00ec536378f96a63f0fb379f47c58
|
[
"Apache-2.0"
] | 88
|
2017-04-07T21:52:10.000Z
|
2018-03-10T23:12:47.000Z
|
app/tests/refs/ectyper_dict.py
|
superphy/backend
|
867e61b32ab00ec536378f96a63f0fb379f47c58
|
[
"Apache-2.0"
] | 2
|
2017-02-10T21:30:13.000Z
|
2017-06-05T22:30:17.000Z
|
# output from call_ectyper.py, to be sent to beautify.py
# example is from ECI-2866_lcl.fasta_ectyper.p
ectyper_dict = {'Virulence Factors': {'lcl|ECI-2866|NODE_56_length_6694_cov_33.7669_ID_111': [{'START': 4864, 'STOP': 5820, 'ORIENTATION': '+', 'GENE_NAME': 'stx1A'}, {'START': 4873, 'STOP': 5820, 'ORIENTATION': '+', 'GENE_NAME': 'stx1A'}, {'START': 4873, 'STOP': 5820, 'ORIENTATION': '+', 'GENE_NAME': 'stx1vA'}, {'START': 5830, 'STOP': 6099, 'ORIENTATION': '+', 'GENE_NAME': 'stx1B'}, {'START': 5830, 'STOP': 6099, 'ORIENTATION': '+', 'GENE_NAME': 'stx1vB'}], 'lcl|ECI-2866|NODE_144_length_772_cov_35.0868_ID_287': [{'START': 1, 'STOP': 112, 'ORIENTATION': '+', 'GENE_NAME': 'epeA'}, {'START': 1, 'STOP': 112, 'ORIENTATION': '+', 'GENE_NAME': 'CAC39286'}, {'START': 1, 'STOP': 112, 'ORIENTATION': '+', 'GENE_NAME': 'espI'}, {'START': 1, 'STOP': 112, 'ORIENTATION': '+', 'GENE_NAME': 'espP'}], 'lcl|ECI-2866|NODE_37_length_34194_cov_30.2716_ID_73': [{'START': 202, 'STOP': 241, 'ORIENTATION': '+', 'GENE_NAME': 'entD'}], 'lcl|ECI-2866|NODE_13_length_131517_cov_29.4639_ID_25': [{'START': 83949, 'STOP': 86025, 'ORIENTATION': '-', 'GENE_NAME': 'flhA'}, {'START': 94335, 'STOP': 96299, 'ORIENTATION': '-', 'GENE_NAME': 'cheA'}, {'START': 92005, 'STOP': 93666, 'ORIENTATION': '-', 'GENE_NAME': 'tar/cheM'}, {'START': 86018, 'STOP': 87166, 'ORIENTATION': '-', 'GENE_NAME': 'flhB'}, {'START': 88427, 'STOP': 89476, 'ORIENTATION': '-', 'GENE_NAME': 'cheB'}, {'START': 96304, 'STOP': 97230, 'ORIENTATION': '-', 'GENE_NAME': 'motB'}, {'START': 97227, 'STOP': 98114, 'ORIENTATION': '-', 'GENE_NAME': 'motA'}, {'START': 89479, 'STOP': 90339, 'ORIENTATION': '-', 'GENE_NAME': 'cheR'}, {'START': 119779, 'STOP': 120579, 'ORIENTATION': '-', 'GENE_NAME': 'fliY'}, {'START': 121264, 'STOP': 121983, 'ORIENTATION': '-', 'GENE_NAME': 'fliA'}, {'START': 87368, 'STOP': 88012, 'ORIENTATION': '-', 'GENE_NAME': 'cheZ'}, {'START': 98241, 'STOP': 98819, 'ORIENTATION': '-', 'GENE_NAME': 'flhC'}, {'START': 120667, 'STOP': 121254, 'ORIENTATION': '-', 'GENE_NAME': 'fliZ'}, {'START': 93811, 'STOP': 94314, 'ORIENTATION': '-', 'GENE_NAME': 'cheW'}, {'START': 88023, 'STOP': 88412, 'ORIENTATION': '-', 'GENE_NAME': 'cheY'}, {'START': 125330, 'STOP': 125718, 'ORIENTATION': '+', 'GENE_NAME': 'fliS'}, {'START': 83555, 'STOP': 83947, 'ORIENTATION': '-', 'GENE_NAME': 'flhE'}, {'START': 98822, 'STOP': 99181, 'ORIENTATION': '-', 'GENE_NAME': 'flhD'}, {'START': 125718, 'STOP': 126083, 'ORIENTATION': '+', 'GENE_NAME': 'fliT'}, {'START': 75403, 'STOP': 75517, 'ORIENTATION': '+', 'GENE_NAME': 'entD'}], 'lcl|ECI-2866|NODE_49_length_12118_cov_18.277_ID_97': [{'START': 3814, 'STOP': 6414, 'ORIENTATION': '+', 'GENE_NAME': 'cdiA'}, {'START': 3814, 'STOP': 6087, 'ORIENTATION': '+', 'GENE_NAME': 'cdiA'}, {'START': 9423, 'STOP': 11510, 'ORIENTATION': '-', 'GENE_NAME': 'c3610'}, {'START': 9423, 'STOP': 11510, 'ORIENTATION': '-', 'GENE_NAME': 'iha'}, {'START': 8223, 'STOP': 8399, 'ORIENTATION': '+', 'GENE_NAME': 'aaiW'}], 'lcl|ECI-2866|NODE_55_length_6881_cov_29.4505_ID_109': [{'START': 1922, 'STOP': 3667, 'ORIENTATION': '-', 'GENE_NAME': 'cei'}], 'lcl|ECI-2866|NODE_33_length_43220_cov_31.1898_ID_65': [{'START': 37776, 'STOP': 39434, 'ORIENTATION': '-', 'GENE_NAME': 'fliF'}, {'START': 34736, 'STOP': 36109, 'ORIENTATION': '-', 'GENE_NAME': 'fliI'}, {'START': 33150, 'STOP': 34277, 'ORIENTATION': '-', 'GENE_NAME': 'fliK'}, {'START': 36788, 'STOP': 37783, 'ORIENTATION': '-', 'GENE_NAME': 'fliG'}, {'START': 31572, 'STOP': 32576, 'ORIENTATION': '-', 'GENE_NAME': 'fliM'}, {'START': 28985, 'STOP': 29770, 'ORIENTATION': '-', 'GENE_NAME': 'fliR'}, {'START': 30057, 'STOP': 30794, 
'ORIENTATION': '-', 'GENE_NAME': 'fliP'}, {'START': 36109, 'STOP': 36795, 'ORIENTATION': '-', 'GENE_NAME': 'fliH'}, {'START': 32581, 'STOP': 33045, 'ORIENTATION': '-', 'GENE_NAME': 'fliL'}, {'START': 34274, 'STOP': 34717, 'ORIENTATION': '-', 'GENE_NAME': 'fliJ'}, {'START': 31162, 'STOP': 31575, 'ORIENTATION': '-', 'GENE_NAME': 'fliN'}, {'START': 30794, 'STOP': 31159, 'ORIENTATION': '-', 'GENE_NAME': 'fliO'}, {'START': 39649, 'STOP': 39963, 'ORIENTATION': '+', 'GENE_NAME': 'fliE'}, {'START': 29778, 'STOP': 30047, 'ORIENTATION': '-', 'GENE_NAME': 'fliQ'}], 'lcl|ECI-2866|NODE_60_length_5406_cov_21.6393_ID_119': [{'START': 2729, 'STOP': 5406, 'ORIENTATION': '+', 'GENE_NAME': 'espP'}, {'START': 5262, 'STOP': 5334, 'ORIENTATION': '+', 'GENE_NAME': 'CAC39286'}, {'START': 5262, 'STOP': 5334, 'ORIENTATION': '+', 'GENE_NAME': 'espI'}], 'lcl|ECI-2866|NODE_9_length_157371_cov_34.6522_ID_17': [{'START': 51095, 'STOP': 53731, 'ORIENTATION': '+', 'GENE_NAME': 'fimD'}, {'START': 51104, 'STOP': 53731, 'ORIENTATION': '+', 'GENE_NAME': 'fimD'}, {'START': 51161, 'STOP': 53731, 'ORIENTATION': '+', 'GENE_NAME': 'fimD'}, {'START': 51095, 'STOP': 53612, 'ORIENTATION': '+', 'GENE_NAME': 'fimD'}, {'START': 51140, 'STOP': 53731, 'ORIENTATION': '+', 'GENE_NAME': 'fimD'}, {'START': 51753, 'STOP': 53731, 'ORIENTATION': '+', 'GENE_NAME': 'fimD'}, {'START': 54807, 'STOP': 55709, 'ORIENTATION': '+', 'GENE_NAME': 'fimH'}, {'START': 54798, 'STOP': 55709, 'ORIENTATION': '+', 'GENE_NAME': 'fimH'}, {'START': 50303, 'STOP': 51028, 'ORIENTATION': '+', 'GENE_NAME': 'fimC'}, {'START': 50354, 'STOP': 51028, 'ORIENTATION': '+', 'GENE_NAME': 'fimC'}, {'START': 49604, 'STOP': 50266, 'ORIENTATION': '+', 'GENE_NAME': 'fimI'}, {'START': 49621, 'STOP': 50266, 'ORIENTATION': '+', 'GENE_NAME': 'fimI'}, {'START': 49628, 'STOP': 50266, 'ORIENTATION': '+', 'GENE_NAME': 'fimI'}, {'START': 50372, 'STOP': 51028, 'ORIENTATION': '+', 'GENE_NAME': 'fimC'}, {'START': 46957, 'STOP': 47559, 'ORIENTATION': '+', 'GENE_NAME': 'fimB'}, {'START': 48037, 'STOP': 48633, 'ORIENTATION': '+', 'GENE_NAME': 'fimE'}, {'START': 48037, 'STOP': 48631, 'ORIENTATION': '+', 'GENE_NAME': 'fimE'}, {'START': 53741, 'STOP': 54271, 'ORIENTATION': '+', 'GENE_NAME': 'fimF'}, {'START': 49727, 'STOP': 50266, 'ORIENTATION': '+', 'GENE_NAME': 'fimI'}, {'START': 49727, 'STOP': 50255, 'ORIENTATION': '+', 'GENE_NAME': 'fimI'}, {'START': 54284, 'STOP': 54787, 'ORIENTATION': '+', 'GENE_NAME': 'fimG'}, {'START': 49727, 'STOP': 50236, 'ORIENTATION': '+', 'GENE_NAME': 'fimI'}, {'START': 54278, 'STOP': 54787, 'ORIENTATION': '+', 'GENE_NAME': 'fimG'}, {'START': 53738, 'STOP': 54271, 'ORIENTATION': '+', 'GENE_NAME': 'fimF'}, {'START': 49769, 'STOP': 50266, 'ORIENTATION': '+', 'GENE_NAME': 'fimI'}, {'START': 49114, 'STOP': 49662, 'ORIENTATION': '+', 'GENE_NAME': 'fimA'}, {'START': 49057, 'STOP': 49662, 'ORIENTATION': '+', 'GENE_NAME': 'fimA'}, {'START': 47152, 'STOP': 47559, 'ORIENTATION': '+', 'GENE_NAME': 'fimB'}, {'START': 47380, 'STOP': 47559, 'ORIENTATION': '+', 'GENE_NAME': 'fimB'}], 'lcl|ECI-2866|NODE_26_length_62239_cov_34.3381_ID_51': [{'START': 60912, 'STOP': 61544, 'ORIENTATION': '-', 'GENE_NAME': 'gadX'}], 'lcl|ECI-2866|NODE_46_length_15742_cov_35.072_ID_91': [{'START': 3402, 'STOP': 4355, 'ORIENTATION': '-', 'GENE_NAME': 'ompt'}], 'lcl|ECI-2866|NODE_63_length_4414_cov_25.6513_ID_125': [{'START': 4054, 'STOP': 4414, 'ORIENTATION': '-', 'GENE_NAME': 'aec15'}, {'START': 4054, 'STOP': 4412, 'ORIENTATION': '-', 'GENE_NAME': 'aec15'}, {'START': 4054, 'STOP': 4412, 'ORIENTATION': '-', 
'GENE_NAME': 'vgrG'}], 'lcl|ECI-2866|NODE_11_length_143533_cov_28.5907_ID_21': [{'START': 11462, 'STOP': 13360, 'ORIENTATION': '-', 'GENE_NAME': 'espL1'}, {'START': 11462, 'STOP': 13015, 'ORIENTATION': '-', 'GENE_NAME': 'espL1'}, {'START': 11462, 'STOP': 12952, 'ORIENTATION': '-', 'GENE_NAME': 'espL1'}], 'lcl|ECI-2866|NODE_15_length_124782_cov_33.4952_ID_29': [{'START': 80313, 'STOP': 81551, 'ORIENTATION': '+', 'GENE_NAME': 'hofq'}, {'START': 202, 'STOP': 247, 'ORIENTATION': '+', 'GENE_NAME': 'entD'}], 'lcl|ECI-2866|NODE_39_length_33722_cov_30.3088_ID_77': [{'START': 32778, 'STOP': 33598, 'ORIENTATION': '+', 'GENE_NAME': 'ehaB'}, {'START': 32778, 'STOP': 32867, 'ORIENTATION': '+', 'GENE_NAME': 'ehaB'}, {'START': 32778, 'STOP': 32876, 'ORIENTATION': '+', 'GENE_NAME': 'upaC'}, {'START': 32778, 'STOP': 32876, 'ORIENTATION': '+', 'GENE_NAME': 'ehaB'}, {'START': 32778, 'STOP': 32858, 'ORIENTATION': '+', 'GENE_NAME': 'ehaB'}, {'START': 21352, 'STOP': 21387, 'ORIENTATION': '-', 'GENE_NAME': 'entD'}], 'lcl|ECI-2866|NODE_18_length_100066_cov_32.3135_ID_35': [{'START': 97778, 'STOP': 99939, 'ORIENTATION': '-', 'GENE_NAME': 'ehaB'}, {'START': 97778, 'STOP': 99516, 'ORIENTATION': '-', 'GENE_NAME': 'ehaB'}, {'START': 97778, 'STOP': 99220, 'ORIENTATION': '-', 'GENE_NAME': 'ehaB'}, {'START': 97778, 'STOP': 99139, 'ORIENTATION': '-', 'GENE_NAME': 'ehaB'}, {'START': 97778, 'STOP': 99028, 'ORIENTATION': '-', 'GENE_NAME': 'ehaB'}, {'START': 81029, 'STOP': 81063, 'ORIENTATION': '+', 'GENE_NAME': 'entD'}], 'lcl|ECI-2866|NODE_16_length_121752_cov_30.7733_ID_31': [{'START': 75288, 'STOP': 76331, 'ORIENTATION': '-', 'GENE_NAME': 'nada'}, {'START': 25312, 'STOP': 25431, 'ORIENTATION': '+', 'GENE_NAME': 'entD'}], 'lcl|ECI-2866|NODE_6_length_171861_cov_30.779_ID_11': [{'START': 77470, 'STOP': 80070, 'ORIENTATION': '+', 'GENE_NAME': 'ycbS'}, {'START': 97267, 'STOP': 98307, 'ORIENTATION': '-', 'GENE_NAME': 'Z1307'}, {'START': 97267, 'STOP': 98307, 'ORIENTATION': '-', 'GENE_NAME': 'ompA'}, {'START': 80061, 'STOP': 81038, 'ORIENTATION': '+', 'GENE_NAME': 'ycbT'}, {'START': 82174, 'STOP': 82911, 'ORIENTATION': '+', 'GENE_NAME': 'ycbF'}, {'START': 76744, 'STOP': 77445, 'ORIENTATION': '+', 'GENE_NAME': 'ycbR'}, {'START': 81645, 'STOP': 82208, 'ORIENTATION': '+', 'GENE_NAME': 'ycbV'}, {'START': 81209, 'STOP': 81685, 'ORIENTATION': '+', 'GENE_NAME': 'ycbU'}], 'lcl|ECI-2866|NODE_12_length_136264_cov_30.9614_ID_23': [{'START': 21634, 'STOP': 25457, 'ORIENTATION': '+', 'GENE_NAME': 'entF'}, {'START': 17731, 'STOP': 19971, 'ORIENTATION': '-', 'GENE_NAME': 'fepA'}, {'START': 33547, 'STOP': 35157, 'ORIENTATION': '+', 'GENE_NAME': 'entE'}, {'START': 4072, 'STOP': 5454, 'ORIENTATION': '+', 'GENE_NAME': 'ibeB'}, {'START': 4072, 'STOP': 5444, 'ORIENTATION': '+', 'GENE_NAME': 'ibeB'}, {'START': 4068, 'STOP': 5454, 'ORIENTATION': '+', 'GENE_NAME': 'ibeB'}, {'START': 32350, 'STOP': 33537, 'ORIENTATION': '+', 'GENE_NAME': 'entC'}, {'START': 29777, 'STOP': 31027, 'ORIENTATION': '+', 'GENE_NAME': 'entS'}, {'START': 20214, 'STOP': 21416, 'ORIENTATION': '+', 'GENE_NAME': 'fes'}, {'START': 1, 'STOP': 1057, 'ORIENTATION': '-', 'GENE_NAME': 'aec15'}, {'START': 25731, 'STOP': 26864, 'ORIENTATION': '+', 'GENE_NAME': 'fepE'}, {'START': 28662, 'STOP': 29678, 'ORIENTATION': '-', 'GENE_NAME': 'fepD'}, {'START': 31031, 'STOP': 31987, 'ORIENTATION': '-', 'GENE_NAME': 'fepB'}, {'START': 5, 'STOP': 1091, 'ORIENTATION': '-', 'GENE_NAME': 'aec15'}, {'START': 27673, 'STOP': 28665, 'ORIENTATION': '-', 'GENE_NAME': 'fepG'}, {'START': 1, 'STOP': 1057, 
'ORIENTATION': '-', 'GENE_NAME': 'vgrG'}, {'START': 5, 'STOP': 1057, 'ORIENTATION': '-', 'GENE_NAME': 'aec15'}, {'START': 35171, 'STOP': 36028, 'ORIENTATION': '+', 'GENE_NAME': 'entB'}, {'START': 26861, 'STOP': 27676, 'ORIENTATION': '-', 'GENE_NAME': 'fepC'}, {'START': 36028, 'STOP': 36774, 'ORIENTATION': '+', 'GENE_NAME': 'entA'}, {'START': 16936, 'STOP': 17706, 'ORIENTATION': '-', 'GENE_NAME': 'entD'}], 'lcl|ECI-2866|NODE_38_length_33984_cov_20.4854_ID_75': [{'START': 17702, 'STOP': 20698, 'ORIENTATION': '-', 'GENE_NAME': 'hlyA'}, {'START': 22471, 'STOP': 25848, 'ORIENTATION': '-', 'GENE_NAME': 'LH0147'}, {'START': 15532, 'STOP': 17652, 'ORIENTATION': '-', 'GENE_NAME': 'hlyB'}, {'START': 14089, 'STOP': 15528, 'ORIENTATION': '-', 'GENE_NAME': 'hlyD'}, {'START': 20700, 'STOP': 21215, 'ORIENTATION': '-', 'GENE_NAME': 'hlyC'}, {'START': 20700, 'STOP': 21191, 'ORIENTATION': '-', 'GENE_NAME': 'hlyC'}, {'START': 6324, 'STOP': 6628, 'ORIENTATION': '-', 'GENE_NAME': 'ccdb'}, {'START': 9127, 'STOP': 9443, 'ORIENTATION': '-', 'GENE_NAME': 'cia'}, {'START': 9127, 'STOP': 9443, 'ORIENTATION': '-', 'GENE_NAME': 'ECS88'}], 'lcl|ECI-2866|NODE_86_length_1960_cov_20.1937_ID_171': [{'START': 1, 'STOP': 938, 'ORIENTATION': '+', 'GENE_NAME': 'aec15'}, {'START': 3, 'STOP': 938, 'ORIENTATION': '+', 'GENE_NAME': 'aec15'}], 'lcl|ECI-2866|NODE_73_length_2413_cov_26.5752_ID_145': [{'START': 472, 'STOP': 1266, 'ORIENTATION': '+', 'GENE_NAME': 'saa'}], 'lcl|ECI-2866|NODE_23_length_82758_cov_29.125_ID_45': [{'START': 21752, 'STOP': 21790, 'ORIENTATION': '-', 'GENE_NAME': 'focD'}], 'lcl|ECI-2866|NODE_14_length_130829_cov_35.5941_ID_27': [{'START': 31541, 'STOP': 34063, 'ORIENTATION': '+', 'GENE_NAME': 'stgC'}, {'START': 82988, 'STOP': 84865, 'ORIENTATION': '+', 'GENE_NAME': 'espL3'}, {'START': 34074, 'STOP': 35147, 'ORIENTATION': '+', 'GENE_NAME': 'stgD'}, {'START': 30785, 'STOP': 31516, 'ORIENTATION': '+', 'GENE_NAME': 'stgB'}, {'START': 30165, 'STOP': 30737, 'ORIENTATION': '+', 'GENE_NAME': 'stgA'}, {'START': 30165, 'STOP': 30737, 'ORIENTATION': '+', 'GENE_NAME': 'lpfao113'}, {'START': 130517, 'STOP': 130829, 'ORIENTATION': '+', 'GENE_NAME': 'cdiB'}, {'START': 130658, 'STOP': 130829, 'ORIENTATION': '+', 'GENE_NAME': 'cdiB'}], 'lcl|ECI-2866|NODE_21_length_94236_cov_29.797_ID_41': [{'START': 49622, 'STOP': 51265, 'ORIENTATION': '-', 'GENE_NAME': 'flgK'}, {'START': 55827, 'STOP': 57032, 'ORIENTATION': '-', 'GENE_NAME': 'flgE'}, {'START': 52272, 'STOP': 53369, 'ORIENTATION': '-', 'GENE_NAME': 'flgI'}, {'START': 51331, 'STOP': 52272, 'ORIENTATION': '-', 'GENE_NAME': 'flgJ'}, {'START': 48657, 'STOP': 49610, 'ORIENTATION': '-', 'GENE_NAME': 'flgL'}, {'START': 87924, 'STOP': 88757, 'ORIENTATION': '+', 'GENE_NAME': 'csgG'}, {'START': 54132, 'STOP': 54914, 'ORIENTATION': '-', 'GENE_NAME': 'flgG'}, {'START': 55052, 'STOP': 55807, 'ORIENTATION': '-', 'GENE_NAME': 'flgF'}, {'START': 53381, 'STOP': 54079, 'ORIENTATION': '-', 'GENE_NAME': 'flgH'}, {'START': 57057, 'STOP': 57752, 'ORIENTATION': '-', 'GENE_NAME': 'flgD'}, {'START': 86412, 'STOP': 87062, 'ORIENTATION': '+', 'GENE_NAME': 'csgD'}, {'START': 58743, 'STOP': 59402, 'ORIENTATION': '+', 'GENE_NAME': 'flgA'}, {'START': 8349, 'STOP': 9136, 'ORIENTATION': '+', 'GENE_NAME': 'ycfz'}, {'START': 85203, 'STOP': 85685, 'ORIENTATION': '-', 'GENE_NAME': 'csgB'}, {'START': 85203, 'STOP': 85658, 'ORIENTATION': '-', 'GENE_NAME': 'csgB'}, {'START': 87481, 'STOP': 87897, 'ORIENTATION': '+', 'GENE_NAME': 'csgF'}, {'START': 58172, 'STOP': 58588, 'ORIENTATION': '-', 'GENE_NAME': 'flgB'}, 
{'START': 59776, 'STOP': 60192, 'ORIENTATION': '+', 'GENE_NAME': 'flgN'}, {'START': 57764, 'STOP': 58168, 'ORIENTATION': '-', 'GENE_NAME': 'flgC'}, {'START': 87067, 'STOP': 87456, 'ORIENTATION': '+', 'GENE_NAME': 'csgE'}, {'START': 84707, 'STOP': 85162, 'ORIENTATION': '-', 'GENE_NAME': 'csgA'}, {'START': 84316, 'STOP': 84648, 'ORIENTATION': '-', 'GENE_NAME': 'csgC'}], 'lcl|ECI-2866|NODE_36_length_35992_cov_31.0701_ID_71': [{'START': 13181, 'STOP': 16705, 'ORIENTATION': '-', 'GENE_NAME': 'aec30'}, {'START': 13181, 'STOP': 16645, 'ORIENTATION': '-', 'GENE_NAME': 'aec30'}, {'START': 13181, 'STOP': 16614, 'ORIENTATION': '-', 'GENE_NAME': 'icmF'}, {'START': 13181, 'STOP': 16288, 'ORIENTATION': '-', 'GENE_NAME': 'aec30'}, {'START': 13181, 'STOP': 16085, 'ORIENTATION': '-', 'GENE_NAME': 'aec30'}, {'START': 18881, 'STOP': 21646, 'ORIENTATION': '-', 'GENE_NAME': 'clpV'}, {'START': 18881, 'STOP': 21760, 'ORIENTATION': '-', 'GENE_NAME': 'clpV'}, {'START': 32611, 'STOP': 34752, 'ORIENTATION': '+', 'GENE_NAME': 'vgrG'}, {'START': 26627, 'STOP': 28477, 'ORIENTATION': '-', 'GENE_NAME': 'UMNK88'}, {'START': 26627, 'STOP': 28477, 'ORIENTATION': '-', 'GENE_NAME': 'ECO111'}, {'START': 26627, 'STOP': 28477, 'ORIENTATION': '-', 'GENE_NAME': 'O3M'}, {'START': 26627, 'STOP': 28477, 'ORIENTATION': '-', 'GENE_NAME': 'ECO103'}, {'START': 26627, 'STOP': 28477, 'ORIENTATION': '-', 'GENE_NAME': 'G2583'}, {'START': 26627, 'STOP': 28477, 'ORIENTATION': '-', 'GENE_NAME': 'ECs0229'}, {'START': 26627, 'STOP': 28477, 'ORIENTATION': '-', 'GENE_NAME': 'Z0260'}, {'START': 26627, 'STOP': 28477, 'ORIENTATION': '-', 'GENE_NAME': 'EcE24377A'}, {'START': 20064, 'STOP': 21646, 'ORIENTATION': '-', 'GENE_NAME': 'clpV'}, {'START': 32611, 'STOP': 34675, 'ORIENTATION': '+', 'GENE_NAME': 'aec15'}, {'START': 32600, 'STOP': 34675, 'ORIENTATION': '+', 'GENE_NAME': 'aec15'}, {'START': 28901, 'STOP': 30376, 'ORIENTATION': '-', 'GENE_NAME': 'aec18'}, {'START': 11818, 'STOP': 13260, 'ORIENTATION': '-', 'GENE_NAME': 'aec31'}, {'START': 16724, 'STOP': 18136, 'ORIENTATION': '-', 'GENE_NAME': 'aec29'}, {'START': 11861, 'STOP': 13260, 'ORIENTATION': '-', 'GENE_NAME': 'aec31'}, {'START': 28988, 'STOP': 30376, 'ORIENTATION': '-', 'GENE_NAME': 'aec18'}, {'START': 11818, 'STOP': 13170, 'ORIENTATION': '-', 'GENE_NAME': 'aec31'}, {'START': 22421, 'STOP': 23752, 'ORIENTATION': '-', 'GENE_NAME': 'aec25'}, {'START': 11861, 'STOP': 13170, 'ORIENTATION': '-', 'GENE_NAME': 'aec31'}, {'START': 24276, 'STOP': 25577, 'ORIENTATION': '-', 'GENE_NAME': 'aec23'}, {'START': 24276, 'STOP': 25556, 'ORIENTATION': '-', 'GENE_NAME': 'aec23'}, {'START': 25581, 'STOP': 26663, 'ORIENTATION': '-', 'GENE_NAME': 'aec22'}, {'START': 25581, 'STOP': 26669, 'ORIENTATION': '-', 'GENE_NAME': 'aec22'}, {'START': 24276, 'STOP': 25286, 'ORIENTATION': '-', 'GENE_NAME': 'aec23'}, {'START': 29488, 'STOP': 30376, 'ORIENTATION': '-', 'GENE_NAME': 'aec18'}, {'START': 21655, 'STOP': 22416, 'ORIENTATION': '-', 'GENE_NAME': 'aec26'}, {'START': 18141, 'STOP': 18884, 'ORIENTATION': '-', 'GENE_NAME': 'aec28'}, {'START': 16724, 'STOP': 17518, 'ORIENTATION': '-', 'GENE_NAME': 'aec29'}, {'START': 23755, 'STOP': 24279, 'ORIENTATION': '-', 'GENE_NAME': 'aec24'}, {'START': 30686, 'STOP': 31186, 'ORIENTATION': '-', 'GENE_NAME': 'aec17'}, {'START': 31883, 'STOP': 32401, 'ORIENTATION': '+', 'GENE_NAME': 'hcp'}, {'START': 31883, 'STOP': 32401, 'ORIENTATION': '+', 'GENE_NAME': 'aec16'}, {'START': 11312, 'STOP': 11794, 'ORIENTATION': '-', 'GENE_NAME': 'aec32'}, {'START': 28481, 'STOP': 28894, 'ORIENTATION': '-', 
'GENE_NAME': 'aec19'}, {'START': 30427, 'STOP': 30651, 'ORIENTATION': '-', 'GENE_NAME': 'Z0263'}, {'START': 31613, 'STOP': 31756, 'ORIENTATION': '+', 'GENE_NAME': 'Z0265'}], 'lcl|ECI-2866|NODE_22_length_88582_cov_33.0406_ID_43': [{'START': 37513, 'STOP': 37651, 'ORIENTATION': '-', 'GENE_NAME': 'aslA'}, {'START': 80711, 'STOP': 80745, 'ORIENTATION': '-', 'GENE_NAME': 'entD'}], 'lcl|ECI-2866|NODE_2_length_413768_cov_33.1857_ID_3': [{'START': 398693, 'STOP': 400780, 'ORIENTATION': '+', 'GENE_NAME': 'c3610'}, {'START': 398693, 'STOP': 400780, 'ORIENTATION': '+', 'GENE_NAME': 'iha'}, {'START': 242573, 'STOP': 243949, 'ORIENTATION': '+', 'GENE_NAME': 'ygeH'}, {'START': 250669, 'STOP': 251789, 'ORIENTATION': '-', 'GENE_NAME': 'epaS'}, {'START': 253490, 'STOP': 254477, 'ORIENTATION': '-', 'GENE_NAME': 'epaO'}, {'START': 412793, 'STOP': 413696, 'ORIENTATION': '+', 'GENE_NAME': 'CAC39286'}, {'START': 412793, 'STOP': 413696, 'ORIENTATION': '+', 'GENE_NAME': 'espI'}, {'START': 363054, 'STOP': 364034, 'ORIENTATION': '-', 'GENE_NAME': 'yggr'}, {'START': 247037, 'STOP': 247771, 'ORIENTATION': '-', 'GENE_NAME': 'eprK'}, {'START': 248373, 'STOP': 249107, 'ORIENTATION': '-', 'GENE_NAME': 'eprH'}, {'START': 252836, 'STOP': 253500, 'ORIENTATION': '-', 'GENE_NAME': 'epaP'}, {'START': 244939, 'STOP': 245571, 'ORIENTATION': '-', 'GENE_NAME': 'cs3'}, {'START': 384200, 'STOP': 384954, 'ORIENTATION': '-', 'GENE_NAME': 'tia'}, {'START': 213630, 'STOP': 214193, 'ORIENTATION': '-', 'GENE_NAME': 'ppdb'}, {'START': 384200, 'STOP': 384940, 'ORIENTATION': '-', 'GENE_NAME': 'hra1-3'}, {'START': 246440, 'STOP': 247021, 'ORIENTATION': '-', 'GENE_NAME': 'orgA'}, {'START': 384200, 'STOP': 384773, 'ORIENTATION': '-', 'GENE_NAME': 'tia'}, {'START': 244418, 'STOP': 244894, 'ORIENTATION': '+', 'GENE_NAME': 'b2854'}, {'START': 214184, 'STOP': 214654, 'ORIENTATION': '-', 'GENE_NAME': 'ppda'}, {'START': 249812, 'STOP': 250312, 'ORIENTATION': '+', 'GENE_NAME': 'etrA'}, {'START': 241752, 'STOP': 242238, 'ORIENTATION': '+', 'GENE_NAME': 'ygeG'}, {'START': 252115, 'STOP': 252578, 'ORIENTATION': '-', 'GENE_NAME': 'epaR'}, {'START': 245792, 'STOP': 246223, 'ORIENTATION': '-', 'GENE_NAME': 'orgB'}, {'START': 213226, 'STOP': 213633, 'ORIENTATION': '-', 'GENE_NAME': 'ygdb'}, {'START': 244520, 'STOP': 244919, 'ORIENTATION': '+', 'GENE_NAME': 'iagB'}, {'START': 247768, 'STOP': 248099, 'ORIENTATION': '-', 'GENE_NAME': 'eprJ'}, {'START': 212918, 'STOP': 213241, 'ORIENTATION': '-', 'GENE_NAME': 'ppdc'}, {'START': 252567, 'STOP': 252826, 'ORIENTATION': '-', 'GENE_NAME': 'epaQ'}, {'START': 248119, 'STOP': 248359, 'ORIENTATION': '-', 'GENE_NAME': 'eprI'}, {'START': 413624, 'STOP': 413768, 'ORIENTATION': '+', 'GENE_NAME': 'espP'}, {'START': 254437, 'STOP': 254564, 'ORIENTATION': '-', 'GENE_NAME': 'eivJ'}], 'lcl|ECI-2866|NODE_19_length_99613_cov_36.368_ID_37': [{'START': 83233, 'STOP': 86613, 'ORIENTATION': '-', 'GENE_NAME': 'upaG/ehaG'}, {'START': 83233, 'STOP': 85878, 'ORIENTATION': '-', 'GENE_NAME': 'upaG/ehaG'}, {'START': 1, 'STOP': 1695, 'ORIENTATION': '+', 'GENE_NAME': 'cdiA'}, {'START': 323, 'STOP': 1695, 'ORIENTATION': '+', 'GENE_NAME': 'cdiA'}, {'START': 18925, 'STOP': 19680, 'ORIENTATION': '+', 'GENE_NAME': 'tia'}, {'START': 18934, 'STOP': 19680, 'ORIENTATION': '+', 'GENE_NAME': 'tia'}], 'lcl|ECI-2866|NODE_10_length_145407_cov_31.9536_ID_19': [{'START': 53744, 'STOP': 57997, 'ORIENTATION': '-', 'GENE_NAME': 'eae'}, {'START': 53744, 'STOP': 57934, 'ORIENTATION': '-', 'GENE_NAME': 'eae'}, {'START': 27172, 'STOP': 31221, 'ORIENTATION': '-', 
'GENE_NAME': 'ehaA'}, {'START': 27172, 'STOP': 31155, 'ORIENTATION': '-', 'GENE_NAME': 'ehaA'}, {'START': 53750, 'STOP': 57997, 'ORIENTATION': '-', 'GENE_NAME': 'eae'}, {'START': 53787, 'STOP': 57997, 'ORIENTATION': '-', 'GENE_NAME': 'eae'}, {'START': 53787, 'STOP': 57997, 'ORIENTATION': '-', 'GENE_NAME': 'fdeC'}, {'START': 63021, 'STOP': 65546, 'ORIENTATION': '+', 'GENE_NAME': 'ECP'}, {'START': 63021, 'STOP': 65546, 'ORIENTATION': '+', 'GENE_NAME': 'ecpC'}, {'START': 65536, 'STOP': 67179, 'ORIENTATION': '+', 'GENE_NAME': 'ECP'}, {'START': 65536, 'STOP': 67179, 'ORIENTATION': '+', 'GENE_NAME': 'ecpD'}, {'START': 65560, 'STOP': 67179, 'ORIENTATION': '+', 'GENE_NAME': 'ECP'}, {'START': 67148, 'STOP': 67858, 'ORIENTATION': '+', 'GENE_NAME': 'ECP'}, {'START': 67103, 'STOP': 67858, 'ORIENTATION': '+', 'GENE_NAME': 'ECP'}, {'START': 67103, 'STOP': 67856, 'ORIENTATION': '+', 'GENE_NAME': 'ECP'}, {'START': 67103, 'STOP': 67856, 'ORIENTATION': '+', 'GENE_NAME': 'ecpE'}, {'START': 62278, 'STOP': 62995, 'ORIENTATION': '+', 'GENE_NAME': 'ECP'}, {'START': 67148, 'STOP': 67856, 'ORIENTATION': '+', 'GENE_NAME': 'ECP'}, {'START': 62327, 'STOP': 62995, 'ORIENTATION': '+', 'GENE_NAME': 'ECP'}, {'START': 62327, 'STOP': 62995, 'ORIENTATION': '+', 'GENE_NAME': 'ecpB'}, {'START': 67195, 'STOP': 67858, 'ORIENTATION': '+', 'GENE_NAME': 'ECP'}, {'START': 61682, 'STOP': 62269, 'ORIENTATION': '+', 'GENE_NAME': 'ECP'}, {'START': 61017, 'STOP': 61607, 'ORIENTATION': '+', 'GENE_NAME': 'ECP'}, {'START': 61682, 'STOP': 62269, 'ORIENTATION': '+', 'GENE_NAME': 'ecpA'}, {'START': 61653, 'STOP': 62269, 'ORIENTATION': '+', 'GENE_NAME': 'ECP'}, {'START': 61017, 'STOP': 61607, 'ORIENTATION': '+', 'GENE_NAME': 'ecpR'}, {'START': 61065, 'STOP': 61607, 'ORIENTATION': '+', 'GENE_NAME': 'ECP'}, {'START': 61820, 'STOP': 62269, 'ORIENTATION': '+', 'GENE_NAME': 'ECP'}, {'START': 115613, 'STOP': 115664, 'ORIENTATION': '+', 'GENE_NAME': 'entD'}], 'lcl|ECI-2866|NODE_65_length_4116_cov_46.3389_ID_129': [{'START': 1594, 'STOP': 4115, 'ORIENTATION': '+', 'GENE_NAME': 'cdiA'}, {'START': 1, 'STOP': 1581, 'ORIENTATION': '+', 'GENE_NAME': 'cdiB'}, {'START': 1594, 'STOP': 2431, 'ORIENTATION': '+', 'GENE_NAME': 'cdiA'}, {'START': 1270, 'STOP': 1581, 'ORIENTATION': '+', 'GENE_NAME': 'cdiB'}], 'lcl|ECI-2866|NODE_4_length_261081_cov_33.6293_ID_7': [{'START': 121134, 'STOP': 123309, 'ORIENTATION': '+', 'GENE_NAME': 'cadA'}, {'START': 252571, 'STOP': 254144, 'ORIENTATION': '+', 'GENE_NAME': 'espL4'}, {'START': 219797, 'STOP': 221377, 'ORIENTATION': '-', 'GENE_NAME': 'espX4'}, {'START': 188969, 'STOP': 190261, 'ORIENTATION': '+', 'GENE_NAME': 'espX5'}, {'START': 252571, 'STOP': 253710, 'ORIENTATION': '+', 'GENE_NAME': 'espL4'}], 'lcl|ECI-2866|NODE_283_length_368_cov_35.9461_ID_565': [{'START': 1, 'STOP': 368, 'ORIENTATION': '+', 'GENE_NAME': 'aec15'}, {'START': 1, 'STOP': 368, 'ORIENTATION': '+', 'GENE_NAME': 'vgrG'}], 'lcl|ECI-2866|NODE_109_length_1364_cov_53.3694_ID_217': [{'START': 1, 'STOP': 1349, 'ORIENTATION': '+', 'GENE_NAME': 'espP'}, {'START': 507, 'STOP': 1349, 'ORIENTATION': '+', 'GENE_NAME': 'epeA'}], 'lcl|ECI-2866|NODE_34_length_40896_cov_32.935_ID_67': [{'START': 23077, 'STOP': 24810, 'ORIENTATION': '-', 'GENE_NAME': 'ibeC'}, {'START': 23077, 'STOP': 24855, 'ORIENTATION': '-', 'GENE_NAME': 'ibeC'}, {'START': 23077, 'STOP': 24855, 'ORIENTATION': '-', 'GENE_NAME': 'yijP'}], 'lcl|ECI-2866|NODE_210_length_441_cov_0.780255_ID_419': [{'START': 1, 'STOP': 383, 'ORIENTATION': '-', 'GENE_NAME': 'ECO103'}, {'START': 1, 'STOP': 383, 'ORIENTATION': 
'-', 'GENE_NAME': 'ECNA114'}, {'START': 1, 'STOP': 383, 'ORIENTATION': '-', 'GENE_NAME': 'ECP'}, {'START': 1, 'STOP': 383, 'ORIENTATION': '-', 'GENE_NAME': 'APECO1'}, {'START': 1, 'STOP': 383, 'ORIENTATION': '-', 'GENE_NAME': 'c3401'}, {'START': 1, 'STOP': 352, 'ORIENTATION': '-', 'GENE_NAME': 'ECABU'}, {'START': 1, 'STOP': 212, 'ORIENTATION': '-', 'GENE_NAME': 'EcE24377A'}, {'START': 364, 'STOP': 441, 'ORIENTATION': '-', 'GENE_NAME': 'ECO103'}, {'START': 364, 'STOP': 441, 'ORIENTATION': '-', 'GENE_NAME': 'EC958'}, {'START': 364, 'STOP': 441, 'ORIENTATION': '-', 'GENE_NAME': 'LF82'}, {'START': 364, 'STOP': 441, 'ORIENTATION': '-', 'GENE_NAME': 'ECOK1'}, {'START': 364, 'STOP': 441, 'ORIENTATION': '-', 'GENE_NAME': 'ECS88'}, {'START': 364, 'STOP': 441, 'ORIENTATION': '-', 'GENE_NAME': 'ECNA114'}, {'START': 364, 'STOP': 441, 'ORIENTATION': '-', 'GENE_NAME': 'i02'}, {'START': 364, 'STOP': 441, 'ORIENTATION': '-', 'GENE_NAME': 'ECABU'}, {'START': 364, 'STOP': 441, 'ORIENTATION': '-', 'GENE_NAME': 'APECO1'}, {'START': 364, 'STOP': 441, 'ORIENTATION': '-', 'GENE_NAME': 'ECP'}, {'START': 364, 'STOP': 441, 'ORIENTATION': '-', 'GENE_NAME': 'c3400'}], 'lcl|ECI-2866|NODE_28_length_54599_cov_28.4892_ID_55': [{'START': 202, 'STOP': 252, 'ORIENTATION': '+', 'GENE_NAME': 'entD'}], 'lcl|ECI-2866|NODE_35_length_40076_cov_27.9792_ID_69': [{'START': 14025, 'STOP': 15355, 'ORIENTATION': '-', 'GENE_NAME': 'espR1'}, {'START': 14026, 'STOP': 15089, 'ORIENTATION': '-', 'GENE_NAME': 'espR1'}, {'START': 15070, 'STOP': 15394, 'ORIENTATION': '-', 'GENE_NAME': 'espR2'}], 'lcl|ECI-2866|NODE_24_length_71378_cov_29.2686_ID_47': [{'START': 51550, 'STOP': 54201, 'ORIENTATION': '+', 'GENE_NAME': 'fimD'}, {'START': 51550, 'STOP': 54201, 'ORIENTATION': '+', 'GENE_NAME': 'Z2203'}, {'START': 55320, 'STOP': 56234, 'ORIENTATION': '+', 'GENE_NAME': 'Z2206'}, {'START': 50789, 'STOP': 51508, 'ORIENTATION': '+', 'GENE_NAME': 'Z2201'}, {'START': 49874, 'STOP': 50437, 'ORIENTATION': '+', 'GENE_NAME': 'Z2200'}, {'START': 54215, 'STOP': 54745, 'ORIENTATION': '+', 'GENE_NAME': 'Z2204'}, {'START': 54215, 'STOP': 54745, 'ORIENTATION': '+', 'GENE_NAME': 'fimF'}, {'START': 54758, 'STOP': 55261, 'ORIENTATION': '+', 'GENE_NAME': 'fimG'}, {'START': 54758, 'STOP': 55261, 'ORIENTATION': '+', 'GENE_NAME': 'Z2205'}], 'lcl|ECI-2866|NODE_29_length_51952_cov_29.7413_ID_57': [{'START': 31250, 'STOP': 31981, 'ORIENTATION': '+', 'GENE_NAME': 'artj'}], 'lcl|ECI-2866|NODE_5_length_211409_cov_32.4567_ID_9': [{'START': 98098, 'STOP': 99483, 'ORIENTATION': '+', 'GENE_NAME': 'hcp'}, {'START': 194600, 'STOP': 196021, 'ORIENTATION': '+', 'GENE_NAME': 'espX1'}, {'START': 99473, 'STOP': 100675, 'ORIENTATION': '+', 'GENE_NAME': 'hcp'}, {'START': 97648, 'STOP': 98088, 'ORIENTATION': '+', 'GENE_NAME': 'hcp'}], 'lcl|ECI-2866|NODE_41_length_26475_cov_27.3951_ID_81': [{'START': 1, 'STOP': 1057, 'ORIENTATION': '-', 'GENE_NAME': 'aec15'}, {'START': 5, 'STOP': 1093, 'ORIENTATION': '-', 'GENE_NAME': 'aec15'}, {'START': 5, 'STOP': 1057, 'ORIENTATION': '-', 'GENE_NAME': 'aec15'}, {'START': 1, 'STOP': 1057, 'ORIENTATION': '-', 'GENE_NAME': 'vgrG'}], 'lcl|ECI-2866|NODE_1_length_488407_cov_30.2969_ID_1': [{'START': 14178, 'STOP': 15800, 'ORIENTATION': '-', 'GENE_NAME': 'nadb'}, {'START': 270497, 'STOP': 271491, 'ORIENTATION': '-', 'GENE_NAME': 'flk'}], 'lcl|ECI-2866|NODE_3_length_280483_cov_33.8271_ID_5': [{'START': 174238, 'STOP': 176940, 'ORIENTATION': '+', 'GENE_NAME': 'cfa'}, {'START': 174256, 'STOP': 176056, 'ORIENTATION': '+', 'GENE_NAME': 'cfa'}, {'START': 174559, 'STOP': 
176940, 'ORIENTATION': '+', 'GENE_NAME': 'cfa'}, {'START': 176937, 'STOP': 178025, 'ORIENTATION': '+', 'GENE_NAME': 'cfa'}, {'START': 176991, 'STOP': 178025, 'ORIENTATION': '+', 'GENE_NAME': 'cfa'}, {'START': 107619, 'STOP': 108503, 'ORIENTATION': '+', 'GENE_NAME': 'ECS88'}, {'START': 172918, 'STOP': 173634, 'ORIENTATION': '+', 'GENE_NAME': 'cfa'}, {'START': 173664, 'STOP': 174164, 'ORIENTATION': '+', 'GENE_NAME': 'cfa'}, {'START': 177359, 'STOP': 178025, 'ORIENTATION': '+', 'GENE_NAME': 'cfa'}], 'lcl|ECI-2866|NODE_20_length_98076_cov_34.1972_ID_39': [{'START': 55393, 'STOP': 58512, 'ORIENTATION': '-', 'GENE_NAME': 'agn43'}, {'START': 34983, 'STOP': 37043, 'ORIENTATION': '+', 'GENE_NAME': 'gspD'}, {'START': 87782, 'STOP': 89569, 'ORIENTATION': '+', 'GENE_NAME': 'EC55989'}, {'START': 87782, 'STOP': 89569, 'ORIENTATION': '+', 'GENE_NAME': 'EC042'}, {'START': 87905, 'STOP': 89569, 'ORIENTATION': '+', 'GENE_NAME': 'O3M'}, {'START': 37043, 'STOP': 38536, 'ORIENTATION': '+', 'GENE_NAME': 'gspE'}, {'START': 38536, 'STOP': 39759, 'ORIENTATION': '+', 'GENE_NAME': 'gspF'}, {'START': 42739, 'STOP': 43917, 'ORIENTATION': '+', 'GENE_NAME': 'gspL'}, {'START': 55393, 'STOP': 56578, 'ORIENTATION': '-', 'GENE_NAME': 'EC958'}, {'START': 55393, 'STOP': 56706, 'ORIENTATION': '-', 'GENE_NAME': 'cah'}, {'START': 55393, 'STOP': 56578, 'ORIENTATION': '-', 'GENE_NAME': 'cah'}, {'START': 55393, 'STOP': 56565, 'ORIENTATION': '-', 'GENE_NAME': 'cah'}, {'START': 55393, 'STOP': 56706, 'ORIENTATION': '-', 'GENE_NAME': 'EC958'}, {'START': 41765, 'STOP': 42742, 'ORIENTATION': '+', 'GENE_NAME': 'gspK'}, {'START': 33994, 'STOP': 34953, 'ORIENTATION': '+', 'GENE_NAME': 'gspC'}, {'START': 32691, 'STOP': 33500, 'ORIENTATION': '+', 'GENE_NAME': 'b2972'}, {'START': 89992, 'STOP': 90780, 'ORIENTATION': '+', 'GENE_NAME': 'EC55989'}, {'START': 41163, 'STOP': 41762, 'ORIENTATION': '+', 'GENE_NAME': 'gspJ'}, {'START': 40235, 'STOP': 40798, 'ORIENTATION': '+', 'GENE_NAME': 'gspH'}, {'START': 43919, 'STOP': 44455, 'ORIENTATION': '+', 'GENE_NAME': 'gspM'}, {'START': 39776, 'STOP': 40231, 'ORIENTATION': '+', 'GENE_NAME': 'gspG'}, {'START': 33566, 'STOP': 33976, 'ORIENTATION': '+', 'GENE_NAME': 'yghg'}, {'START': 40795, 'STOP': 41166, 'ORIENTATION': '+', 'GENE_NAME': 'gspI'}, {'START': 89644, 'STOP': 89940, 'ORIENTATION': '+', 'GENE_NAME': 'EC042'}, {'START': 89644, 'STOP': 89940, 'ORIENTATION': '+', 'GENE_NAME': 'O3M'}, {'START': 32553, 'STOP': 32597, 'ORIENTATION': '-', 'GENE_NAME': 'entD'}], 'lcl|ECI-2866|NODE_145_length_772_cov_18.1659_ID_289': [{'START': 1, 'STOP': 112, 'ORIENTATION': '+', 'GENE_NAME': 'epeA'}, {'START': 1, 'STOP': 112, 'ORIENTATION': '+', 'GENE_NAME': 'CAC39286'}, {'START': 1, 'STOP': 112, 'ORIENTATION': '+', 'GENE_NAME': 'espI'}, {'START': 1, 'STOP': 112, 'ORIENTATION': '+', 'GENE_NAME': 'espP'}], 'lcl|ECI-2866|NODE_30_length_50634_cov_30.1661_ID_59': [{'START': 21407, 'STOP': 22462, 'ORIENTATION': '+', 'GENE_NAME': 'hlye'}, {'START': 21551, 'STOP': 22462, 'ORIENTATION': '+', 'GENE_NAME': 'hlye'}, {'START': 21545, 'STOP': 22462, 'ORIENTATION': '+', 'GENE_NAME': 'hlye'}, {'START': 21551, 'STOP': 22398, 'ORIENTATION': '+', 'GENE_NAME': 'hlye'}, {'START': 21551, 'STOP': 22061, 'ORIENTATION': '+', 'GENE_NAME': 'hlye'}, {'START': 22181, 'STOP': 22462, 'ORIENTATION': '+', 'GENE_NAME': 'hlye'}, {'START': 22202, 'STOP': 22462, 'ORIENTATION': '+', 'GENE_NAME': 'EC958'}, {'START': 22181, 'STOP': 22398, 'ORIENTATION': '+', 'GENE_NAME': 'hlye'}], 'lcl|ECI-2866|NODE_51_length_8356_cov_18.7279_ID_101': [{'START': 8044, 'STOP': 
8356, 'ORIENTATION': '+', 'GENE_NAME': 'cdiB'}, {'START': 8185, 'STOP': 8356, 'ORIENTATION': '+', 'GENE_NAME': 'cdiB'}], 'lcl|ECI-2866|NODE_17_length_104608_cov_35.3736_ID_33': [{'START': 54782, 'STOP': 55306, 'ORIENTATION': '+', 'GENE_NAME': 'fimI'}]}, 'Serotype': {'O type': 'O22', 'H type': 'H8'}}
| 10,788.333333
| 32,261
| 0.601792
| 3,807
| 32,365
| 4.92041
| 0.261361
| 0.318706
| 0.403694
| 0.036622
| 0.474749
| 0.388747
| 0.366859
| 0.118727
| 0.083707
| 0.061926
| 0
| 0.172083
| 0.100633
| 32,365
| 3
| 32,261
| 10,788.333333
| 0.471451
| 0.003059
| 0
| 0
| 0
| 0
| 0.501643
| 0.077424
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d7eb837a01bcf4d82ba4037dbb7897de0570733c
| 26
|
py
|
Python
|
tests/auctionbets/test_hello_world.py
|
sam-bailey/auctionbets
|
237f2c4d1cb2e3ba2e3194aab35ec85b7bd565f4
|
[
"MIT"
] | null | null | null |
tests/auctionbets/test_hello_world.py
|
sam-bailey/auctionbets
|
237f2c4d1cb2e3ba2e3194aab35ec85b7bd565f4
|
[
"MIT"
] | 4
|
2021-04-11T15:06:50.000Z
|
2021-04-11T19:11:43.000Z
|
tests/melvin/test_hello_world.py
|
sam-bailey/melvin
|
562bd17d84d78f54eb93b77d6aa8c72556a0a31f
|
[
"MIT"
] | null | null | null |
print("Test hello world")
| 13
| 25
| 0.730769
| 4
| 26
| 4.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.115385
| 26
| 1
| 26
| 26
| 0.826087
| 0
| 0
| 0
| 0
| 0
| 0.615385
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
cc284745b925e21a5f5e32898b37c6a8ab358a00
| 21,312
|
py
|
Python
|
src/falconpy/custom_ioa.py
|
mccbryan3/falconpy
|
ec4d3a574f2e9b06d046fc8d7ca6818f1f97331f
|
[
"Unlicense"
] | null | null | null |
src/falconpy/custom_ioa.py
|
mccbryan3/falconpy
|
ec4d3a574f2e9b06d046fc8d7ca6818f1f97331f
|
[
"Unlicense"
] | null | null | null |
src/falconpy/custom_ioa.py
|
mccbryan3/falconpy
|
ec4d3a574f2e9b06d046fc8d7ca6818f1f97331f
|
[
"Unlicense"
] | null | null | null |
"""
 _______                        __ _______ __        __ __
|   _   .----.-----.--.--.--.--|  |   _   |  |_.----|__|  |--.-----.
|.  1___|   _|  _  |  |  |  |  _  |   1___|   _|   _|  |    <|  -__|
|.  |___|__| |_____|________|_____|____   |____|__| |__|__|__|_____|
|:  1   |                         |:  1   |
|::.. . |   CROWDSTRIKE FALCON    |::.. . |    FalconPy
`-------'                         `-------'
OAuth2 API - Customer SDK
custom_ioa - Falcon Custom Indicators of Attack API Interface Class
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.
In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
For more information, please refer to <https://unlicense.org>
"""
# pylint: disable=C0103 # Aligning method names to API operation IDs
from ._util import service_request, parse_id_list, force_default, args_to_params
from ._service_class import ServiceClass
from ._endpoint._custom_ioa import _custom_ioa_endpoints as Endpoints
class Custom_IOA(ServiceClass):
    """
    The only requirement to instantiate an instance of this class
    is a valid token provided by the Falcon API SDK OAuth2 class.
    """
    @force_default(defaults=["parameters"], default_types=["dict"])
    def get_patterns(self: object, parameters: dict = None, **kwargs) -> dict:
        """
        Get pattern severities by ID
        """
        # [GET] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/custom-ioa/get-patterns
        operation_id = "get_patterns"
        target_url = f"{self.base_url}{[ep[2] for ep in Endpoints if operation_id in ep[0]][0]}".replace("?ids={}", "")
        header_payload = self.headers
        parameter_payload = args_to_params(parameters, kwargs, Endpoints, operation_id)
        returned = service_request(caller=self,
                                   method="GET",
                                   endpoint=target_url,
                                   params=parameter_payload,
                                   headers=header_payload,
                                   verify=self.ssl_verify
                                   )
        return returned

    @force_default(defaults=["parameters"], default_types=["dict"])
    def get_platformsMixin0(self: object, parameters: dict = None, **kwargs) -> dict:
        """
        Get platforms by ID
        """
        # [GET] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/custom-ioa/get-platformsMixin0
        operation_id = "get_platformsMixin0"
        target_url = f"{self.base_url}{[ep[2] for ep in Endpoints if operation_id in ep[0]][0]}".replace("?ids={}", "")
        header_payload = self.headers
        parameter_payload = args_to_params(parameters, kwargs, Endpoints, operation_id)
        returned = service_request(caller=self,
                                   method="GET",
                                   endpoint=target_url,
                                   params=parameter_payload,
                                   headers=header_payload,
                                   verify=self.ssl_verify
                                   )
        return returned

    @force_default(defaults=["parameters"], default_types=["dict"])
    def get_rule_groupsMixin0(self: object, parameters: dict = None, **kwargs) -> dict:
        """
        Get rule groups by ID
        """
        # [GET] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/custom-ioa/get-rule-groupsMixin0
        operation_id = "get_rule_groupsMixin0"
        target_url = f"{self.base_url}{[ep[2] for ep in Endpoints if operation_id in ep[0]][0]}".replace("?ids={}", "")
        header_payload = self.headers
        parameter_payload = args_to_params(parameters, kwargs, Endpoints, operation_id)
        returned = service_request(caller=self,
                                   method="GET",
                                   endpoint=target_url,
                                   params=parameter_payload,
                                   headers=header_payload,
                                   verify=self.ssl_verify
                                   )
        return returned

    def create_rule_groupMixin0(self: object, body: dict, cs_username: str) -> dict:
        """
        Create a rule group for a platform with a name and an optional description. Returns the rule group.
        """
        # [POST] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/custom-ioa/create-rule-groupMixin0
        operation_id = "create_rule_groupMixin0"
        target_url = f"{self.base_url}{[ep[2] for ep in Endpoints if operation_id in ep[0]][0]}"
        header_payload = self.headers
        header_payload["X-CS-USERNAME"] = cs_username
        body_payload = body
        returned = service_request(caller=self,
                                   method="POST",
                                   endpoint=target_url,
                                   body=body_payload,
                                   headers=header_payload,
                                   verify=self.ssl_verify
                                   )
        return returned

    @force_default(defaults=["parameters"], default_types=["dict"])
    def delete_rule_groupMixin0(self: object, *args, **kwargs) -> dict:
        """
        Delete rule groups by ID. (Redirects to actual method. Typo fix.)
        """
        returned = self.delete_rule_groupsMixin0(*args, **kwargs)
        return returned

    @force_default(defaults=["parameters"], default_types=["dict"])
    def delete_rule_groupsMixin0(self: object, cs_username: str, parameters: dict = None, **kwargs) -> dict:
        """
        Delete rule groups by ID.
        """
        # [DELETE] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/custom-ioa/delete-rule-groupsMixin0
        operation_id = "delete_rule_groupsMixin0"
        target_url = f"{self.base_url}{[ep[2] for ep in Endpoints if operation_id in ep[0]][0]}".replace("?ids={}", "")
        header_payload = self.headers
        header_payload["X-CS-USERNAME"] = cs_username
        parameter_payload = args_to_params(parameters, kwargs, Endpoints, operation_id)
        returned = service_request(caller=self,
                                   method="DELETE",
                                   endpoint=target_url,
                                   params=parameter_payload,
                                   headers=header_payload,
                                   verify=self.ssl_verify
                                   )
        return returned

    def update_rule_groupMixin0(self: object, body: dict, cs_username: str) -> dict:
        """
        Update a rule group. The following properties can be modified: name, description, enabled.
        """
        # [PATCH] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/custom-ioa/update-rule-groupMixin0
        operation_id = "update_rule_groupMixin0"
        target_url = f"{self.base_url}{[ep[2] for ep in Endpoints if operation_id in ep[0]][0]}"
        header_payload = self.headers
        header_payload["X-CS-USERNAME"] = cs_username
        body_payload = body
        returned = service_request(caller=self,
                                   method="PATCH",
                                   endpoint=target_url,
                                   body=body_payload,
                                   headers=header_payload,
                                   verify=self.ssl_verify
                                   )
        return returned

    @force_default(defaults=["parameters"], default_types=["dict"])
    def get_rule_types(self: object, parameters: dict = None, **kwargs) -> dict:
        """
        Get rule types by ID
        """
        # [GET] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/custom-ioa/get-rule-types
        operation_id = "get_rule_types"
        target_url = f"{self.base_url}{[ep[2] for ep in Endpoints if operation_id in ep[0]][0]}".replace("?ids={}", "")
        header_payload = self.headers
        parameter_payload = args_to_params(parameters, kwargs, Endpoints, operation_id)
        returned = service_request(caller=self,
                                   method="GET",
                                   endpoint=target_url,
                                   params=parameter_payload,
                                   headers=header_payload,
                                   verify=self.ssl_verify
                                   )
        return returned

    def get_rules_get(self: object, ids) -> dict:
        """
        Get rules by ID and optionally version in the following format: ID[:version]
        """
        # [POST] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/custom-ioa/get-rules-get
        operation_id = "get_rules_get"
        target_url = f"{self.base_url}{[ep[2] for ep in Endpoints if operation_id in ep[0]][0]}"
        header_payload = self.headers
        body_payload = {}
        body_payload["ids"] = parse_id_list(ids).split(",")
        returned = service_request(caller=self,
                                   method="POST",
                                   endpoint=target_url,
                                   body=body_payload,
                                   headers=header_payload,
                                   verify=self.ssl_verify
                                   )
        return returned

    @force_default(defaults=["parameters"], default_types=["dict"])
    def get_rulesMixin0(self: object, parameters: dict = None, **kwargs) -> dict:
        """
        Get rules by ID and optionally version in the following format: ID[:version].
        The max number of IDs is constrained by URL size.
        """
        # [GET] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/custom-ioa/get-rulesMixin0
        operation_id = "get_rulesMixin0"
        target_url = f"{self.base_url}{[ep[2] for ep in Endpoints if operation_id in ep[0]][0]}".replace("?ids={}", "")
        header_payload = self.headers
        parameter_payload = args_to_params(parameters, kwargs, Endpoints, operation_id)
        returned = service_request(caller=self,
                                   method="GET",
                                   endpoint=target_url,
                                   params=parameter_payload,
                                   headers=header_payload,
                                   verify=self.ssl_verify
                                   )
        return returned

    def create_rule(self: object, body: dict, cs_username: str) -> dict:
        """
        Create a rule within a rule group. Returns the rule.
        """
        # [POST] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/custom-ioa/create-rule
        operation_id = "create_rule"
        target_url = f"{self.base_url}{[ep[2] for ep in Endpoints if operation_id in ep[0]][0]}"
        header_payload = self.headers
        header_payload["X-CS-USERNAME"] = cs_username
        body_payload = body
        returned = service_request(caller=self,
                                   method="POST",
                                   endpoint=target_url,
                                   body=body_payload,
                                   headers=header_payload,
                                   verify=self.ssl_verify
                                   )
        return returned

    @force_default(defaults=["parameters"], default_types=["dict"])
    def delete_rules(self: object, cs_username: str, parameters: dict = None, **kwargs) -> dict:
        """
        Delete rules from a rule group by ID.
        """
        # [DELETE] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/custom-ioa/delete-rules
        operation_id = "delete_rules"
        target_url = f"{self.base_url}{[ep[2] for ep in Endpoints if operation_id in ep[0]][0]}".replace("?ids={}", "")
        header_payload = self.headers
        header_payload["X-CS-USERNAME"] = cs_username
        parameter_payload = args_to_params(parameters, kwargs, Endpoints, operation_id)
        returned = service_request(caller=self,
                                   method="DELETE",
                                   endpoint=target_url,
                                   params=parameter_payload,
                                   headers=header_payload,
                                   verify=self.ssl_verify
                                   )
        return returned

    def update_rules(self: object, body: dict, cs_username: str) -> dict:
        """
        Update rules within a rule group. Return the updated rules.
        """
        # [PATCH] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/custom-ioa/update-rules
        operation_id = "update_rules"
        target_url = f"{self.base_url}{[ep[2] for ep in Endpoints if operation_id in ep[0]][0]}"
        header_payload = self.headers
        header_payload["X-CS-USERNAME"] = cs_username
        body_payload = body
        returned = service_request(caller=self,
                                   method="PATCH",
                                   endpoint=target_url,
                                   body=body_payload,
                                   headers=header_payload,
                                   verify=self.ssl_verify
                                   )
        return returned

    def validate(self: object, body: dict) -> dict:
        """
        Validates field values and checks for matches if a test string is provided.
        """
        # [POST] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/custom-ioa/validate
        operation_id = "validate"
        target_url = f"{self.base_url}{[ep[2] for ep in Endpoints if operation_id in ep[0]][0]}"
        header_payload = self.headers
        body_payload = body
        returned = service_request(caller=self,
                                   method="POST",
                                   endpoint=target_url,
                                   body=body_payload,
                                   headers=header_payload,
                                   verify=self.ssl_verify
                                   )
        return returned

    @force_default(defaults=["parameters"], default_types=["dict"])
    def query_patterns(self: object, parameters: dict = None, **kwargs) -> dict:
        """
        Get all pattern severity IDs
        """
        # [GET] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/custom-ioa/query-patterns
        operation_id = "query_patterns"
        target_url = f"{self.base_url}{[ep[2] for ep in Endpoints if operation_id in ep[0]][0]}"
        header_payload = self.headers
        parameter_payload = args_to_params(parameters, kwargs, Endpoints, operation_id)
        returned = service_request(caller=self,
                                   method="GET",
                                   endpoint=target_url,
                                   params=parameter_payload,
                                   headers=header_payload,
                                   verify=self.ssl_verify
                                   )
        return returned

    @force_default(defaults=["parameters"], default_types=["dict"])
    def query_platformsMixin0(self: object, parameters: dict = None, **kwargs) -> dict:
        """
        Get all platform IDs.
        """
        # [GET] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/custom-ioa/query-platformsMixin0
        operation_id = "query_platformsMixin0"
        target_url = f"{self.base_url}{[ep[2] for ep in Endpoints if operation_id in ep[0]][0]}"
        header_payload = self.headers
        parameter_payload = args_to_params(parameters, kwargs, Endpoints, operation_id)
        returned = service_request(caller=self,
                                   method="GET",
                                   endpoint=target_url,
                                   params=parameter_payload,
                                   headers=header_payload,
                                   verify=self.ssl_verify
                                   )
        return returned

    @force_default(defaults=["parameters"], default_types=["dict"])
    def query_rule_groups_full(self: object, parameters: dict = None, **kwargs) -> dict:
        """
        Find all rule groups matching the query with optional filter.
        """
        # [GET] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/custom-ioa/query-rule-groups-full
        operation_id = "query_rule_groups_full"
        target_url = f"{self.base_url}{[ep[2] for ep in Endpoints if operation_id in ep[0]][0]}"
        header_payload = self.headers
        parameter_payload = args_to_params(parameters, kwargs, Endpoints, operation_id)
        returned = service_request(caller=self,
                                   method="GET",
                                   endpoint=target_url,
                                   params=parameter_payload,
                                   headers=header_payload,
                                   verify=self.ssl_verify
                                   )
        return returned

    @force_default(defaults=["parameters"], default_types=["dict"])
    def query_rule_groupsMixin0(self: object, parameters: dict = None, **kwargs) -> dict:
        """
        Finds all rule group IDs matching the query with optional filter.
        """
        # [GET] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/custom-ioa/query-rule-groupsMixin0
        operation_id = "query_rule_groupsMixin0"
        target_url = f"{self.base_url}{[ep[2] for ep in Endpoints if operation_id in ep[0]][0]}"
        header_payload = self.headers
        parameter_payload = args_to_params(parameters, kwargs, Endpoints, operation_id)
        returned = service_request(caller=self,
                                   method="GET",
                                   endpoint=target_url,
                                   params=parameter_payload,
                                   headers=header_payload,
                                   verify=self.ssl_verify
                                   )
        return returned

    @force_default(defaults=["parameters"], default_types=["dict"])
    def query_rule_types(self: object, parameters: dict = None, **kwargs) -> dict:
        """
        Get all rule type IDs.
        """
        # [GET] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/custom-ioa/query-rule-types
        operation_id = "query_rule_types"
        target_url = f"{self.base_url}{[ep[2] for ep in Endpoints if operation_id in ep[0]][0]}"
        header_payload = self.headers
        parameter_payload = args_to_params(parameters, kwargs, Endpoints, operation_id)
        returned = service_request(caller=self,
                                   method="GET",
                                   endpoint=target_url,
                                   params=parameter_payload,
                                   headers=header_payload,
                                   verify=self.ssl_verify
                                   )
        return returned

    @force_default(defaults=["parameters"], default_types=["dict"])
    def query_rulesMixin0(self: object, parameters: dict = None, **kwargs) -> dict:
        """
        Finds all rule IDs matching the query with optional filter.
        """
        # [GET] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/custom-ioa/query-rulesMixin0
        operation_id = "query_rulesMixin0"
        target_url = f"{self.base_url}{[ep[2] for ep in Endpoints if operation_id in ep[0]][0]}"
        header_payload = self.headers
        parameter_payload = args_to_params(parameters, kwargs, Endpoints, operation_id)
        returned = service_request(caller=self,
                                   method="GET",
                                   endpoint=target_url,
                                   params=parameter_payload,
                                   headers=header_payload,
                                   verify=self.ssl_verify
                                   )
        return returned
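
# Minimal usage sketch for the class above (illustrative only, not part of the
# documented SDK surface). It assumes a valid OAuth2 bearer token is already
# available in `token`, that the default CrowdStrike API base URL applies, and
# that the query endpoint accepts the standard `limit` parameter; responses
# follow the usual {"status_code", "headers", "body"} envelope returned by
# service_request.
#
#     falcon = Custom_IOA(access_token=token)
#     found = falcon.query_rule_groupsMixin0(limit=10)
#     groups = falcon.get_rule_groupsMixin0(ids=found["body"]["resources"])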
| 50.742857
| 119
| 0.555602
| 2,219
| 21,312
| 5.129788
| 0.111762
| 0.049284
| 0.043925
| 0.046736
| 0.775894
| 0.775894
| 0.775894
| 0.769042
| 0.764122
| 0.729772
| 0
| 0.006903
| 0.347504
| 21,312
| 419
| 120
| 50.863962
| 0.811664
| 0.227618
| 0
| 0.763636
| 0
| 0.069091
| 0.130973
| 0.036102
| 0
| 0
| 0
| 0
| 0
| 1
| 0.072727
| false
| 0
| 0.010909
| 0
| 0.16
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0bd684f449fb29bb6b0b014c1fddbe47cd12fbe1
| 302
|
py
|
Python
|
src/utils/libraries/index.py
|
Shellyda/Algorithms-Sorting-Project
|
205f76b5127a53829056889e46cf240e0d75cbb5
|
[
"MIT"
] | null | null | null |
src/utils/libraries/index.py
|
Shellyda/Algorithms-Sorting-Project
|
205f76b5127a53829056889e46cf240e0d75cbb5
|
[
"MIT"
] | null | null | null |
src/utils/libraries/index.py
|
Shellyda/Algorithms-Sorting-Project
|
205f76b5127a53829056889e46cf240e0d75cbb5
|
[
"MIT"
] | null | null | null |
from utils.libraries.Get_duration_execution_time import Get_duration_execution_time
from utils.libraries.Bubble_sort import Bubble_sort
from utils.libraries.Insertion_sort import Insertion_sort
from utils.libraries.Merge_sort import Merge_sort
from utils.libraries.Selection_sort import Selection_sort
| 50.333333
| 83
| 0.900662
| 44
| 302
| 5.863636
| 0.295455
| 0.174419
| 0.348837
| 0.255814
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.066225
| 302
| 5
| 84
| 60.4
| 0.914894
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
04173ab291a28dec9be5ec4dc8e4e4e26a03aeda
| 20
|
py
|
Python
|
purviewcli/model/__init__.py
|
pblocz/purviewcli
|
4f3ac4f746fac80a2db1e8c6910b88b2a70cb21b
|
[
"MIT"
] | null | null | null |
purviewcli/model/__init__.py
|
pblocz/purviewcli
|
4f3ac4f746fac80a2db1e8c6910b88b2a70cb21b
|
[
"MIT"
] | null | null | null |
purviewcli/model/__init__.py
|
pblocz/purviewcli
|
4f3ac4f746fac80a2db1e8c6910b88b2a70cb21b
|
[
"MIT"
] | null | null | null |
from .atlas import *
| 20
| 20
| 0.75
| 3
| 20
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15
| 20
| 1
| 20
| 20
| 0.882353
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
044cd6d55aaaa85ed616104d85f7411196227c7a
| 91
|
py
|
Python
|
pm4pymdl/algo/mvp/gen_framework/rel_activities/__init__.py
|
dorian1000/pm4py-mdl
|
71e0c2425abb183da293a58d31e25e50137c774f
|
[
"MIT"
] | 5
|
2021-01-31T22:45:29.000Z
|
2022-02-22T14:26:06.000Z
|
pm4pymdl/algo/mvp/gen_framework/rel_activities/__init__.py
|
Javert899/pm4py-mdl
|
4cc875999100f3f1ad60b925a20e40cf52337757
|
[
"MIT"
] | 3
|
2021-07-07T15:32:55.000Z
|
2021-07-07T16:15:36.000Z
|
pm4pymdl/algo/mvp/gen_framework/rel_activities/__init__.py
|
dorian1000/pm4py-mdl
|
71e0c2425abb183da293a58d31e25e50137c774f
|
[
"MIT"
] | 9
|
2020-09-23T15:34:11.000Z
|
2022-03-17T09:15:40.000Z
|
from pm4pymdl.algo.mvp.gen_framework.rel_activities import classic, rel_activities_builder
| 45.5
| 90
| 0.89011
| 13
| 91
| 5.923077
| 0.846154
| 0.337662
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011628
| 0.054945
| 91
| 1
| 91
| 91
| 0.883721
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f087078a59a53f53c94da1a5511f5c19ca713f04
| 6,975
|
py
|
Python
|
utils/hamiltonian.py
|
fhoeb/fh-thesis-scripts
|
8894296ee2ca64bc208cc28803ac888b33bb4a94
|
[
"BSD-3-Clause"
] | 2
|
2020-09-27T16:17:06.000Z
|
2022-02-01T15:25:40.000Z
|
utils/hamiltonian.py
|
fhoeb/fh-thesis-scripts
|
8894296ee2ca64bc208cc28803ac888b33bb4a94
|
[
"BSD-3-Clause"
] | null | null | null |
utils/hamiltonian.py
|
fhoeb/fh-thesis-scripts
|
8894296ee2ca64bc208cc28803ac888b33bb4a94
|
[
"BSD-3-Clause"
] | 1
|
2021-01-18T00:13:01.000Z
|
2021-01-18T00:13:01.000Z
|
from scipy.special import factorial
from itertools import count
import numpy as np
from tmps.utils import pauli, fock
def get_boson_boson_dim(alpha, cutoff_coh):
    """
    Find the cutoff for the local dimension (identical everywhere) from the chosen accuracy alpha for the impurity
    coherent state.
    """
    #
    pop = lambda x: np.exp(-np.abs(alpha) ** 2 / 2) * alpha ** x / np.sqrt(factorial(x, exact=True))
    cutoff_dim = 2
    for n in count(cutoff_dim, 1):
        if np.abs(pop(n))**2 < cutoff_coh:
            cutoff_dim = n
            break
    return cutoff_dim

def get_spin_boson_chain_hamiltonian(omega_0, c0, omega, t, bath_local_dim, finite_T=False):
    """
    Returns local and coupling parts of the Spin-Boson model chain Hamiltonian
    used in Sec. 4.4.1 and 4.4.2 of the thesis.
    :param omega_0: Spin energy
    :param c0: Spin-Bath coupling
    :param omega: Bath energies
    :param t: Bath-bath couplings
    :param bath_local_dim: Local dimension of the bath
    :param finite_T: If set True builds the Hamiltonian for Sec. 4.4.2. If False builds the Hamiltonian for Sec. 4.4.1
    :returns: List of local Hamiltonians, List of coupling Hamiltonians
    """
    if not finite_T:
        # Local Hamiltonian of the System:
        spin_loc = omega_0 / 2 * pauli.X
        # Coupling between System and bath:
        spin_coupl = pauli.Z
    else:
        # Local Hamiltonian of the System:
        spin_loc = omega_0 / 2 * pauli.Z
        # Coupling between System and bath:
        spin_coupl = np.array([[0, 0], [1, 0]], dtype=np.complex128)
    # Local Hamiltonian of the bath
    fock_n = fock.n(bath_local_dim)
    bath_loc = [energy * fock_n for energy in omega]
    # Bath coupling
    bath_coupling_op = np.kron(fock.a(bath_local_dim), fock.a_dag(bath_local_dim)) + \
                       np.kron(fock.a_dag(bath_local_dim), fock.a(bath_local_dim))
    bath_bath_coupl = [coupling * bath_coupling_op for coupling in t]
    # Spin-Bath coupling
    spin_bath_coupl = c0 * (np.kron(spin_coupl, fock.a_dag(bath_local_dim)) +
                            np.kron(spin_coupl.conj().T, fock.a(bath_local_dim)))
    return [spin_loc] + bath_loc, [spin_bath_coupl] + bath_bath_coupl

def get_spin_boson_star_hamiltonian(omega_0, system_index, gamma, xi, bath_local_dim, finite_T=False):
    """
    Returns local and coupling parts of the Spin-Boson model star Hamiltonian
    used in Sec. 4.4.1 and 4.4.2 of the thesis.
    :param omega_0: Spin energy
    :param system_index: Index of the system in the auxiliary chain
    :param gamma: System-Bath couplings
    :param xi: Bath energies
    :param bath_local_dim: Local dimension of the bath
    :param finite_T: If set True uses the Hamiltonian for Sec. 4.4.2. If False builds the Hamiltonian for Sec. 4.4.1
    :returns: List of local Hamiltonians, List of coupling Hamiltonians
    """
    if not finite_T:
        # Local Hamiltonian of the System:
        spin_loc = omega_0 / 2 * pauli.X
        # Coupling between System and bath:
        spin_coupl = pauli.Z
    else:
        # Local Hamiltonian of the System:
        spin_loc = omega_0 / 2 * pauli.Z
        # Coupling between System and bath:
        spin_coupl = np.array([[0, 0], [1, 0]], dtype=np.complex128)
    # Local Hamiltonian of the bath
    fock_n = fock.n(bath_local_dim)
    bath_loc = [energy * fock_n for energy in xi]
    # Coupling operators for the bath to the left of the system
    left_bath_coupling_op = np.kron(fock.a(bath_local_dim), spin_coupl.conj().T) + \
                            np.kron(fock.a_dag(bath_local_dim), spin_coupl)
    left_bath_coupl = [coupling * left_bath_coupling_op for coupling in gamma[:system_index]]
    # Coupling operators for the bath to the right of the system
    right_bath_coupling_op = np.kron(spin_coupl.conj().T, fock.a(bath_local_dim)) + \
                             np.kron(spin_coupl, fock.a_dag(bath_local_dim))
    right_bath_coupl = [coupling * right_bath_coupling_op for coupling in gamma[system_index:]]
    return bath_loc[:system_index] + [spin_loc] + bath_loc[system_index:], left_bath_coupl + right_bath_coupl

def get_boson_boson_chain_hamiltonian(omega_0, c0, omega, t, cutoff_dim):
    """
    Returns local and coupling parts of the Spin-Boson model chain Hamiltonian
    used in Sec. 4.4.3 of the thesis.
    :param omega_0: Spin energy
    :param c0: Spin-Bath coupling
    :param omega: Bath energies
    :param t: Bath-bath couplings
    :param cutoff_dim: Local dimension of the impurity and bath
    :returns: List of local Hamiltonians, List of coupling Hamiltonians
    """
    # Local Hamiltonian of the System:
    sys_loc = omega_0 * fock.n(cutoff_dim)
    # Coupling between System and bath:
    sys_coupl = fock.a(cutoff_dim)
    # Local Hamiltonian of the bath
    fock_n = fock.n(cutoff_dim)
    bath_loc = [energy * fock_n for energy in omega]
    # Bath coupling
    bath_coupling_op = np.kron(fock.a(cutoff_dim), fock.a_dag(cutoff_dim)) + \
                       np.kron(fock.a_dag(cutoff_dim), fock.a(cutoff_dim))
    bath_bath_coupl = [coupling * bath_coupling_op for coupling in t]
    # Spin-Bath coupling
    spin_bath_coupl = c0 * (np.kron(sys_coupl, fock.a_dag(cutoff_dim)) +
                            np.kron(sys_coupl.conj().T, fock.a(cutoff_dim)))
    return [sys_loc] + bath_loc, [spin_bath_coupl] + bath_bath_coupl

def get_boson_boson_star_hamiltonian(omega_0, system_index, gamma, xi, cutoff_dim):
    """
    Returns local and coupling parts of the Spin-Boson model star Hamiltonian
    used in Sec. 4.4.3 of the thesis.
    :param omega_0: Spin energy
    :param system_index: Index of the system in the auxiliary chain
    :param gamma: System-Bath couplings
    :param xi: Bath energies
    :param cutoff_dim: Local dimension of the impurity and bath
    :returns: List of local Hamiltonians, List of coupling Hamiltonians
    """
    # Local Hamiltonian of the System:
    sys_loc = omega_0 * fock.n(cutoff_dim)
    # Coupling between System and bath:
    sys_coupl = fock.a(cutoff_dim)
    # Local Hamiltonian of the bath
    fock_n = fock.n(cutoff_dim)
    bath_loc = [energy * fock_n for energy in xi]
    # Coupling operators for the bath to the left of the system
    left_bath_coupling_op = np.kron(fock.a(cutoff_dim), sys_coupl.conj().T) + \
                            np.kron(fock.a_dag(cutoff_dim), sys_coupl)
    left_bath_coupl = [coupling * left_bath_coupling_op for coupling in gamma[:system_index]]
    # Coupling operators for the bath to the right of the system
    right_bath_coupling_op = np.kron(sys_coupl.conj().T, fock.a(cutoff_dim)) + \
                             np.kron(sys_coupl, fock.a_dag(cutoff_dim))
    right_bath_coupl = [coupling * right_bath_coupling_op for coupling in gamma[system_index:]]
    return bath_loc[:system_index] + [sys_loc] + bath_loc[system_index:], left_bath_coupl + right_bath_coupl
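
# Minimal usage sketch (illustrative only; the bath parameters below are
# made-up placeholders, not values taken from the thesis):
#
#     omega = np.linspace(0.1, 1.0, 5)   # bath mode energies
#     t = np.full(4, 0.05)               # nearest-neighbour bath couplings
#     dim = get_boson_boson_dim(alpha=1.0, cutoff_coh=1e-6)
#     loc, coupl = get_spin_boson_chain_hamiltonian(1.0, 0.2, omega, t, dim)
#     # loc holds len(omega) + 1 local terms, coupl holds len(t) + 1 coupling terms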
| 42.791411
| 118
| 0.677993
| 1,077
| 6,975
| 4.178273
| 0.095636
| 0.031111
| 0.042667
| 0.046667
| 0.902889
| 0.898
| 0.894444
| 0.882222
| 0.843111
| 0.807778
| 0
| 0.013686
| 0.235269
| 6,975
| 162
| 119
| 43.055556
| 0.829959
| 0.385806
| 0
| 0.454545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.075758
| false
| 0
| 0.060606
| 0
| 0.212121
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f08fc0d043f30f7b77fc0be9f310cc14919727ea
| 153
|
py
|
Python
|
pywi/processing/__init__.py
|
jeremiedecock/mrif
|
094b0dd81ff2be0e24bf3871caab48da1b5d138b
|
[
"MIT"
] | 1
|
2021-07-06T06:02:45.000Z
|
2021-07-06T06:02:45.000Z
|
pywi/processing/__init__.py
|
jeremiedecock/mrif
|
094b0dd81ff2be0e24bf3871caab48da1b5d138b
|
[
"MIT"
] | null | null | null |
pywi/processing/__init__.py
|
jeremiedecock/mrif
|
094b0dd81ff2be0e24bf3871caab48da1b5d138b
|
[
"MIT"
] | 1
|
2019-01-07T10:50:38.000Z
|
2019-01-07T10:50:38.000Z
|
"""Processing modules
This package contains image processing algorithms.
"""
from . import compositing
from . import filtering
from . import transform
| 17
| 50
| 0.784314
| 17
| 153
| 7.058824
| 0.705882
| 0.25
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.150327
| 153
| 8
| 51
| 19.125
| 0.923077
| 0.457516
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f0a9b76e7527fc4562f1dffa72d9bb26c30f1073
| 30
|
py
|
Python
|
vggface/resnet50/__init__.py
|
claudiourbina/VGGFace
|
362cc8f805c1fd4135fddf8d602026735bcfdf5a
|
[
"MIT"
] | null | null | null |
vggface/resnet50/__init__.py
|
claudiourbina/VGGFace
|
362cc8f805c1fd4135fddf8d602026735bcfdf5a
|
[
"MIT"
] | null | null | null |
vggface/resnet50/__init__.py
|
claudiourbina/VGGFace
|
362cc8f805c1fd4135fddf8d602026735bcfdf5a
|
[
"MIT"
] | null | null | null |
from .resnet50 import ResNet50
| 30
| 30
| 0.866667
| 4
| 30
| 6.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148148
| 0.1
| 30
| 1
| 30
| 30
| 0.814815
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f0a9ef787da7675bb95305830e5a4f6a73cec7a5
| 14,354
|
py
|
Python
|
supportal/tests/app/management/commands/test_email_users_with_expiring_assignments.py
|
Elizabeth-Warren/supportal-backend
|
e55b0e8fd154730bab1708f27386b2adcb18cfbc
|
[
"MIT"
] | 34
|
2020-03-27T14:59:04.000Z
|
2021-11-15T10:24:12.000Z
|
supportal/tests/app/management/commands/test_email_users_with_expiring_assignments.py
|
Elizabeth-Warren/supportal-backend
|
e55b0e8fd154730bab1708f27386b2adcb18cfbc
|
[
"MIT"
] | 5
|
2021-03-18T22:51:05.000Z
|
2022-02-10T15:03:33.000Z
|
supportal/tests/app/management/commands/test_email_users_with_expiring_assignments.py
|
Elizabeth-Warren/supportal-backend
|
e55b0e8fd154730bab1708f27386b2adcb18cfbc
|
[
"MIT"
] | 14
|
2020-03-27T17:36:39.000Z
|
2020-06-18T21:47:43.000Z
|
from datetime import datetime, timezone
from io import StringIO
from unittest import mock
import freezegun
import pytest
from django.conf import settings
from django.core.management import call_command
from django.utils import timezone
from model_bakery import baker
from supportal.app.common.enums import CanvassResult
from supportal.app.models import EmailSend
CREATED_AT = datetime(2019, 10, 26, 1, tzinfo=timezone.utc)
CREATED_AT_EARLIER = datetime(2019, 10, 26, tzinfo=timezone.utc)
DAY_BEFORE_EXPIRE = datetime(2019, 11, 1, tzinfo=timezone.utc)
TWO_DAY_BEFORE_EXPIRE = datetime(2019, 10, 31, tzinfo=timezone.utc)
EXPIRED_AT = datetime(2019, 11, 2, 1, tzinfo=timezone.utc)
EXPIRED_EARLIER = datetime(2019, 11, 2, tzinfo=timezone.utc)
AFTER_EXPIRATION_DATE = datetime(2019, 11, 3, tzinfo=timezone.utc)
SIX_DAYS_BEFORE_EXPIRE = datetime(2019, 10, 27, tzinfo=timezone.utc)
def email_expiring_users(*args, **kwargs):
    call_command("email_users_with_expiring_assignments", **kwargs)

@pytest.fixture
def first_cambridge_assignment(cambridge_leader_user, cambridge_prospect):
    cambridge_assignment = baker.make(
        "VolProspectAssignment", user=cambridge_leader_user, person=cambridge_prospect
    )
    cambridge_assignment.created_at = CREATED_AT
    cambridge_assignment.save()
    return cambridge_assignment

@pytest.fixture
def hayes_assignment(hayes_valley_leader_user, california_prospect):
    hayes_valley_assignment = baker.make(
        "VolProspectAssignment",
        user=hayes_valley_leader_user,
        person=california_prospect,
    )
    hayes_valley_assignment.created_at = CREATED_AT_EARLIER
    hayes_valley_assignment.save()
    return hayes_valley_assignment

@pytest.fixture
def hayes_cambrdige_assignment(hayes_valley_leader_user, cambridge_prospect):
    hayes_valley_assignment = baker.make(
        "VolProspectAssignment",
        user=hayes_valley_leader_user,
        person=cambridge_prospect,
    )
    hayes_valley_assignment.created_at = CREATED_AT
    hayes_valley_assignment.save()
    return hayes_valley_assignment

@pytest.fixture
def second_cambridge_assignment(cambridge_leader_user, california_prospect):
    cambridge_assignment = baker.make(
        "VolProspectAssignment", user=cambridge_leader_user, person=california_prospect
    )
    cambridge_assignment.created_at = CREATED_AT
    cambridge_assignment.save()
    return cambridge_assignment

@pytest.fixture
def expired_assignment(cambridge_leader_user, somerville_prospect):
    cambridge_assignment = baker.make(
        "VolProspectAssignment", user=cambridge_leader_user, person=somerville_prospect
    )
    cambridge_assignment.created_at = CREATED_AT
    cambridge_assignment.expired_at = EXPIRED_AT
    cambridge_assignment.save()
    return cambridge_assignment

DEFAULT_TEMPLATE_DATA = {
    "assignment_count": "",
    "email": "",
    "expiration_date": "",
    "switchboard_login_url": settings.SUPPORTAL_BASE_URL,
    "first_name": "",
    "last_name": "",
}

def make_payload(assignment_count, email, expiration, first_name, last_name):
    return {
        "assignment_count": assignment_count,
        "email": email,
        "expiration_date": expiration.strftime("%a %b %d, %Y"),
        "switchboard_login_url": settings.SUPPORTAL_BASE_URL,
        "first_name": first_name,
        "last_name": last_name,
    }

def check_email_sends(user, assignment_count, expiration, single_call_mock=None):
    assert EmailSend.objects.filter(user=user).count() == 1
    email_sent = EmailSend.objects.get(user=user)
    assert email_sent.template_name == "expiring_contacts_email"
    assert email_sent.payload == {
        "assignment_count": assignment_count,
        "email": user.email,
        "expiration_date": expiration.strftime("%a %b %d, %Y"),
        "switchboard_login_url": settings.SUPPORTAL_BASE_URL,
        "first_name": user.first_name,
        "last_name": user.last_name,
    }
    if single_call_mock:
        single_call_mock.return_value.send_bulk_email.assert_called_once_with(
            configuration_set_name="organizing_emails",
            default_template_data=DEFAULT_TEMPLATE_DATA,
            from_email=settings.FROM_EMAIL,
            payload_array=[
                make_payload(
                    assignment_count,
                    user.email,
                    expiration,
                    user.first_name,
                    user.last_name,
                )
            ],
            reply_to_email=settings.REPLY_TO_EMAIL,
            template="expiring_contacts_email",
            application_name="supportal",
        )

@pytest.mark.django_db
@freezegun.freeze_time(TWO_DAY_BEFORE_EXPIRE)
def test_email_with_uncontacted_assignments(
    first_cambridge_assignment, expired_assignment
):
    out = StringIO()
    assert EmailSend.objects.filter(user=first_cambridge_assignment.user).count() == 0
    with mock.patch(
        "supportal.app.management.commands.base_email_command.EmailService"
    ) as email_service_mock:
        email_expiring_users(stdout=out, send=True)
    first_cambridge_assignment.refresh_from_db()
    assert EmailSend.objects.all().count() == 1
    check_email_sends(
        first_cambridge_assignment.user, 1, EXPIRED_AT, email_service_mock
    )
    assert "Found 1 users to email." in out.getvalue()

@pytest.mark.django_db
@freezegun.freeze_time(TWO_DAY_BEFORE_EXPIRE)
def test_dryrun(first_cambridge_assignment, expired_assignment):
    out = StringIO()
    assert EmailSend.objects.filter(user=first_cambridge_assignment.user).count() == 0
    with mock.patch(
        "supportal.app.management.commands.base_email_command.EmailService"
    ) as email_service_mock:
        email_expiring_users(stdout=out)
    first_cambridge_assignment.refresh_from_db()
    assert EmailSend.objects.all().count() == 0
    assert first_cambridge_assignment.user.email in out.getvalue()
    assert "Found 1 users to email." in out.getvalue()

@pytest.mark.django_db
@freezegun.freeze_time(DAY_BEFORE_EXPIRE)
def test_dont_email_outside_of_two_days(first_cambridge_assignment, expired_assignment):
    out = StringIO()
    email_expiring_users(stdout=out, send=True)
    assert EmailSend.objects.all().count() == 0
    assert EmailSend.objects.filter(user=first_cambridge_assignment.user).count() == 0
    assert "Found 0 users to email." in out.getvalue()

@pytest.mark.django_db
@freezegun.freeze_time(TWO_DAY_BEFORE_EXPIRE)
def test_email_with_two_assignments(
    first_cambridge_assignment, second_cambridge_assignment, expired_assignment
):
    out = StringIO()
    with mock.patch(
        "supportal.app.management.commands.base_email_command.EmailService"
    ) as email_service_mock:
        email_expiring_users(stdout=out, send=True)
    assert EmailSend.objects.all().count() == 1
    check_email_sends(
        first_cambridge_assignment.user, 2, EXPIRED_AT, email_service_mock
    )
    assert "Found 1 users to email." in out.getvalue()

@pytest.mark.django_db
@freezegun.freeze_time(TWO_DAY_BEFORE_EXPIRE)
def test_email_with_two_users(
    first_cambridge_assignment,
    hayes_assignment,
    hayes_cambrdige_assignment,
    expired_assignment,
):
    out = StringIO()
    with mock.patch(
        "supportal.app.management.commands.base_email_command.EmailService"
    ) as email_service_mock:
        email_expiring_users(stdout=out, send=True)
    assert EmailSend.objects.all().count() == 2
    check_email_sends(first_cambridge_assignment.user, 1, EXPIRED_AT)
    check_email_sends(hayes_assignment.user, 2, EXPIRED_EARLIER)
    email_service_mock.return_value.send_bulk_email.assert_called_once_with(
        configuration_set_name="organizing_emails",
        default_template_data=DEFAULT_TEMPLATE_DATA,
        from_email=settings.FROM_EMAIL,
        payload_array=[
            make_payload(
                1,
                first_cambridge_assignment.user.email,
                EXPIRED_AT,
                first_cambridge_assignment.user.first_name,
                first_cambridge_assignment.user.last_name,
            ),
            make_payload(
                2,
                hayes_assignment.user.email,
                EXPIRED_EARLIER,
                hayes_assignment.user.first_name,
                hayes_assignment.user.last_name,
            ),
        ],
        reply_to_email=settings.REPLY_TO_EMAIL,
        template="expiring_contacts_email",
        application_name="supportal",
    )
    assert "Found 2 users to email." in out.getvalue()

@pytest.mark.django_db
@freezegun.freeze_time(TWO_DAY_BEFORE_EXPIRE)
def test_email_with_two_users_send_all_to_flag(
    first_cambridge_assignment,
    hayes_assignment,
    hayes_cambrdige_assignment,
    expired_assignment,
):
    out = StringIO()
    with mock.patch(
        "supportal.app.management.commands.base_email_command.EmailService"
    ) as email_service_mock:
        email_expiring_users(
            stdout=out, send=True, send_all_to="sgoldblatt+ts@elizabethwarren.com"
        )
    assert EmailSend.objects.all().count() == 0
    email_service_mock.return_value.send_bulk_email.assert_called_once_with(
        configuration_set_name="organizing_emails",
        default_template_data=DEFAULT_TEMPLATE_DATA,
        from_email=settings.FROM_EMAIL,
        payload_array=[
            make_payload(
                1,
                "sgoldblatt+ts@elizabethwarren.com",
                EXPIRED_AT,
                first_cambridge_assignment.user.first_name,
                first_cambridge_assignment.user.last_name,
            ),
            make_payload(
                2,
                "sgoldblatt+ts@elizabethwarren.com",
                EXPIRED_EARLIER,
                hayes_assignment.user.first_name,
                hayes_assignment.user.last_name,
            ),
        ],
        reply_to_email=settings.REPLY_TO_EMAIL,
        template="expiring_contacts_email",
        application_name="supportal",
    )
    assert "Found 2 users to email." in out.getvalue()

@pytest.mark.django_db
@freezegun.freeze_time(TWO_DAY_BEFORE_EXPIRE)
def test_email_with_two_users_limit_flag(
    first_cambridge_assignment,
    hayes_assignment,
    hayes_cambrdige_assignment,
    expired_assignment,
):
    out = StringIO()
    with mock.patch(
        "supportal.app.management.commands.base_email_command.EmailService"
    ) as email_service_mock:
        email_expiring_users(stdout=out, limit=1, send=True)
    assert EmailSend.objects.all().count() == 1
    check_email_sends(first_cambridge_assignment.user, 1, EXPIRED_AT)
    assert "Found 1 users to email." in out.getvalue()

@pytest.mark.django_db
@freezegun.freeze_time(TWO_DAY_BEFORE_EXPIRE)
def test_email_unsuccessfully_contacted_assignments(
    first_cambridge_assignment, expired_assignment
):
    first_cambridge_assignment.create_contact_event(
        result=CanvassResult.UNAVAILABLE_LEFT_MESSAGE
    )
    first_cambridge_assignment.save()
    out = StringIO()
    with mock.patch(
        "supportal.app.management.commands.base_email_command.EmailService"
    ) as email_service_mock:
        email_expiring_users(stdout=out, send=True)
    assert EmailSend.objects.all().count() == 1
    check_email_sends(
        first_cambridge_assignment.user, 1, EXPIRED_AT, email_service_mock
    )
    assert "Found 1 users to email." in out.getvalue()

@pytest.mark.django_db
@freezegun.freeze_time(TWO_DAY_BEFORE_EXPIRE)
def test_dont_email_unsubscribed_user(first_cambridge_assignment, expired_assignment):
    first_cambridge_assignment.user.unsubscribed_at = datetime.now(tz=timezone.utc)
    first_cambridge_assignment.user.save()
    out = StringIO()
    email_expiring_users(stdout=out, send=True)
    assert EmailSend.objects.all().count() == 0
    assert EmailSend.objects.filter(user=first_cambridge_assignment.user).count() == 0
    assert "Found 0 users to email." in out.getvalue()

@pytest.mark.django_db
@freezegun.freeze_time(TWO_DAY_BEFORE_EXPIRE)
def test_dont_email_user_who_was_emailed_recently(
    first_cambridge_assignment, expired_assignment
):
    EmailSend.objects.create(
        user=first_cambridge_assignment.user,
        template_name=EmailSend.EXPIRING_PROSPECTS,
        payload={},
    )
    assert first_cambridge_assignment.user.unsubscribed_at is None
    out = StringIO()
    email_expiring_users(stdout=out, send=True)
    assert EmailSend.objects.all().count() == 1
    assert EmailSend.objects.filter(user=first_cambridge_assignment.user).count() == 1
    assert "Found 0 users to email." in out.getvalue()

@pytest.mark.django_db
@freezegun.freeze_time(TWO_DAY_BEFORE_EXPIRE)
def test_email_user_who_was_invited_recently(
    first_cambridge_assignment, expired_assignment
):
    EmailSend.objects.create(
        user=first_cambridge_assignment.user,
        template_name=EmailSend.INVITE_EMAIL,
        payload={},
    )
    assert first_cambridge_assignment.user.unsubscribed_at is None
    out = StringIO()
    with mock.patch(
        "supportal.app.management.commands.base_email_command.EmailService"
    ) as email_service_mock:
        email_expiring_users(stdout=out, send=True)
    assert EmailSend.objects.all().count() == 2
    assert EmailSend.objects.filter(user=first_cambridge_assignment.user).count() == 2
    assert "Found 1 users to email." in out.getvalue()

@pytest.mark.django_db
@freezegun.freeze_time(TWO_DAY_BEFORE_EXPIRE)
def test_successfully_contacted_dont_email(
    first_cambridge_assignment, expired_assignment
):
    # Make sure that having a previous unsuccessful contact event doesn't cause
    # the contact to get expired.
    first_cambridge_assignment.create_contact_event(
        result=CanvassResult.UNAVAILABLE_LEFT_MESSAGE
    )
    first_cambridge_assignment.create_contact_event(
        result=CanvassResult.SUCCESSFUL_CANVASSED
    )
    first_cambridge_assignment.save()
    out = StringIO()
    email_expiring_users(stdout=out, send=True)
    first_cambridge_assignment.refresh_from_db()
    assert EmailSend.objects.all().count() == 0
    assert "Found 0 users to email." in out.getvalue()

@pytest.mark.django_db
def test_expire_zero_assignments():
    out = StringIO()
    email_expiring_users(stdout=out, send=True)
    assert EmailSend.objects.all().count() == 0
    assert "Found 0 users to email." in out.getvalue()
| 33.933806
| 88
| 0.725442
| 1,719
| 14,354
| 5.710878
| 0.105294
| 0.11419
| 0.107569
| 0.0656
| 0.816237
| 0.759601
| 0.748599
| 0.736172
| 0.71203
| 0.700214
| 0
| 0.009099
| 0.188449
| 14,354
| 422
| 89
| 34.014218
| 0.833634
| 0.007036
| 0
| 0.680912
| 0
| 0
| 0.104
| 0.064281
| 0
| 0
| 0
| 0
| 0.116809
| 1
| 0.059829
| false
| 0
| 0.031339
| 0.002849
| 0.108262
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f0dfa6a70f2b05edad087e65290480ef3841bae8
| 911
|
py
|
Python
|
src/mdfserver/models.py
|
UCHIC/iUTAHData
|
4ffab29ad6b3313416bb2a8b98acf0b2e02c8cab
|
[
"Unlicense"
] | 2
|
2015-02-25T01:12:51.000Z
|
2017-02-08T22:54:41.000Z
|
src/mdfserver/models.py
|
UCHIC/iUTAHData
|
4ffab29ad6b3313416bb2a8b98acf0b2e02c8cab
|
[
"Unlicense"
] | 48
|
2015-01-12T18:01:56.000Z
|
2021-06-10T20:05:26.000Z
|
src/mdfserver/models.py
|
UCHIC/iUTAHData
|
4ffab29ad6b3313416bb2a8b98acf0b2e02c8cab
|
[
"Unlicense"
] | null | null | null |
from django.db import models
# from tinymce import models as tinymce_models
#
# # Create your models here.
#
# class Page(models.Model):
# title = models.CharField(max_length=200)
# url = models.CharField(max_length=200)
# content = models.TextField(max_length=20000) #tinymce_models.HTMLField()#forms.CharField(widget=TinyMCE(attrs={'cols': 80, 'rows': 30})) #Use the WYSIWYG editor in this field.
# def __unicode__(self):
# return self.title
#
# class Subpage(models.Model):
# title = models.CharField(max_length=200)
# url = models.CharField(max_length=200)
# url_visible = models.BooleanField()
# content = models.TextField(max_length=20000)#tinymce_models.HTMLField() #forms.CharField(widget=TinyMCE(attrs={'cols': 80, 'rows': 30})) #Use the WYSIWYG editor in this field.
# pages = models.ForeignKey(Page)
# def __unicode__(self):
# return self.title
| 45.55
| 181
| 0.70472
| 118
| 911
| 5.288136
| 0.389831
| 0.086538
| 0.115385
| 0.153846
| 0.754808
| 0.754808
| 0.657051
| 0.657051
| 0.657051
| 0.657051
| 0
| 0.039267
| 0.161361
| 911
| 20
| 182
| 45.55
| 0.777487
| 0.918771
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f0f736be57d784d43416f82badd0762190fea39d
| 96
|
py
|
Python
|
venv/lib/python3.8/site-packages/debugpy/_vendored/pydevd/pydevd_attach_to_process/winappdbg/win32/psapi.py
|
GiulianaPola/select_repeats
|
17a0d053d4f874e42cf654dd142168c2ec8fbd11
|
[
"MIT"
] | 2
|
2022-03-13T01:58:52.000Z
|
2022-03-31T06:07:54.000Z
|
venv/lib/python3.8/site-packages/debugpy/_vendored/pydevd/pydevd_attach_to_process/winappdbg/win32/psapi.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | 19
|
2021-11-20T04:09:18.000Z
|
2022-03-23T15:05:55.000Z
|
venv/lib/python3.8/site-packages/debugpy/_vendored/pydevd/pydevd_attach_to_process/winappdbg/win32/psapi.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | null | null | null |
/home/runner/.cache/pip/pool/57/99/fd/1d22e7d1fbf9ab07bcdf332318605c4de276c282734bf85d8c6421a6ce
| 96
| 96
| 0.895833
| 9
| 96
| 9.555556
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.40625
| 0
| 96
| 1
| 96
| 96
| 0.489583
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0b058e034e1693406e04d820118fb4291f6395b3
| 4,213
|
py
|
Python
|
aries_cloudagent/protocols/present_proof/dif/tests/test_pres_request.py
|
kuraakhilesh8230/aries-cloudagent-python
|
ee384d1330f6a50ff45a507392ce54f92900f23a
|
[
"Apache-2.0"
] | 4
|
2019-07-01T13:12:50.000Z
|
2019-07-02T20:01:37.000Z
|
aries_cloudagent/protocols/present_proof/dif/tests/test_pres_request.py
|
kuraakhilesh8230/aries-cloudagent-python
|
ee384d1330f6a50ff45a507392ce54f92900f23a
|
[
"Apache-2.0"
] | 51
|
2021-01-12T05:50:50.000Z
|
2022-03-25T06:03:13.000Z
|
aries_cloudagent/protocols/present_proof/dif/tests/test_pres_request.py
|
kuraakhilesh8230/aries-cloudagent-python
|
ee384d1330f6a50ff45a507392ce54f92900f23a
|
[
"Apache-2.0"
] | 12
|
2019-06-24T22:17:44.000Z
|
2019-07-02T19:49:31.000Z
|
from unittest import TestCase
from ..pres_request_schema import DIFProofRequestSchema
class TestPresRequestSchema(TestCase):
"""DIF Presentation Request Test"""
def test_limit_disclosure(self):
test_pd_a = {
"options": {
"challenge": "3fa85f64-5717-4562-b3fc-2c963f66afa7",
"domain": "4jt78h47fh47",
},
"presentation_definition": {
"id": "32f54163-7166-48f1-93d8-ff217bdb0654",
"submission_requirements": [
{
"name": "Citizenship Information",
"rule": "pick",
"min": 1,
"from": "A",
}
],
"input_descriptors": [
{
"id": "citizenship_input_1",
"name": "EU Driver's License",
"group": ["A"],
"schema": [
{
"uri": "https://www.w3.org/2018/credentials#VerifiableCredential"
}
],
"constraints": {
"limit_disclosure": "required",
"fields": [
{
"path": ["$.credentialSubject.givenName"],
"purpose": "The claim must be from one of the specified issuers",
"filter": {
"type": "string",
"enum": ["JOHN", "CAI"],
},
}
],
},
}
],
},
}
test_pd_b = {
"options": {
"challenge": "3fa85f64-5717-4562-b3fc-2c963f66afa7",
"domain": "4jt78h47fh47",
},
"presentation_definition": {
"id": "32f54163-7166-48f1-93d8-ff217bdb0654",
"submission_requirements": [
{
"name": "Citizenship Information",
"rule": "pick",
"min": 1,
"from": "A",
}
],
"input_descriptors": [
{
"id": "citizenship_input_1",
"name": "EU Driver's License",
"group": ["A"],
"schema": [
{
"uri": "https://www.w3.org/2018/credentials#VerifiableCredential"
}
],
"constraints": {
"limit_disclosure": "preferred",
"fields": [
{
"path": ["$.credentialSubject.givenName"],
"purpose": "The claim must be from one of the specified issuers",
"filter": {
"type": "string",
"enum": ["JOHN", "CAI"],
},
}
],
},
}
],
},
}
pres_request_a = DIFProofRequestSchema().load(test_pd_a)
test_limit_disclosure_a = (
pres_request_a.presentation_definition.input_descriptors[
0
].constraint.limit_disclosure
)
assert test_limit_disclosure_a == "required"
pres_request_b = DIFProofRequestSchema().load(test_pd_b)
test_limit_disclosure_b = (
pres_request_b.presentation_definition.input_descriptors[
0
].constraint.limit_disclosure
)
assert test_limit_disclosure_b == "preferred"
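# Summary comment (added for clarity, not in the original test): test_pd_a and test_pd_b are identical
# presentation definitions except for constraints.limit_disclosure ("required" vs "preferred"); the test
# asserts that DIFProofRequestSchema().load() preserves that value on the first input descriptor.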
| 38.651376
| 101
| 0.350344
| 236
| 4,213
| 6.042373
| 0.364407
| 0.09467
| 0.06662
| 0.039271
| 0.746143
| 0.746143
| 0.746143
| 0.746143
| 0.746143
| 0.746143
| 0
| 0.063492
| 0.551389
| 4,213
| 108
| 102
| 39.009259
| 0.691005
| 0.006883
| 0
| 0.54902
| 0
| 0
| 0.237913
| 0.070369
| 0
| 0
| 0
| 0
| 0.019608
| 1
| 0.009804
| false
| 0
| 0.019608
| 0
| 0.039216
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0b1891b092271e40a79284fe9042e306de9c61a6
| 26,953
|
py
|
Python
|
qradar_utilities.py
|
intel471/titan_qradar_sync
|
43d2d2bfcd18c3383e8f4f0377788a0d2f3844a7
|
[
"MIT"
] | 1
|
2021-08-23T08:41:56.000Z
|
2021-08-23T08:41:56.000Z
|
qradar_utilities.py
|
intel471/titan_qradar_sync
|
43d2d2bfcd18c3383e8f4f0377788a0d2f3844a7
|
[
"MIT"
] | null | null | null |
qradar_utilities.py
|
intel471/titan_qradar_sync
|
43d2d2bfcd18c3383e8f4f0377788a0d2f3844a7
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3.8
import time
from typing import List, Dict
import json
import requests
from requests.exceptions import HTTPError
from urllib3.exceptions import InsecureRequestWarning
from json_utilities import json_get
from titan_qradar_sync_config import TitanQRadarSyncConfig
class QRadarUtilities:
def __init__(self, config: TitanQRadarSyncConfig):
self.config = config
requests.packages.urllib3.disable_warnings(category=InsecureRequestWarning)
def first_not_none_or_default(self, object_list: List, default):
result = default
try:
for item in object_list:
if item:
result = item
break
except Exception as e:
result = default
return result
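# Illustrative note (added, not in the original file): first_not_none_or_default returns the first
# truthy element of object_list, falling back to `default` if none is found or iteration fails.
# For example, a hypothetical call
#     utils.first_not_none_or_default([None, "", "10.0.0.1"], "")
# would return "10.0.0.1", since None and "" are skipped as falsy values.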
def get_qradar_details(self) -> Dict:
qradar_details: Dict = None
try:
self.config.logger.info("Attempting to get QRadar details.")
request: str = self.config.qradar_base_url + "system/about"
self.config.logger.info("Sending request: %s", request)
headers = {}
if self.config.qradar_user_agent:
headers = {"User-Agent": self.config.qradar_user_agent}
response = requests.get(request, headers=headers, auth=(self.config.qradar_username, self.config.qradar_password), verify=False)
if response.status_code == 200:
qradar_details = response.json()
else:
self.config.logger.info("Unable to obtain QRadar details.")
except HTTPError as http_err:
qradar_details = None
self.config.logger.error("Unable to get QRadar details: %s", {http_err})
except Exception as e:
qradar_details = None
self.config.logger.error("Unable to get QRadar details: %s", {e})
return qradar_details
def create_reference_set(self, set_name: str, element_type: str):
success: bool = True
try:
params: Dict = {
"name": set_name,
"element_type": element_type,
"time_to_live": self.config.qradar_reference_set_time_to_live,
"timeout_type": self.config.qradar_reference_set_timeout_type
}
self.config.logger.info("Attempting to create " + set_name + " reference set.")
request: str = self.config.qradar_base_url + "reference_data/sets"
self.config.logger.info("Sending request: %s", request)
headers = {}
if self.config.qradar_user_agent:
headers = {"User-Agent": self.config.qradar_user_agent}
response = requests.post(request, headers=headers, auth=(self.config.qradar_username, self.config.qradar_password), verify=False, data=params)
if response.status_code == 201:
self.config.logger.info("Successfully created " + set_name + " reference set.")
else:
self.config.logger.info(response.content)
success = False
self.config.logger.info("Unable to create " + set_name + " reference set.")
except Exception as e:
success = False
self.config.logger.error("Unable to create " + set_name + " reference set: %s", {e})
return success
def create_reference_table(self, table_name: str):
success: bool = True
try:
key_name_types = (
"[{\"element_type\": \"ALNIC\", " +
"\"key_name\": \"Malware Family\"}, " +
"{\"element_type\": \"ALNIC\", " +
"\"key_name\": \"Malware Family Titan URL\"}, " +
"{\"element_type\": \"ALNIC\", " +
"\"key_name\": \"Type\"}, " +
"{\"element_type\": \"ALNIC\", " +
"\"key_name\": \"Indicator\"}, " +
"{\"element_type\": \"ALNIC\", " +
"\"key_name\": \"Indicator Titan URL\"}, " +
"{\"element_type\": \"ALNIC\", " +
"\"key_name\": \"Confidence Level\"}, " +
"{\"element_type\": \"ALNIC\", " +
"\"key_name\": \"Context\"}, " +
"{\"element_type\": \"ALNIC\", " +
"\"key_name\": \"GIRs\"}, " +
"{\"element_type\": \"ALNIC\", " +
"\"key_name\": \"Mitre Tactics\"}, " +
"{\"element_type\": \"DATE\", " +
"\"key_name\": \"Activity First\"}, " +
"{\"element_type\": \"DATE\", " +
"\"key_name\": \"Activity Last\"}, " +
"{\"element_type\": \"DATE\", " +
"\"key_name\": \"Expires\"}]"
)
params: Dict = {
"name": table_name,
"outer_key_label": "UID",
"key_name_types": key_name_types,
"element_type": "ALNIC",
"time_to_live": self.config.qradar_reference_table_time_to_live,
"timeout_type": self.config.qradar_reference_table_timeout_type
}
self.config.logger.info("Attempting to create " + table_name + " reference table.")
request: str = self.config.qradar_base_url + "reference_data/tables"
self.config.logger.info("Sending request: %s", request)
headers = {}
if self.config.qradar_user_agent:
headers = {"User-Agent": self.config.qradar_user_agent}
response = requests.post(request, headers=headers, auth=(self.config.qradar_username, self.config.qradar_password), verify=False, data=params)
if response.status_code == 201:
self.config.logger.info("Successfully created " + table_name + " reference table.")
else:
self.config.logger.info(response.content)
success = False
self.config.logger.info("Unable to create " + table_name + " reference table.")
except Exception as e:
success = False
self.config.logger.error("Unable to create " + table_name + " reference table: %s", {e})
return success
def check_create_reference_set(self, set_name: str, element_type: str) -> bool:
success: bool = True
try:
self.config.logger.info("Checking " + set_name + " reference set.")
request: str = self.config.qradar_base_url + "reference_data/sets/" + set_name
self.config.logger.info("Sending request: %s", request)
headers = {}
if self.config.qradar_user_agent:
headers = {"User-Agent": self.config.qradar_user_agent}
response = requests.get(request, headers=headers, auth=(self.config.qradar_username, self.config.qradar_password), verify=False)
if response.status_code == 200:
self.config.logger.info(set_name + " reference set detected.")
else:
self.config.logger.info(set_name + " reference set not detected.")
success = self.create_reference_set(set_name, element_type)
except Exception as e:
success = False
self.config.logger.error("Unable to check/create reference set: %s", {e})
return success
def check_create_reference_table(self, table_name: str) -> bool:
success: bool = True
try:
self.config.logger.info("Checking " + table_name + " reference table.")
request: str = self.config.qradar_base_url + "reference_data/tables/" + table_name
self.config.logger.info("Sending request: %s", request)
headers = {}
if self.config.qradar_user_agent:
headers = {"User-Agent": self.config.qradar_user_agent}
response = requests.get(request, headers=headers, auth=(self.config.qradar_username, self.config.qradar_password), verify=False)
if response.status_code == 200:
self.config.logger.info(table_name + " reference table detected.")
else:
self.config.logger.info(table_name + " reference table not detected.")
success = self.create_reference_table(table_name)
except Exception as e:
success = False
self.config.logger.error("Unable to check/create reference table: %s", {e})
return success
def check_create_reference_data_structures(self) -> bool:
success: bool = True
try:
# Reference sets.
if self.config.qradar_populate_malware_indicators_sets:
self.check_create_reference_set(self.config.qradar_malware_indicators_set_ip_medium_confidence, "IP")
self.check_create_reference_set(self.config.qradar_malware_indicators_set_ip_high_confidence, "IP")
self.check_create_reference_set(self.config.qradar_malware_indicators_set_hash_medium_confidence, "ALNIC")
self.check_create_reference_set(self.config.qradar_malware_indicators_set_hash_high_confidence, "ALNIC")
self.check_create_reference_set(self.config.qradar_malware_indicators_set_url_medium_confidence, "ALNIC")
self.check_create_reference_set(self.config.qradar_malware_indicators_set_url_high_confidence, "ALNIC")
# Reference tables.
if self.config.qradar_populate_malware_indicators_tables:
self.check_create_reference_table(self.config.qradar_malware_indicators_table)
except Exception as e:
success = False
self.config.logger.error("Unable to check/create reference data structures: %s", {e})
return success
def submit_indicator_batch_reference_set(self, indicator_batch_reference_set: List, set_name: str):
success: bool = True
try:
if len(indicator_batch_reference_set) > 0:
request: str = self.config.qradar_base_url + "reference_data/sets/bulk_load/" + set_name
self.config.logger.info("Sending request: %s", request)
headers = {}
if self.config.qradar_user_agent:
headers = {"User-Agent": self.config.qradar_user_agent}
response = requests.post(request, headers=headers, auth=(self.config.qradar_username, self.config.qradar_password), verify=False, data=json.dumps(indicator_batch_reference_set))
if response.status_code == 200:
self.config.logger.info("Successfully submitted reference set indicator batch.")
else:
self.config.logger.info(response.content)
success = False
self.config.logger.info("Unable to submit reference set indicator batch.")
except Exception as e:
success = False
self.config.logger.error("Unable to submit reference set indicator batch: %s", {e})
return success
def submit_indicator_batch_reference_table(self, indicator_batch_reference_table: Dict, table_name: str):
success: bool = True
try:
request: str = self.config.qradar_base_url + "reference_data/tables/bulk_load/" + table_name
self.config.logger.info("Sending request: %s", request)
headers = {}
if self.config.qradar_user_agent:
headers = {"User-Agent": self.config.qradar_user_agent}
response = requests.post(request, headers=headers, auth=(self.config.qradar_username, self.config.qradar_password), verify=False, data=json.dumps(indicator_batch_reference_table))
if response.status_code == 200:
self.config.logger.info("Successfully submitted reference table indicator batch.")
else:
self.config.logger.info(response.content)
success = False
self.config.logger.info("Unable to submit reference table indicator batch.")
except Exception as e:
success = False
self.config.logger.error("Unable to submit reference table indicator batch: %s", {e})
return success
def create_indicator(self, indicator_context: str, indicator_type: str, indicator_girs: str, indicator_confidence_level: str, indicator_malware_family: str, indicator_malware_family_titan_url: str, indicator_expires: str, indicator_mitre_tactics: str, indicator_activity_first: str, indicator_activity_last: str, indicator_value: str, indicator_titan_url: str):
indicator: Dict = {}
try:
indicator = {
"Context": indicator_context,
"Type": indicator_type,
"GIRs": indicator_girs,
"Confidence Level": indicator_confidence_level,
"Malware Family": indicator_malware_family,
"Malware Family Titan URL": indicator_malware_family_titan_url,
"Expires": indicator_expires,
"Mitre Tactics": indicator_mitre_tactics,
"Indicator": indicator_value,
"Indicator Titan URL": indicator_titan_url,
"First Activity": indicator_activity_first,
"Last Activity": indicator_activity_last
}
except Exception as e:
indicator = {}
self.config.logger.error("Unable to create indicator: %s", {e})
return indicator
def process_indicators(self, indicators: List, reference_object_type: str) -> bool:
success: bool = True
try:
current_time: int = int(round(time.time() * 1000))
if reference_object_type == "Reference Sets":
indicator_batch_ip_medium = []
indicator_batch_ip_high = []
indicator_batch_hash_medium = []
indicator_batch_hash_high = []
indicator_batch_url_medium = []
indicator_batch_url_high = []
for indicator in indicators:
indicator_type: str = self.first_not_none_or_default(json_get(indicator, ["data", "indicator_type"]), "")
indicator_confidence_level: str = self.first_not_none_or_default(json_get(indicator, ["data", "confidence"]), "")
indicator_expiration: int = self.first_not_none_or_default(json_get(indicator, ["data", "expiration"]), 0)
process_indicator: bool = True
if self.config.qradar_ignore_expired_malware_indicators_sets:
if indicator_expiration <= current_time:
process_indicator = False
if process_indicator:
if indicator_type == "ipv4":
indicator_value_ipv4 = self.first_not_none_or_default(json_get(indicator, ["data", "indicator_data", "address"]), "")
if indicator_value_ipv4:
if indicator_confidence_level == "high":
if indicator_value_ipv4 not in indicator_batch_ip_high: indicator_batch_ip_high.append(indicator_value_ipv4)
else:
if indicator_value_ipv4 not in indicator_batch_ip_medium: indicator_batch_ip_medium.append(indicator_value_ipv4)
if indicator_type == "url":
indicator_value_url = self.first_not_none_or_default(json_get(indicator, ["data", "indicator_data", "url"]), "")
if indicator_value_url:
if indicator_confidence_level == "high":
if indicator_value_url not in indicator_batch_url_high: indicator_batch_url_high.append(indicator_value_url)
else:
if indicator_value_url not in indicator_batch_url_medium: indicator_batch_url_medium.append(indicator_value_url)
if indicator_type == "file":
indicator_value_md5 = self.first_not_none_or_default(json_get(indicator, ["data", "indicator_data", "file", "md5"]), "")
if indicator_value_md5:
if indicator_confidence_level == "high":
if indicator_value_md5 not in indicator_batch_hash_high: indicator_batch_hash_high.append(indicator_value_md5)
else:
if indicator_value_md5 not in indicator_batch_hash_medium: indicator_batch_hash_medium.append(indicator_value_md5)
indicator_value_sha1 = self.first_not_none_or_default(json_get(indicator, ["data", "indicator_data", "file", "sha1"]), "")
if indicator_value_sha1:
if indicator_confidence_level == "high":
if indicator_value_sha1 not in indicator_batch_hash_high: indicator_batch_hash_high.append(indicator_value_sha1)
else:
if indicator_value_sha1 not in indicator_batch_hash_medium: indicator_batch_hash_medium.append(indicator_value_sha1)
indicator_value_sha256 = self.first_not_none_or_default(json_get(indicator, ["data", "indicator_data", "file", "sha256"]), "")
if indicator_value_sha256:
if indicator_confidence_level == "high":
if indicator_value_sha256 not in indicator_batch_hash_high: indicator_batch_hash_high.append(indicator_value_sha256)
else:
if indicator_value_sha256 not in indicator_batch_hash_medium: indicator_batch_hash_medium.append(indicator_value_sha256)
if self.config.qradar_populate_malware_indicators_sets:
self.submit_indicator_batch_reference_set(indicator_batch_ip_medium, self.config.qradar_malware_indicators_set_ip_medium_confidence)
self.submit_indicator_batch_reference_set(indicator_batch_ip_high, self.config.qradar_malware_indicators_set_ip_high_confidence)
self.submit_indicator_batch_reference_set(indicator_batch_hash_medium, self.config.qradar_malware_indicators_set_hash_medium_confidence)
self.submit_indicator_batch_reference_set(indicator_batch_hash_high, self.config.qradar_malware_indicators_set_hash_high_confidence)
self.submit_indicator_batch_reference_set(indicator_batch_url_medium, self.config.qradar_malware_indicators_set_url_medium_confidence)
self.submit_indicator_batch_reference_set(indicator_batch_url_high, self.config.qradar_malware_indicators_set_url_high_confidence)
if reference_object_type == "Reference Tables":
indicator_batch = {}
for indicator in indicators:
indicator_uid_raw: str = self.first_not_none_or_default(json_get(indicator, ["data", "uid"]), "")
indicator_context: str = self.first_not_none_or_default(json_get(indicator, ["data", "context", "description"]), "")
indicator_type: str = self.first_not_none_or_default(json_get(indicator, ["data", "indicator_type"]), "")
indicator_girs_list: str = self.first_not_none_or_default(json_get(indicator, ["data", "intel_requirements"]), [])
indicator_girs = ""
for gir in indicator_girs_list:
gir_name = ""
for gir_ref in self.config.girs:
if gir_ref[1] == gir:
gir_name = gir_ref[3]
break
if len(indicator_girs) > 0:
indicator_girs += ","
indicator_girs += "'" + gir + " - " + gir_name + "'"
indicator_girs = "[" + indicator_girs + "]"
indicator_confidence_level: str = self.first_not_none_or_default(json_get(indicator, ["data", "confidence"]), "")
indicator_malware_family: str = self.first_not_none_or_default(json_get(indicator, ["data", "threat", "data", "family"]), "")
indicator_malware_family_titan_url: str = self.config.titan_portal_base_url + "malware/" + self.first_not_none_or_default(json_get(indicator, ["data", "threat", "data", "malware_family_profile_uid"]), "")
indicator_expires: str = self.first_not_none_or_default(json_get(indicator, ["data", "expiration"]), "")
indicator_mitre_tactics: str = self.first_not_none_or_default(json_get(indicator, ["data", "mitre_tactics"]), "")
indicator_activity_first: str = self.first_not_none_or_default(json_get(indicator, ["activity", "first"]), "")
indicator_activity_last: str = self.first_not_none_or_default(json_get(indicator, ["activity", "last"]), "")
indicator_expiration: int = self.first_not_none_or_default(json_get(indicator, ["data", "expiration"]), 0)
process_indicator: bool = True
if self.config.qradar_ignore_expired_malware_indicators_tables:
if indicator_expiration <= current_time:
process_indicator = False
if process_indicator:
indicator_value: str = ""
if indicator_type == "ipv4":
indicator_value = self.first_not_none_or_default(json_get(indicator, ["data", "indicator_data", "address"]), "")
indicator_titan_url = self.config.titan_portal_base_url + "malware/indicator/" + self.first_not_none_or_default(json_get(indicator, ["uid"]), "")
indicator_uid = "ipv4-" + indicator_uid_raw
indicator_created = self.create_indicator(indicator_context, indicator_type, indicator_girs, indicator_confidence_level, indicator_malware_family, indicator_malware_family_titan_url, indicator_expires, indicator_mitre_tactics, indicator_activity_first, indicator_activity_last, indicator_value, indicator_titan_url)
if indicator_created:
indicator_batch[indicator_uid] = indicator_created
if indicator_type == "url":
indicator_value = self.first_not_none_or_default(json_get(indicator, ["data", "indicator_data", "url"]), "")
indicator_titan_url = self.config.titan_portal_base_url + "malware/indicator/" + self.first_not_none_or_default(json_get(indicator, ["uid"]), "")
indicator_uid = "url-" + indicator_uid_raw
indicator_created = self.create_indicator(indicator_context, indicator_type, indicator_girs, indicator_confidence_level, indicator_malware_family, indicator_malware_family_titan_url, indicator_expires, indicator_mitre_tactics, indicator_activity_first, indicator_activity_last, indicator_value, indicator_titan_url)
if indicator_created:
indicator_batch[indicator_uid] = indicator_created
if indicator_type == "file":
indicator_value = self.first_not_none_or_default(json_get(indicator, ["data", "indicator_data", "file", "md5"]), "")
indicator_titan_url = self.config.titan_portal_base_url + "malware/indicator/" + self.first_not_none_or_default(json_get(indicator, ["uid"]), "")
indicator_uid = "md5-" + indicator_uid_raw
indicator_created = self.create_indicator(indicator_context, indicator_type, indicator_girs, indicator_confidence_level, indicator_malware_family, indicator_malware_family_titan_url, indicator_expires, indicator_mitre_tactics, indicator_activity_first, indicator_activity_last, indicator_value, indicator_titan_url)
if indicator_created:
indicator_batch[indicator_uid] = indicator_created
indicator_value = self.first_not_none_or_default(json_get(indicator, ["data", "indicator_data", "file", "sha1"]), "")
indicator_titan_url = self.config.titan_portal_base_url + "malware/indicator/" + self.first_not_none_or_default(json_get(indicator, ["uid"]), "")
indicator_uid = "sha1-" + indicator_uid_raw
indicator_created = self.create_indicator(indicator_context, indicator_type, indicator_girs, indicator_confidence_level, indicator_malware_family, indicator_malware_family_titan_url, indicator_expires, indicator_mitre_tactics, indicator_activity_first, indicator_activity_last, indicator_value, indicator_titan_url)
if indicator_created:
indicator_batch[indicator_uid] = indicator_created
indicator_value = self.first_not_none_or_default(json_get(indicator, ["data", "indicator_data", "file", "sha256"]), "")
indicator_titan_url = self.config.titan_portal_base_url + "malware/indicator/" + self.first_not_none_or_default(json_get(indicator, ["uid"]), "")
indicator_uid = "sha256-" + indicator_uid_raw
indicator_created = self.create_indicator(indicator_context, indicator_type, indicator_girs, indicator_confidence_level, indicator_malware_family, indicator_malware_family_titan_url, indicator_expires, indicator_mitre_tactics, indicator_activity_first, indicator_activity_last, indicator_value, indicator_titan_url)
if indicator_created:
indicator_batch[indicator_uid] = indicator_created
if self.config.qradar_populate_malware_indicators_tables:
self.submit_indicator_batch_reference_table(indicator_batch, self.config.qradar_malware_indicators_table)
except Exception as e:
success = False
self.config.logger.error("Unable to process indicators: %s", {e})
return success
| 59.630531
| 365
| 0.61559
| 2,854
| 26,953
| 5.450596
| 0.058514
| 0.069427
| 0.060684
| 0.027899
| 0.84964
| 0.80162
| 0.764657
| 0.719401
| 0.702687
| 0.650489
| 0
| 0.00467
| 0.292992
| 26,953
| 451
| 366
| 59.762749
| 0.81166
| 0.002115
| 0
| 0.478261
| 0
| 0
| 0.0972
| 0.004871
| 0
| 0
| 0
| 0
| 0
| 1
| 0.032609
| false
| 0.019022
| 0.021739
| 0
| 0.086957
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
9bcb2e6ea72e15c7c10a699c8ebe55ab4cd553e3
| 17,348
|
py
|
Python
|
model.py
|
bfMendonca/CarND-Behavioral-Cloning-P3
|
564b8e0c542292acdc6daf3829522cdcd98a1c95
|
[
"MIT"
] | null | null | null |
model.py
|
bfMendonca/CarND-Behavioral-Cloning-P3
|
564b8e0c542292acdc6daf3829522cdcd98a1c95
|
[
"MIT"
] | null | null | null |
model.py
|
bfMendonca/CarND-Behavioral-Cloning-P3
|
564b8e0c542292acdc6daf3829522cdcd98a1c95
|
[
"MIT"
] | null | null | null |
import csv
import cv2
import numpy as np
import pandas as pd
import sys
from datetime import datetime
from numpy.random import RandomState
import keras
import tensorflow as tf
from keras.models import Sequential
from keras.callbacks import ModelCheckpoint
from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D
def DrivingNetV1():
model = Sequential()
model.add( Cropping2D( cropping=( (90,20), (0,0) ), input_shape=( 160, 320, 3 ) ) )
model.add( Lambda( lambda x: (x/255.0) - 0.5 ) )
model.add( Flatten( ) )
model.add( Dense(1) )
return model
def NVIDIANetV0( lr=1e-3):
model = Sequential( name="NVIDIANetV0" )
model.add( Lambda( lambda x: (x/255.0) - 0.5, input_shape=( 160, 320, 3 ) ) )
model.add( Cropping2D( cropping=( (70,25), (0,0) ) ) )
model.add( Conv2D( 24, 5, 2, activation='relu', padding='valid' ) )
model.add( Conv2D( 36, 5, 2, activation='relu', padding='valid' ) )
model.add( Conv2D( 48, 5, 2, activation='relu', padding='valid' ) )
model.add( Conv2D( 64, 3, activation='relu', padding='valid' ) )
model.add( Conv2D( 64, 3, activation='relu', padding='valid' ) )
model.add( Flatten( ) )
#model.add( Dense(1164, activation='relu' ) )
#model.add( Dropout(0.2))
model.add( Dense(100, activation='linear' ) )
model.add( Dense(50, activation='linear' ) )
model.add( Dense(10, activation='linear' ) )
model.add( Dense(1, activation='linear') )
opt = keras.optimizers.Adam(learning_rate=lr )
model.compile( loss='mse', optimizer=opt )
return model
def NVIDIANetV1( lr=1e-3):
model = Sequential( name="NVIDIANetV1" )
model.add( Lambda( lambda x: (x/255.0) - 0.5, input_shape=( 160, 320, 3 ) ) )
model.add( Cropping2D( cropping=( (70,25), (0,0) ) ) )
model.add( Conv2D( 24, 5, 2, activation='relu', padding='valid' ) )
model.add( Conv2D( 36, 5, 2, activation='relu', padding='valid' ) )
model.add( Conv2D( 48, 5, 2, activation='relu', padding='valid' ) )
model.add( Conv2D( 64, 3, activation='relu', padding='valid' ) )
model.add( Conv2D( 64, 3, activation='relu', padding='valid' ) )
model.add( Flatten( ) )
#model.add( Dense(1164, activation='relu' ) )
#model.add( Dropout(0.2))
model.add( Dense(100, activation='tanh' ) )
model.add( Dense(50, activation='tanh' ) )
model.add( Dense(10, activation='tanh' ) )
model.add( Dense(1, activation='linear') )
#Converting curvature to angle, assuming wheelbase of 2 meters, then going from rad to deg
#The network output was supposed to be 1/R (r); the function then converts it to the steering angle (alpha) [deg]
# alpha = atan(l*r)*57.3. l = wheelbase, assumed to be 2 meters
model.add( Lambda( lambda x: tf.multiply( tf.atan( tf.multiply( x, 2 ) ), 57.3 ) ) )
opt = keras.optimizers.Adam(learning_rate=lr )
model.compile( loss='mse', optimizer=opt )
return model
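# Worked example (added for illustration, not part of the original model definition): the final Lambda
# layer maps the network output r = 1/R to a steering angle via alpha = atan(l*r) * 57.3 with an
# assumed wheelbase l = 2 m. For r = 0.1 [1/m]:
#     import math
#     math.degrees(math.atan(2 * 0.1))  # ~= 11.31 deg, i.e. atan(0.2) * (180/pi), with 57.3 ~= 180/pi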
def NVIDIANetV2( lr=1e-3):
model = Sequential( name="NVIDIANetV2" )
model.add( Lambda( lambda x: (x/255.0) - 0.5, input_shape=( 160, 320, 3 ) ) )
model.add( Cropping2D( cropping=( (70,25), (0,0) ) ) )
model.add( Conv2D( 24, 5, 2, activation='relu', padding='valid' ) )
model.add( Conv2D( 36, 5, 2, activation='relu', padding='valid' ) )
model.add( Conv2D( 48, 5, 2, activation='relu', padding='valid' ) )
model.add( Conv2D( 64, 3, activation='relu', padding='valid' ) )
model.add( Conv2D( 64, 3, activation='relu', padding='valid' ) )
model.add( Flatten( ) )
model.add( Dense(100, activation='linear' ) )
model.add( Dense(50, activation='linear' ) )
model.add( Dense(10, activation='linear' ) )
model.add( Dense(1, activation='linear') )
#Converting curvature to angle, assuming wheelbase of 2 meters, then going from rad to deg
#The network output was supposed to be 1/R (r); the function then converts it to the steering angle (alpha) [deg]
# alpha = atan(l*r)*57.3. l = wheelbase, assumed to be 2 meters
model.add( Lambda( lambda x: tf.multiply( tf.atan( tf.multiply( x, 2 ) ), 57.3 ) ) )
opt = keras.optimizers.Adam(learning_rate=lr )
model.compile( loss='mse', optimizer=opt )
return model
def NVIDIANetV3( lr=1e-3):
model = Sequential( name="NVIDIANetV3" )
model.add( Lambda( lambda x: (x/255.0) - 0.5, input_shape=( 160, 320, 3 ) ) )
model.add( Cropping2D( cropping=( (70,25), (0,0) ) ) )
model.add( Conv2D( 24, 5, 2, activation='relu', padding='valid' ) )
model.add( Conv2D( 36, 5, 2, activation='relu', padding='valid' ) )
model.add( Conv2D( 48, 5, 2, activation='relu', padding='valid' ) )
model.add( Conv2D( 64, 3, activation='relu', padding='valid' ) )
model.add( Conv2D( 64, 3, activation='relu', padding='valid' ) )
model.add( Flatten( ) )
model.add( Dense(100, activation='tanh' ) )
model.add( Dropout(0.5) )
model.add( Dense(50, activation='tanh' ) )
model.add( Dropout(0.5) )
model.add( Dense(10, activation='tanh' ) )
model.add( Dropout(0.5) )
model.add( Dense(1, activation='linear') )
#Converting curvature to angle, assuming wheelbase of 2 meters, then going from rad to deg
#The network output was supposed to be 1/R (r); the function then converts it to the steering angle (alpha) [deg]
# alpha = atan(l*r)*57.3. l = wheelbase, assumed to be 2 meters
model.add( Lambda( lambda x: tf.multiply( tf.atan( tf.multiply( x, 2 ) ), 57.3 ) ) )
opt = keras.optimizers.Adam(learning_rate=lr )
model.compile( loss='mse', optimizer=opt )
return model
def NVIDIANetV4( lr=1e-3):
model = Sequential( name="NVIDIANetV4" )
model.add( Lambda( lambda x: (x/255.0) - 0.5, input_shape=( 160, 320, 3 ) ) )
model.add( Cropping2D( cropping=( (70,25), (0,0) ) ) )
model.add( Conv2D( 24, 5, 2, activation='relu', padding='valid' ) )
model.add( Conv2D( 36, 5, 2, activation='relu', padding='valid' ) )
model.add( Conv2D( 48, 5, 2, activation='relu', padding='valid' ) )
model.add( Conv2D( 64, 3, activation='relu', padding='valid' ) )
model.add( Conv2D( 64, 3, activation='relu', padding='valid' ) )
model.add( Flatten( ) )
#model.add( Dense(1164, activation='relu' ) )
#model.add( Dropout(0.2))
model.add( Dense(100, activation='tanh' ) )
model.add( Dropout(0.5) )
model.add( Dense(50, activation='tanh' ) )
model.add( Dropout(0.25) )
model.add( Dense(10, activation='tanh' ) )
model.add( Dropout(0.125) )
model.add( Dense(1, activation='linear') )
#Converting curvature to angle, assuming wheelbase of 2 meters, then going from rad to deg
#The network output was supposed to be 1/R (r); the function then converts it to the steering angle (alpha) [deg]
# alpha = atan(l*r)*57.3. l = wheelbase, assumed to be 2 meters
model.add( Lambda( lambda x: tf.multiply( tf.atan( tf.multiply( x, 2 ) ), 57.3 ) ) )
opt = keras.optimizers.Adam(learning_rate=lr )
model.compile( loss='mse', optimizer=opt )
return model
def NVIDIANetV5( lr=1e-3):
model = Sequential( name="NVIDIANetV5" )
model.add( Lambda( lambda x: (x/255.0) - 0.5, input_shape=( 160, 320, 3 ) ) )
model.add( Cropping2D( cropping=( (70,25), (0,0) ) ) )
model.add( Conv2D( 24, 5, 2, activation='relu', padding='valid' ) )
model.add( Conv2D( 36, 5, 2, activation='relu', padding='valid' ) )
model.add( Conv2D( 48, 5, 2, activation='relu', padding='valid' ) )
model.add( Conv2D( 64, 3, activation='relu', padding='valid' ) )
model.add( Conv2D( 64, 3, activation='relu', padding='valid' ) )
model.add( Flatten( ) )
model.add( Dense(100, activation='tanh' ) )
model.add( Dropout(0.5) )
model.add( Dense(50, activation='tanh' ) )
model.add( Dropout(0.25) )
model.add( Dense(10, activation='tanh' ) )
model.add( Dense(1, activation='linear') )
#Converting curvature to angle, assuming wheelbase of 2 meters, then going from rad to deg
#The network output was supposed to be 1/R (r); the function then converts it to the steering angle (alpha) [deg]
# alpha = atan(l*r)*57.3. l = wheelbase, assumed to be 2 meters
model.add( Lambda( lambda x: tf.multiply( tf.atan( tf.multiply( x, 2 ) ), 57.3 ) ) )
opt = keras.optimizers.Adam(learning_rate=lr )
model.compile( loss='mse', optimizer=opt )
return model
def NVIDIANetV6( lr=1e-3):
model = Sequential( name="NVIDIANetV6" )
model.add( Lambda( lambda x: (x/255.0) - 0.5, input_shape=( 160, 320, 3 ) ) )
model.add( Cropping2D( cropping=( (70,25), (0,0) ) ) )
model.add( Conv2D( 24, 5, 2, activation='relu', padding='valid' ) )
model.add( Conv2D( 36, 5, 2, activation='relu', padding='valid' ) )
model.add( Conv2D( 48, 5, 2, activation='relu', padding='valid' ) )
model.add( Conv2D( 64, 3, activation='relu', padding='valid' ) )
model.add( Conv2D( 64, 3, activation='relu', padding='valid' ) )
model.add( Flatten( ) )
model.add( Dropout(0.5) )
model.add( Dense(100, activation='tanh' ) )
model.add( Dropout(0.5) )
model.add( Dense(50, activation='tanh' ) )
model.add( Dropout(0.25) )
model.add( Dense(10, activation='tanh' ) )
model.add( Dense(1, activation='linear') )
#Converting curvature to angle, assuming wheelbase of 2 meters, then going from rad to deg
#The network output was supposed to be 1/R (r); the function then converts it to the steering angle (alpha) [deg]
# alpha = atan(l*r)*57.3. l = wheelbase, assumed to be 2 meters
model.add( Lambda( lambda x: tf.multiply( tf.atan( tf.multiply( x, 2 ) ), 57.3 ) ) )
opt = keras.optimizers.Adam(learning_rate=lr )
model.compile( loss='mse', optimizer=opt )
return model
def ModNVIDIANetV1( lr=1e-3):
model = Sequential( name = "ModNVIDIANetV1" )
model.add( Lambda( lambda x: (x/255.0) - 0.5, input_shape=( 160, 320, 3 ) ) )
model.add( Cropping2D( cropping=( (70,25), (0,0) ) ) )
#Keeping padding as "same" and applying a max-pooling layer after each convolution
model.add( Conv2D( 24, 5, 1, activation='relu', padding='same' ) )
model.add( MaxPool2D( ) )
model.add( Conv2D( 36, 5, 1, activation='relu', padding='same' ) )
model.add( MaxPool2D( ) )
model.add( Conv2D( 48, 5, 1, activation='relu', padding='same' ) )
model.add( MaxPool2D( ) )
model.add( Conv2D( 64, 3, 1, activation='relu', padding='same' ) )
model.add( MaxPool2D( ) )
model.add( Conv2D( 64, 3, 1, activation='relu', padding='same' ) )
model.add( MaxPool2D( pool_size=(4, 2) ) ) #forcing this output to become flat
model.add( Flatten( ) )
#model.add( Dense(1164, activation='relu' ) )
#model.add( Dropout(0.2))
model.add( Dense(300, activation='tanh' ) )
model.add( Dense(100, activation='tanh' ) )
model.add( Dense(50, activation='tanh' ) )
model.add( Dense(10, activation='tanh' ) )
model.add( Dense(1, activation='linear' ) )
#Converting curvature to angle, assuming wheelbase of 2 meters, then going from rad to deg
#The network output was supposed to be 1/R (r); the function then converts it to the steering angle (alpha) [deg]
# alpha = atan(l*r)*57.3. l = wheelbase, assumed to be 2 meters
model.add( Lambda( lambda x: tf.multiply( tf.atan( tf.multiply( x, 2 ) ), 57.3 ) ) )
opt = keras.optimizers.Adam(learning_rate=lr )
model.compile( loss='mse', optimizer=opt )
return model
def ModNVIDIANetV2( lr=1e-3):
model = Sequential( name = "ModNVIDIANetV2" )
model.add( Lambda( lambda x: (x/255.0) - 0.5, input_shape=( 160, 320, 3 ) ) )
model.add( Cropping2D( cropping=( (70,25), (0,0) ) ) )
#Keeping padding as "same" and applying a max-pooling layer after each convolution
model.add( Conv2D( 24, 5, 1, activation='relu', padding='same' ) )
model.add( MaxPool2D( ) )
model.add( Conv2D( 36, 5, 1, activation='relu', padding='same' ) )
model.add( MaxPool2D( ) )
model.add( Conv2D( 48, 5, 1, activation='relu', padding='same' ) )
model.add( MaxPool2D( ) )
model.add( Conv2D( 64, 3, 1, activation='relu', padding='same' ) )
model.add( MaxPool2D( ) )
model.add( Conv2D( 64, 3, 1, activation='relu', padding='same' ) )
model.add( MaxPool2D( pool_size=(4, 2) ) ) #forcing this output to become flat
model.add( Flatten( ) )
model.add( Dense(300, activation='linear' ) )
model.add( Dense(100, activation='linear' ) )
model.add( Dense(50, activation='linear' ) )
model.add( Dense(10, activation='linear' ) )
model.add( Dense(1, activation='linear' ) )
#Converting curvature to angle, assuming wheelbase of 2 meters, then going from rad to deg
#The network output was supposed to be 1/R (r); the function then converts it to the steering angle (alpha) [deg]
# alpha = atan(l*r)*57.3. l = wheelbase, assumed to be 2 meters
model.add( Lambda( lambda x: tf.multiply( tf.atan( tf.multiply( x, 2 ) ), 57.3 ) ) )
opt = keras.optimizers.Adam(learning_rate=lr )
model.compile( loss='mse', optimizer=opt )
return model
def ModNVIDIANetV3( lr=1e-3):
model = Sequential( name = "ModNVIDIANetV3" )
model.add( Lambda( lambda x: (x/255.0) - 0.5, input_shape=( 160, 320, 3 ) ) )
model.add( Cropping2D( cropping=( (70,25), (0,0) ) ) )
#Keeping padding as "same" and applying a max-pooling layer after each convolution
model.add( Conv2D( 24, 5, 1, activation='relu', padding='same' ) )
model.add( MaxPool2D( ) )
model.add( Conv2D( 36, 5, 1, activation='relu', padding='same' ) )
model.add( MaxPool2D( ) )
model.add( Conv2D( 48, 5, 1, activation='relu', padding='same' ) )
model.add( MaxPool2D( ) )
model.add( Conv2D( 64, 3, 1, activation='relu', padding='same' ) )
model.add( MaxPool2D( ) )
model.add( Conv2D( 64, 3, 1, activation='relu', padding='same' ) )
model.add( MaxPool2D( pool_size=(4, 2) ) ) #forcing this output to become flat
model.add( Flatten( ) )
model.add( Dense(100, activation='tanh' ) )
model.add( Dropout(0.5) )
model.add( Dense(50, activation='tanh' ) )
model.add( Dropout(0.25) )
model.add( Dense(10, activation='tanh' ) )
model.add( Dense(1, activation='linear') )
#Converting curvature to angle, assuming wheelbase of 2 meters, then going from rad to deg
#The network output was supposed to be 1/R (r); the function then converts it to the steering angle (alpha) [deg]
# alpha = atan(l*r)*57.3. l = wheelbase, assumed to be 2 meters
model.add( Lambda( lambda x: tf.multiply( tf.atan( tf.multiply( x, 2 ) ), 57.3 ) ) )
opt = keras.optimizers.Adam(learning_rate=lr )
model.compile( loss='mse', optimizer=opt )
return model
#Hyper parameters
BATCH_SIZE=64
LEARNING_RATE=1e-4
EPOCHS=5
model_name = sys.argv[1]
model = Sequential()
if( model_name == 'NVIDIANetV0'):
model = NVIDIANetV0( LEARNING_RATE )
elif( model_name == 'NVIDIANetV1'):
model = NVIDIANetV1( LEARNING_RATE )
elif( model_name == 'NVIDIANetV2' ):
model = NVIDIANetV2( LEARNING_RATE )
elif( model_name == 'NVIDIANetV3' ):
model = NVIDIANetV3( LEARNING_RATE )
elif( model_name == 'NVIDIANetV4' ):
model = NVIDIANetV4( LEARNING_RATE )
elif( model_name == 'NVIDIANetV5' ):
model = NVIDIANetV5( LEARNING_RATE )
elif( model_name == 'NVIDIANetV6' ):
model = NVIDIANetV6( LEARNING_RATE )
elif( model_name == 'ModNVIDIANetV1' ):
model = ModNVIDIANetV1( LEARNING_RATE )
elif( model_name == 'ModNVIDIANetV2' ):
model = ModNVIDIANetV2( LEARNING_RATE )
elif( model_name == 'ModNVIDIANetV3' ):
model = ModNVIDIANetV3( LEARNING_RATE )
else:
raise Exception('Invalid model name')
#Load data. Split data into train and validation
df = pd.read_csv('data/driving_log.csv', names=['center', 'left', 'right', 'measurement', '1', '2', '3'])
rng = RandomState()
train = df.sample( frac=0.7, random_state=rng )
valid = df.loc[~df.index.isin(train.index) ]
NUM_TRAIN_IMAGES = train.shape[0]
NUM_TEST_IMAGES = valid.shape[0]
#Defining the generator
def load_data( df, batch_size, augument=False ):
i = 0
while True:
images = []
measurements = []
while len(images) < batch_size:
image_path = df.iloc[i,:]['center'].split('/')[-1]
current_path = './data/IMG/' + image_path
measurement = float( df.iloc[i,:]['measurement'] )
image = cv2.imread( current_path )
measurements.append( measurement )
images.append( image )
if( augument ):
flipped_image = cv2.flip( image, 1 )
images.append( flipped_image )
measurements.append( -1.0*measurement )
# image_path = df.iloc[i,:]['left'].split('/')[-1]
# current_path = './data/IMG/' + image_path
# measurement = float( +0.9 )
# image = cv2.imread( current_path )
# measurements.append( measurement )
# images.append( image )
# image_path = df.iloc[i,:]['right'].split('/')[-1]
# current_path = './data/IMG/' + image_path
# measurement = float( -0.9 )
# image = cv2.imread( current_path )
# measurements.append( measurement )
# images.append( image )
i += 1
if( i == df.shape[0] ):
i = 0
yield ( np.array( images ), np.array( measurements ) )
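# Note (added for clarity, not in the original script): with augument=True the generator also appends a
# horizontally flipped copy of each frame with the negated steering measurement, so every source row can
# yield two samples; this is why NUM_TRAIN_IMAGES is doubled below before computing steps_per_epoch.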
#Define the generators
trainGen = load_data( train, BATCH_SIZE, True)
validGen = load_data( valid, BATCH_SIZE )
NUM_TRAIN_IMAGES = 2*NUM_TRAIN_IMAGES
NUM_TEST_IMAGES = NUM_TEST_IMAGES
print(model.summary())
#Using tensorboard
logdir = "logs/scalars/" + model.name
#defining the TensorBoard callback
tensorboard_callback = keras.callbacks.TensorBoard(log_dir=logdir)
model.fit(
x=trainGen,
steps_per_epoch=NUM_TRAIN_IMAGES//BATCH_SIZE,
verbose=1,
validation_data=validGen,
validation_steps=NUM_TEST_IMAGES//BATCH_SIZE, epochs=EPOCHS,
callbacks=[tensorboard_callback] )
model.save( model.name + '.h5')
| 37.468683
| 112
| 0.654427
| 2,492
| 17,348
| 4.517255
| 0.08427
| 0.121524
| 0.062184
| 0.080839
| 0.822066
| 0.794617
| 0.770809
| 0.769299
| 0.7685
| 0.7685
| 0
| 0.056928
| 0.180828
| 17,348
| 462
| 113
| 37.549784
| 0.735205
| 0.198409
| 0
| 0.648649
| 0
| 0
| 0.073879
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.040541
| false
| 0
| 0.040541
| 0
| 0.118243
| 0.003378
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
aca30b0e051054cf8605e9d7df37ed0c840f2b51
| 383
|
py
|
Python
|
for_fun/mul_and_div/mul_and_div.py
|
trisct/Software-Tutorials
|
50d7851b861700fe256dfed97f84dc321a5286dc
|
[
"CC0-1.0"
] | 2
|
2021-08-22T05:19:26.000Z
|
2021-12-21T12:03:57.000Z
|
for_fun/mul_and_div/mul_and_div.py
|
trisct/Software-Tutorials
|
50d7851b861700fe256dfed97f84dc321a5286dc
|
[
"CC0-1.0"
] | null | null | null |
for_fun/mul_and_div/mul_and_div.py
|
trisct/Software-Tutorials
|
50d7851b861700fe256dfed97f84dc321a5286dc
|
[
"CC0-1.0"
] | null | null | null |
import time
a = 3215.35127
b = 3.
start = time.time()
for i in range(100000000):
c = a / b
end = time.time()
time_elapsed = end - start
print('Time elapsed (div ver) = %.5f' % time_elapsed)
a = 3215.35127
b = 1./3.
start = time.time()
for i in range(100000000):
c = a * b
end = time.time()
time_elapsed = end - start
print('Time elapsed (mul ver) = %.5f' % time_elapsed)
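# Note (added for clarity, not in the original script): both loops run 100,000,000 iterations; the first
# times repeated division a / 3, the second times multiplication by the precomputed reciprocal b = 1/3,
# so the printed timings compare the relative cost of float division versus multiplication.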
| 17.409091
| 53
| 0.629243
| 65
| 383
| 3.646154
| 0.338462
| 0.202532
| 0.084388
| 0.092827
| 0.700422
| 0.700422
| 0.700422
| 0.700422
| 0.700422
| 0.700422
| 0
| 0.136213
| 0.214099
| 383
| 21
| 54
| 18.238095
| 0.651163
| 0
| 0
| 0.588235
| 0
| 0
| 0.151436
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.058824
| 0
| 0.058824
| 0.117647
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
c5a14c70626d442e04bd03fd58414f48d6a57094
| 119
|
py
|
Python
|
communication_modules/azure_iot_hub/__init__.py
|
dbenge/SimpleSensor_contrib
|
f48c31d3a0e0e29531ac5b0b445dccafd4f1e1d9
|
[
"Apache-2.0"
] | null | null | null |
communication_modules/azure_iot_hub/__init__.py
|
dbenge/SimpleSensor_contrib
|
f48c31d3a0e0e29531ac5b0b445dccafd4f1e1d9
|
[
"Apache-2.0"
] | 5
|
2018-07-22T03:06:33.000Z
|
2018-11-08T22:42:53.000Z
|
communication_modules/azure_iot_hub/__init__.py
|
dbenge/SimpleSensor_contrib
|
f48c31d3a0e0e29531ac5b0b445dccafd4f1e1d9
|
[
"Apache-2.0"
] | 3
|
2018-07-11T14:49:06.000Z
|
2022-03-24T18:31:26.000Z
|
from simplesensor.communication_modules.azure_iot_hub.azureIotHubModule import AzureIotHubModule as CommunicationModule
| 119
| 119
| 0.932773
| 12
| 119
| 9
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.042017
| 119
| 1
| 119
| 119
| 0.947368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
c5aeb41622feaa3f7b1564f4b8743b3dd401d728
| 199
|
py
|
Python
|
nanodet/util/rank_filter.py
|
zjiao19/nanodet
|
17af4a81fa93e0405f3a9f8c8feb75ad7b9adc50
|
[
"Apache-2.0"
] | 8
|
2021-05-01T14:11:19.000Z
|
2022-01-11T01:08:35.000Z
|
nanodet/util/rank_filter.py
|
zjiao19/nanodet
|
17af4a81fa93e0405f3a9f8c8feb75ad7b9adc50
|
[
"Apache-2.0"
] | 1
|
2021-12-20T08:01:20.000Z
|
2021-12-20T08:01:20.000Z
|
nanodet/util/rank_filter.py
|
zjiao19/nanodet
|
17af4a81fa93e0405f3a9f8c8feb75ad7b9adc50
|
[
"Apache-2.0"
] | null | null | null |
def rank_filter(func):
def func_filter(local_rank=-1, *args, **kwargs):
if local_rank < 1:
return func(*args, **kwargs)
else:
pass
return func_filter
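# Minimal usage sketch (assumption, added for illustration; not part of the original module): rank_filter
# appears intended to make a function run only when local_rank < 1 (e.g. on the main process in
# distributed training) and silently no-op otherwise:
#
#     @rank_filter
#     def save_checkpoint(path):
#         print("saving to", path)
#
#     save_checkpoint(local_rank=0, path="model.ckpt")  # runs
#     save_checkpoint(local_rank=1, path="model.ckpt")  # skipped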
| 22.111111
| 52
| 0.557789
| 25
| 199
| 4.24
| 0.48
| 0.188679
| 0.188679
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.015038
| 0.331658
| 199
| 8
| 53
| 24.875
| 0.781955
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| false
| 0.142857
| 0
| 0
| 0.571429
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 6
|
c5f1690fb5bdf9e0c170536780d8205e10259b5d
| 94
|
py
|
Python
|
backend/app/app/crud/__init__.py
|
luovkle/FastAPI-Note-Taking
|
317d92e75cbba3a6e633d6cf3d0bed0021412967
|
[
"MIT"
] | null | null | null |
backend/app/app/crud/__init__.py
|
luovkle/FastAPI-Note-Taking
|
317d92e75cbba3a6e633d6cf3d0bed0021412967
|
[
"MIT"
] | null | null | null |
backend/app/app/crud/__init__.py
|
luovkle/FastAPI-Note-Taking
|
317d92e75cbba3a6e633d6cf3d0bed0021412967
|
[
"MIT"
] | null | null | null |
from .crud_user import crud_user # noqa: F401
from .crud_note import crud_note # noqa: F401
| 31.333333
| 46
| 0.765957
| 16
| 94
| 4.25
| 0.4375
| 0.235294
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.076923
| 0.170213
| 94
| 2
| 47
| 47
| 0.794872
| 0.223404
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
6803550947b81cdf9de81ccf68c02ddd50ac556a
| 48
|
py
|
Python
|
bindings/pydeck/pydeck/exceptions/__init__.py
|
marsupialmarcos/deck.gl
|
c9867c1db87e492253865353f68c985019c7c613
|
[
"MIT"
] | null | null | null |
bindings/pydeck/pydeck/exceptions/__init__.py
|
marsupialmarcos/deck.gl
|
c9867c1db87e492253865353f68c985019c7c613
|
[
"MIT"
] | null | null | null |
bindings/pydeck/pydeck/exceptions/__init__.py
|
marsupialmarcos/deck.gl
|
c9867c1db87e492253865353f68c985019c7c613
|
[
"MIT"
] | null | null | null |
from .exceptions import PydeckException # noqa
| 24
| 47
| 0.8125
| 5
| 48
| 7.8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.145833
| 48
| 1
| 48
| 48
| 0.95122
| 0.083333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
6814efafa8b0436d0c07705dbb35b7e3e7d2d5ab
| 61
|
py
|
Python
|
itscsapp/admision/models/__init__.py
|
danyRivC/itscsapp
|
485309f41f477fcebf66899740a0b4a954f4b98b
|
[
"MIT"
] | null | null | null |
itscsapp/admision/models/__init__.py
|
danyRivC/itscsapp
|
485309f41f477fcebf66899740a0b4a954f4b98b
|
[
"MIT"
] | null | null | null |
itscsapp/admision/models/__init__.py
|
danyRivC/itscsapp
|
485309f41f477fcebf66899740a0b4a954f4b98b
|
[
"MIT"
] | null | null | null |
from .admision_carrer import *
from .admision_event import *
| 20.333333
| 30
| 0.803279
| 8
| 61
| 5.875
| 0.625
| 0.510638
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.131148
| 61
| 2
| 31
| 30.5
| 0.886792
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a83e29d363c3ca50085e97a39444917b28289d0b
| 26
|
py
|
Python
|
__init__.py
|
fleximus/pelican-fancyindex
|
305a953ed42c3b9f6c43dbd2d20751ac4f11deaf
|
[
"BSD-2-Clause"
] | null | null | null |
__init__.py
|
fleximus/pelican-fancyindex
|
305a953ed42c3b9f6c43dbd2d20751ac4f11deaf
|
[
"BSD-2-Clause"
] | null | null | null |
__init__.py
|
fleximus/pelican-fancyindex
|
305a953ed42c3b9f6c43dbd2d20751ac4f11deaf
|
[
"BSD-2-Clause"
] | null | null | null |
from .fancyindex import *
| 13
| 25
| 0.769231
| 3
| 26
| 6.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.153846
| 26
| 1
| 26
| 26
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a88816b3f545c55918110797952195372fc30a92
| 1,929
|
py
|
Python
|
hexastore/bisect.py
|
alexchamberlain/mutant
|
3f4ec0df8b83b2de18766e2c9e1808cff4fd52a9
|
[
"MIT"
] | 3
|
2019-06-15T13:13:39.000Z
|
2020-02-07T19:54:12.000Z
|
hexastore/bisect.py
|
alexchamberlain/mutant
|
3f4ec0df8b83b2de18766e2c9e1808cff4fd52a9
|
[
"MIT"
] | 276
|
2019-07-03T06:18:37.000Z
|
2021-07-28T05:24:59.000Z
|
hexastore/bisect.py
|
alexchamberlain/mutant
|
3f4ec0df8b83b2de18766e2c9e1808cff4fd52a9
|
[
"MIT"
] | null | null | null |
"""Bisection algorithms."""
from typing import Callable, Optional, Sequence, TypeVar, cast
from .typing import Comparable
T = TypeVar("T")
U = TypeVar("U", bound=Comparable)
def bisect_left(
a: Sequence[T], x: T, lo: int = 0, hi: Optional[int] = None, key: Optional[Callable[[T], U]] = None
) -> int:
"""Return the index where to insert item x in list a, assuming a is sorted.
The return value i is such that all e in a[:i] have e < x, and all e in
a[i:] have e >= x. So if x already appears in the list, a.insert(x) will
insert just before the leftmost x already there.
Optional args lo (default 0) and hi (default len(a)) bound the
slice of a to be searched.
"""
if key is None:
key = cast(Callable[[T], U], lambda x: x)
if lo < 0:
raise ValueError("lo must be non-negative")
if hi is None:
hi = len(a)
while lo < hi:
mid = (lo + hi) // 2
if key(a[mid]) < key(x):
lo = mid + 1
else:
hi = mid
return lo
def bisect_right(
a: Sequence[T], x: T, lo: int = 0, hi: Optional[int] = None, key: Optional[Callable[[T], U]] = None
) -> int:
"""Return the index where to insert item x in list a, assuming a is sorted.
The return value i is such that all e in a[:i] have e <= x, and all e in
a[i:] have e > x. So if x already appears in the list, a.insert(x) will
insert just after the rightmost x already there.
Optional args lo (default 0) and hi (default len(a)) bound the
slice of a to be searched.
"""
if key is None:
key = cast(Callable[[T], U], lambda x: x)
if lo < 0:
raise ValueError("lo must be non-negative")
if hi is None:
hi = len(a)
while lo < hi:
mid = (lo + hi) // 2
if key(x) < key(a[mid]):
hi = mid
else:
lo = mid + 1
return lo
bisect = bisect_right
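# A minimal usage sketch of the key-aware helpers above; the records list and the
# score field used as the sort key are illustrative values, not part of this module.
records = [("a", 1), ("b", 3), ("c", 3), ("d", 7)]
new = ("e", 3)
i = bisect_left(records, new, key=lambda r: r[1])   # 1: before the first score == 3
j = bisect_right(records, new, key=lambda r: r[1])  # 3: after the last score == 3
records.insert(j, new)                              # keeps the list sorted by score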
| 25.72
| 103
| 0.573354
| 323
| 1,929
| 3.414861
| 0.232198
| 0.009066
| 0.036265
| 0.025385
| 0.75612
| 0.75612
| 0.75612
| 0.75612
| 0.75612
| 0.75612
| 0
| 0.007502
| 0.308968
| 1,929
| 74
| 104
| 26.067568
| 0.819955
| 0.384137
| 0
| 0.756757
| 0
| 0
| 0.042781
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.054054
| false
| 0
| 0.054054
| 0
| 0.162162
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a888a17e5cc1d1b46ed3663d418f931e372c9caf
| 3,150
|
py
|
Python
|
test.py
|
eldinsahbaz/MetaheuristicOptimization
|
d553c4ea791e10b64384056927502717f5009378
|
[
"MIT"
] | 1
|
2019-02-22T18:26:55.000Z
|
2019-02-22T18:26:55.000Z
|
test.py
|
eldinsahbaz/MetaheuristicOptimization
|
d553c4ea791e10b64384056927502717f5009378
|
[
"MIT"
] | null | null | null |
test.py
|
eldinsahbaz/MetaheuristicOptimization
|
d553c4ea791e10b64384056927502717f5009378
|
[
"MIT"
] | null | null | null |
import PSO
import numpy as np
from pprint import pprint
from functools import partial
# Define the details of the table design problem
def objective_one(x):
i = 0.001
return -((1/((2*np.pi)**0.5))*np.exp(-0.5*((((x[0]-1.5)*(x[0]-1.5)+(x[1]-1.5)*(x[1]-1.5))/0.5)**1)) + (2/((2*np.pi)**0.5))*np.exp(-0.5*((((x[0]-0.5)*(x[0]-0.5)+(x[1]-0.5)*(x[1]-0.5))/i)**1)))
def sphere(x):
return np.sum(np.square(x))
num_variables = 2
upper_bounds = np.zeros(num_variables) + 10
lower_bounds = np.zeros(num_variables) - 10
max_velocity = (upper_bounds - lower_bounds) * 0.2
min_velocity = -max_velocity
inputs = {
'num_variables': num_variables,
'upper_bound': upper_bounds,
'lower_bound': lower_bounds,
'objective_function': partial(PSO.robust_variace_objective, objective_one),
'num_particles': 1000,
'max_iterations': 10,
'max_w': 0.9,
'min_w': 0.2,
'c1': 2,
'c2': 2,
'max_velocity': max_velocity,
'min_velocity': min_velocity,
'tolerance': 1e-2,
'patience': 3,
'disp': True
}
best_solns_one = list()
for i in range(10):
output, convergence_curve = PSO.PSO(**inputs)
best_solns_one.append(output)
num_variables = 2
upper_bounds = np.zeros(num_variables) + 10
lower_bounds = np.zeros(num_variables) - 10
max_velocity = (upper_bounds - lower_bounds) * 0.2
min_velocity = -max_velocity
inputs = {
'num_variables': num_variables,
'upper_bound': upper_bounds,
'lower_bound': lower_bounds,
'objective_function': partial(PSO.robust_variace_objective, objective_one),
'num_particles': 1000,
'max_iterations': 10,
'max_w': 0.9,
'min_w': 0.4,
'c1': 2,
'c2': 2,
'max_velocity': max_velocity,
'min_velocity': min_velocity,
'tolerance': 1e-2,
'patience': 3,
'disp': True
}
best_solns_two = list()
for i in range(10):
output, convergence_curve = PSO.PSO(**inputs)
best_solns_two.append(output)
print("The difference is significant" if PSO.compare_algorithms(best_solns_one, best_solns_two) < 0.05 else "The difference is not significant")
num_variables = 100
upper_bounds = np.zeros(num_variables) + 10
lower_bounds = np.zeros(num_variables) - 10
max_velocity = (upper_bounds - lower_bounds) * 0.2
min_velocity = -max_velocity
inputs = {
'num_variables': num_variables,
'upper_bound': upper_bounds,
'lower_bound': lower_bounds,
'objective_function': sphere,
'num_particles': 1000,
'max_iterations': 30,
'max_w': 0.9,
'min_w': 0.2,
'c1': 2,
'c2': 2,
'max_velocity': max_velocity,
'min_velocity': min_velocity,
'tolerance': 1e-2,
'patience': 3,
'disp': True
}
output, convergence_curve = PSO.PSO(**inputs)
pprint(output)
PSO.visualize_convergence(convergence_curve)
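# Quick sanity check of the sphere objective defined above (the test values are
# illustrative): the global minimum is 0 at the origin, and sphere([3, 4]) = 3**2 + 4**2 = 25.
assert sphere(np.zeros(num_variables)) == 0.0
assert sphere(np.array([3.0, 4.0])) == 25.0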
| 29.439252
| 195
| 0.583492
| 418
| 3,150
| 4.157895
| 0.203349
| 0.103567
| 0.044879
| 0.055236
| 0.770426
| 0.75374
| 0.722094
| 0.718067
| 0.718067
| 0.718067
| 0
| 0.054729
| 0.274921
| 3,150
| 106
| 196
| 29.716981
| 0.706217
| 0.014603
| 0
| 0.735632
| 0
| 0
| 0.154417
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.022989
| false
| 0
| 0.045977
| 0.011494
| 0.091954
| 0.034483
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a8ab1507e7422c2e6e8504b138f80c7c058d5661
| 118
|
py
|
Python
|
Maths/__init__.py
|
NextLmabda/PyLambda
|
5fb91062c4f9c493fcd3637c2aa4d786f8c387d0
|
[
"MIT"
] | null | null | null |
Maths/__init__.py
|
NextLmabda/PyLambda
|
5fb91062c4f9c493fcd3637c2aa4d786f8c387d0
|
[
"MIT"
] | null | null | null |
Maths/__init__.py
|
NextLmabda/PyLambda
|
5fb91062c4f9c493fcd3637c2aa4d786f8c387d0
|
[
"MIT"
] | null | null | null |
print('Omolewa is teaching a class')
print('Lanre is still making changes')
print('Omolewa has made a change too')
| 29.5
| 39
| 0.737288
| 19
| 118
| 4.578947
| 0.736842
| 0.275862
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.161017
| 118
| 3
| 40
| 39.333333
| 0.878788
| 0
| 0
| 0
| 0
| 0
| 0.73913
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
764337fd61b07695cb56126ab069e65c8c4a854d
| 31
|
py
|
Python
|
relevanceai/_api/endpoints/admin/__init__.py
|
RelevanceAI/RelevanceAI
|
a0542f35153d9c842f3d2cd0955d6b07f6dfc07b
|
[
"Apache-2.0"
] | 21
|
2021-11-23T13:01:36.000Z
|
2022-03-23T03:45:30.000Z
|
relevanceai/_api/endpoints/admin/__init__.py
|
RelevanceAI/RelevanceAI
|
a0542f35153d9c842f3d2cd0955d6b07f6dfc07b
|
[
"Apache-2.0"
] | 217
|
2021-11-23T00:11:01.000Z
|
2022-03-30T08:11:49.000Z
|
relevanceai/_api/endpoints/admin/__init__.py
|
RelevanceAI/RelevanceAI
|
a0542f35153d9c842f3d2cd0955d6b07f6dfc07b
|
[
"Apache-2.0"
] | 4
|
2022-01-04T01:48:30.000Z
|
2022-02-11T03:19:32.000Z
|
from .admin import AdminClient
| 15.5
| 30
| 0.83871
| 4
| 31
| 6.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.129032
| 31
| 1
| 31
| 31
| 0.962963
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
76563bc9936580a8d4669bd7cefb4b1d15996d7b
| 36
|
py
|
Python
|
Allswap_djangoREST/backend/allswap/accounts/views.py
|
yds05238/AllSwap_Backend
|
95429fe6c709feef6b9e4b2349921e1cc4dd4c18
|
[
"MIT"
] | 2
|
2020-02-19T05:06:49.000Z
|
2020-02-20T17:34:41.000Z
|
Allswap_djangoREST/backend/allswap/accounts/views.py
|
yds05238/AllSwap_Backend
|
95429fe6c709feef6b9e4b2349921e1cc4dd4c18
|
[
"MIT"
] | 28
|
2020-06-05T20:52:59.000Z
|
2022-03-12T00:15:17.000Z
|
Allswap_djangoREST/backend/allswap/accounts/views.py
|
yds05238/AllSwap
|
95429fe6c709feef6b9e4b2349921e1cc4dd4c18
|
[
"MIT"
] | null | null | null |
from rest_framework import generics
| 18
| 35
| 0.888889
| 5
| 36
| 6.2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 36
| 1
| 36
| 36
| 0.96875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
765cc31257c572ee270073568f4fce119056c92f
| 4,533
|
py
|
Python
|
pinakes/common/auth/keycloak_django/tests/test_permission_checks.py
|
Alex-Izquierdo/pinakes
|
dfeb855662b47d29a6e976e87fd7c090a262cf3f
|
[
"Apache-2.0"
] | 2
|
2022-03-17T18:53:58.000Z
|
2022-03-17T22:04:22.000Z
|
pinakes/common/auth/keycloak_django/tests/test_permission_checks.py
|
Alex-Izquierdo/pinakes
|
dfeb855662b47d29a6e976e87fd7c090a262cf3f
|
[
"Apache-2.0"
] | 9
|
2022-03-18T08:22:57.000Z
|
2022-03-30T17:14:49.000Z
|
pinakes/common/auth/keycloak_django/tests/test_permission_checks.py
|
Alex-Izquierdo/pinakes
|
dfeb855662b47d29a6e976e87fd7c090a262cf3f
|
[
"Apache-2.0"
] | 7
|
2022-03-17T22:03:08.000Z
|
2022-03-28T21:28:34.000Z
|
from unittest import mock
from pinakes.common.auth.keycloak.models import (
AuthzPermission,
AuthzResource,
)
from pinakes.common.auth.keycloak_django.permissions import (
check_wildcard_permission,
check_resource_permission,
check_object_permission,
get_permitted_resources,
)
@mock.patch("pinakes.common.auth.keycloak_django.permissions.get_authz_client")
def test_check_wildcard_permission(get_authz_client):
client = get_authz_client.return_value
client.check_permissions.return_value = True
result = check_wildcard_permission("myresource", "read", mock.Mock())
assert result is True
client.check_permissions.assert_called_once_with(
AuthzPermission("myresource:all", "myresource:read")
)
@mock.patch("pinakes.common.auth.keycloak_django.permissions.get_authz_client")
def test_check_resource_permission(get_authz_client):
client = get_authz_client.return_value
client.check_permissions.return_value = True
result = check_resource_permission(
"myresource",
"myresource:1",
"read",
mock.Mock(),
)
assert result is True
client.check_permissions.assert_called_once_with(
[
AuthzPermission("myresource:all", "myresource:read"),
AuthzPermission("myresource:1", "myresource:read"),
]
)
@mock.patch(
"pinakes.common.auth.keycloak_django."
"permissions.check_wildcard_permission",
return_value=False,
)
@mock.patch(
"pinakes.common.auth.keycloak_django"
".permissions.check_resource_permission",
return_value=True,
)
def test_check_object_permission_exists(
check_resource_permission, check_wildcard_permission
):
obj = mock.Mock()
obj.keycloak_id = "598802c2-6266-40f0-9558-142e2cb0d98e"
obj.keycloak_type.return_value = "myresource"
obj.keycloak_name.return_value = "myresource:1"
request = mock.Mock()
assert check_object_permission(obj, "read", request) is True
check_resource_permission.assert_called_once_with(
"myresource", "myresource:1", "read", request
)
check_wildcard_permission.assert_not_called()
@mock.patch(
"pinakes.common.auth.keycloak_django"
".permissions.check_wildcard_permission",
return_value=True,
)
@mock.patch(
"pinakes.common.auth.keycloak_django"
".permissions.check_resource_permission",
return_value=False,
)
def test_check_object_permission_not_exists(
check_resource_permission, check_wildcard_permission
):
obj = mock.Mock()
obj.keycloak_id = None
obj.keycloak_type.return_value = "myresource"
request = mock.Mock()
assert check_object_permission(obj, "read", request) is True
check_wildcard_permission.assert_called_once_with(
"myresource", "read", request
)
check_resource_permission.assert_not_called()
@mock.patch("pinakes.common.auth.keycloak_django.permissions.get_authz_client")
def test_get_permitted_resources_empty(get_authz_client):
client = get_authz_client.return_value
client.get_permissions.return_value = []
result = get_permitted_resources("myresource", "read", mock.Mock())
assert result.is_wildcard is False
assert result.items == []
client.get_permissions.assert_called_once_with(
AuthzPermission(scope="myresource:read")
)
@mock.patch("pinakes.common.auth.keycloak_django.permissions.get_authz_client")
def test_get_permitted_resources_wildcard(get_authz_client):
client = get_authz_client.return_value
client.get_permissions.return_value = [
AuthzResource(rsid="0", rsname="myresource:all"),
AuthzResource(rsid="1", rsname="myresource:1"),
]
result = get_permitted_resources("myresource", "read", mock.Mock())
assert result.is_wildcard is True
assert result.items == ["1"]
client.get_permissions.assert_called_once_with(
AuthzPermission(scope="myresource:read")
)
@mock.patch("pinakes.common.auth.keycloak_django.permissions.get_authz_client")
def test_get_permitted_resources(get_authz_client):
client = get_authz_client.return_value
client.get_permissions.return_value = [
AuthzResource(rsid="1", rsname="myresource:1"),
AuthzResource(rsid="2", rsname="myresource:2"),
]
result = get_permitted_resources("myresource", "read", mock.Mock())
assert result.is_wildcard is False
assert result.items == ["1", "2"]
client.get_permissions.assert_called_once_with(
AuthzPermission(scope="myresource:read")
)
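# Sketch of how a caller might consume get_permitted_resources, based only on the
# behaviour asserted above (result.is_wildcard and result.items); the queryset
# filtering shown here is an assumption for illustration, not part of this test module.
def permitted_queryset(queryset, request):
    resources = get_permitted_resources("myresource", "read", request)
    if resources.is_wildcard:
        return queryset
    return queryset.filter(keycloak_id__in=resources.items)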
| 29.245161
| 79
| 0.736378
| 529
| 4,533
| 5.984877
| 0.113422
| 0.059065
| 0.06633
| 0.08686
| 0.837018
| 0.808907
| 0.730891
| 0.727732
| 0.727732
| 0.727732
| 0
| 0.010225
| 0.158615
| 4,533
| 154
| 80
| 29.435065
| 0.819874
| 0
| 0
| 0.474576
| 0
| 0
| 0.219943
| 0.142952
| 0
| 0
| 0
| 0
| 0.161017
| 1
| 0.059322
| false
| 0
| 0.025424
| 0
| 0.084746
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
76cc225335a2213744e19bb14468fe461b9bf959
| 179
|
py
|
Python
|
magi/agents/sac_ae/__init__.py
|
ethanluoyc/magi
|
2ef2ba60989a55ccf8c90ba74c8e712fe301e2fa
|
[
"Apache-2.0"
] | 86
|
2021-11-24T21:53:29.000Z
|
2022-03-27T13:35:45.000Z
|
magi/agents/sac_ae/__init__.py
|
ethanluoyc/magi
|
2ef2ba60989a55ccf8c90ba74c8e712fe301e2fa
|
[
"Apache-2.0"
] | 7
|
2021-11-26T17:23:29.000Z
|
2022-03-07T21:49:44.000Z
|
magi/agents/sac_ae/__init__.py
|
ethanluoyc/magi
|
2ef2ba60989a55ccf8c90ba74c8e712fe301e2fa
|
[
"Apache-2.0"
] | 3
|
2021-11-27T11:13:18.000Z
|
2022-01-24T14:38:53.000Z
|
"""SAC-AE agent."""
from magi.agents.sac_ae.agent import SACAEAgent
from magi.agents.sac_ae.agent import SACAEConfig
from magi.agents.sac_ae.networks import make_default_networks
| 35.8
| 61
| 0.826816
| 29
| 179
| 4.931034
| 0.413793
| 0.13986
| 0.20979
| 0.356643
| 0.552448
| 0.41958
| 0.41958
| 0
| 0
| 0
| 0
| 0
| 0.078212
| 179
| 4
| 62
| 44.75
| 0.866667
| 0.072626
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
4f15458885d2ae78814572df9599b9ae72b6ee6c
| 34
|
py
|
Python
|
app/mashaller/__init__.py
|
yntonfon/dashboard
|
287e7b2d895916102236243c1051da1e5ee3756e
|
[
"MIT"
] | null | null | null |
app/mashaller/__init__.py
|
yntonfon/dashboard
|
287e7b2d895916102236243c1051da1e5ee3756e
|
[
"MIT"
] | null | null | null |
app/mashaller/__init__.py
|
yntonfon/dashboard
|
287e7b2d895916102236243c1051da1e5ee3756e
|
[
"MIT"
] | null | null | null |
from .user import user_marshaller
| 17
| 33
| 0.852941
| 5
| 34
| 5.6
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.117647
| 34
| 1
| 34
| 34
| 0.933333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4f16139d6db319752d1b3230dd073a3e1e47c8fd
| 118
|
py
|
Python
|
runners/mlcube_singularity/mlcube_singularity/__init__.py
|
johnugeorge/mlcube
|
10bdfe859805aa8c868c5a4745259037e123e757
|
[
"Apache-2.0"
] | 83
|
2020-12-03T18:53:11.000Z
|
2022-03-24T11:58:11.000Z
|
runners/mlcube_singularity/mlcube_singularity/__init__.py
|
mlperf/mlbox
|
5623826bd9c1d60f082170aeffc9ff1ccda7a656
|
[
"Apache-2.0"
] | 100
|
2019-11-08T19:58:59.000Z
|
2020-11-19T05:47:12.000Z
|
runners/mlcube_singularity/mlcube_singularity/__init__.py
|
johnugeorge/mlcube
|
10bdfe859805aa8c868c5a4745259037e123e757
|
[
"Apache-2.0"
] | 15
|
2019-10-30T17:53:39.000Z
|
2020-10-31T15:07:38.000Z
|
def get_runner_class():
from mlcube_singularity.singularity_run import SingularityRun
return SingularityRun
| 19.666667
| 65
| 0.813559
| 13
| 118
| 7.076923
| 0.846154
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.152542
| 118
| 5
| 66
| 23.6
| 0.92
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4f2da43ba15b54e4aa7a9570b02b17a2b0fc1c6e
| 22
|
py
|
Python
|
tt/maxvol/__init__.py
|
rballester/ttpy
|
a2fdf08fae9d34cb1e5ba28482e82e04b249911b
|
[
"MIT"
] | null | null | null |
tt/maxvol/__init__.py
|
rballester/ttpy
|
a2fdf08fae9d34cb1e5ba28482e82e04b249911b
|
[
"MIT"
] | null | null | null |
tt/maxvol/__init__.py
|
rballester/ttpy
|
a2fdf08fae9d34cb1e5ba28482e82e04b249911b
|
[
"MIT"
] | 1
|
2021-01-10T07:02:09.000Z
|
2021-01-10T07:02:09.000Z
|
from _maxvol import *
| 11
| 21
| 0.772727
| 3
| 22
| 5.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 22
| 1
| 22
| 22
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4f2eed39bc85b82594221090c77cb7382ee39bd1
| 169
|
py
|
Python
|
paxLibUL/convolution/__init__.py
|
PAX-ULaval/pax-libraries
|
60e065ef480d85a3c03cfad4d2bbc1a70632c98b
|
[
"MIT"
] | null | null | null |
paxLibUL/convolution/__init__.py
|
PAX-ULaval/pax-libraries
|
60e065ef480d85a3c03cfad4d2bbc1a70632c98b
|
[
"MIT"
] | null | null | null |
paxLibUL/convolution/__init__.py
|
PAX-ULaval/pax-libraries
|
60e065ef480d85a3c03cfad4d2bbc1a70632c98b
|
[
"MIT"
] | null | null | null |
# pylint: disable=wildcard-import
from .architectures import *
from .callbacks import *
from .datasets import *
from .visualisation import *
from .weights_init import *
| 24.142857
| 33
| 0.781065
| 20
| 169
| 6.55
| 0.55
| 0.381679
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.136095
| 169
| 6
| 34
| 28.166667
| 0.89726
| 0.183432
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4f309231478a6b77460cb55dce7f7772392ec78f
| 11,543
|
py
|
Python
|
mstrio/api/migration.py
|
czyzq/mstrio-py
|
b25fd19936b659d503a7eaaa96c8d0b4e118cb7c
|
[
"Apache-2.0"
] | 1
|
2022-02-15T13:18:04.000Z
|
2022-02-15T13:18:04.000Z
|
mstrio/api/migration.py
|
czyzq/mstrio-py
|
b25fd19936b659d503a7eaaa96c8d0b4e118cb7c
|
[
"Apache-2.0"
] | null | null | null |
mstrio/api/migration.py
|
czyzq/mstrio-py
|
b25fd19936b659d503a7eaaa96c8d0b4e118cb7c
|
[
"Apache-2.0"
] | null | null | null |
from typing import Optional
import requests
from mstrio.connection import Connection
from mstrio.utils.error_handlers import ErrorHandler
@ErrorHandler(err_msg='Error while creating the package holder')
def create_package_holder(connection: Connection, project_id: Optional[str] = None,
error_msg: Optional[str] = None) -> requests.Response:
"""Create a new in-memory metadata package holder.
Args:
connection (Connection): Object representation of connection to
MSTR Server.
project_id (Optional[str]): Optional ID of a project. Defaults to None.
error_msg (Optional[str]): Optional error message. Defaults to None.
Returns:
requests.Response: Response object containing all of the information
returned by the server.
"""
project_id = project_id if project_id is not None else connection.project_id
return connection.post(
url=f'{connection.base_url}/api/packages',
headers={'X-MSTR-ProjectID': project_id}
)
@ErrorHandler(err_msg='Error while updating the package holder with id: {id}')
def update_package_holder(connection: Connection, body: dict, id: str,
project_id: Optional[str] = None, prefer: str = "respond-async",
error_msg: Optional[str] = None) -> requests.Response:
"""Fill the content of the in-memory metadata package holder per supplied
specification. Currently, it's only supported when the holder is empty.
Args:
connection (Connection): Object representation of connection to
MSTR Server.
body (dict): dictionarized PackageConfig object (with `to_dict()`)
id (str): ID of the package to be updated
prefer (str, optional): The API currently supports only asynchronous mode
(synchronous mode is not supported), so the header parameter ‘Prefer’ must be
set to ‘respond-async’ in your request. Defaults to "respond-async".
project_id (Optional[str]): Optional ID of a project. Defaults to None.
error_msg (Optional[str]): Optional error message. Defaults to None.
Returns:
requests.Response: Response object containing all of the information
returned by the server.
"""
project_id = project_id if project_id is not None else connection.project_id
return connection.put(
url=f'{connection.base_url}/api/packages/{id}',
headers={
'X-MSTR-ProjectID': project_id,
'Prefer': prefer
},
json=body
)
@ErrorHandler(err_msg='Error while downloading the package with id: {id}')
def download_package(connection: Connection, id: str, project_id: Optional[str] = None,
error_msg: Optional[str] = None) -> requests.Response:
"""Download a package binary.
Args:
connection (Connection): Object representation of connection to
MSTR Server.
id (str): ID of the package to be downloaded.
project_id (Optional[str]): Optional ID of a project. Defaults to None.
error_msg (Optional[str]): Optional error message. Defaults to None.
Returns:
requests.Response: Response object containing all of the information
returned by the server.
"""
project_id = project_id if project_id is not None else connection.project_id
return connection.get(
url=f'{connection.base_url}/api/packages/{id}/binary',
headers={'X-MSTR-ProjectID': project_id}
)
@ErrorHandler(err_msg='Error while uploading the package with id: {id}')
def upload_package(connection: Connection, id: str, file: bytes, project_id: Optional[str] = None,
error_msg: Optional[str] = None) -> requests.Response:
"""Upload package to sandbox directly.
Args:
connection (Connection): Object representation of connection to
MSTR Server.
id (str): ID of the package to be uploaded.
file (bytes): package in a format of a binary string.
project_id (Optional[str]): Optional ID of a project. Defaults to None.
error_msg (Optional[str]): Optional error message. Defaults to None.
Returns:
requests.Response: Response object containing all of the information
returned by the server.
"""
project_id = project_id if project_id is not None else connection.project_id
return connection.put(
url=f'{connection.base_url}/api/packages/{id}/binary',
headers={'X-MSTR-ProjectID': project_id},
files={'file': file}
)
@ErrorHandler(err_msg='Error while getting the package holder with id: {id}')
def get_package_holder(connection: Connection, id: str, project_id: Optional[str] = None,
show_content: bool = True,
error_msg: Optional[str] = None) -> requests.Response:
"""Get definition of a package, including package status and its detail
content.
Args:
connection (Connection): Object representation of connection to
MSTR Server.
id (str): ID of the package to be retrieved.
project_id (Optional[str]): Optional ID of a project. Defaults to None.
show_content (bool, optional): Show package content or not. Defaults to
True.
error_msg (Optional[str]): Optional error message. Defaults to None.
Returns:
requests.Response: Response object containing all of the information
returned by the server.
"""
project_id = project_id if project_id is not None else connection.project_id
return connection.get(
url=f'{connection.base_url}/api/packages/{id}',
headers={'X-MSTR-ProjectID': project_id},
params={'showContent': show_content}
)
@ErrorHandler(err_msg='Error while deleting the package holder with id: {id}')
def delete_package_holder(connection: Connection, id: str, project_id: Optional[str] = None,
prefer: str = 'respond-async',
error_msg: Optional[str] = None) -> requests.Response:
"""Delete the in-memory metadata package holder, releasing associated
Intelligence Server resources.
Args:
connection (Connection): Object representation of connection to
MSTR Server.
id (str): ID of the package to be deleted.
prefer (str, optional): The API currently supports only asynchronous mode
(synchronous mode is not supported), so the header parameter ‘Prefer’ must be
set to ‘respond-async’ in your request. Defaults to "respond-async".
project_id (Optional[str]): Optional ID of a project. Defaults to None.
error_msg (Optional[str]): Optional error message. Defaults to None.
Returns:
requests.Response: Response object containing all of the information
returned by the server.
"""
project_id = project_id if project_id is not None else connection.project_id
return connection.delete(
url=f'{connection.base_url}/api/packages/{id}',
headers={
'X-MSTR-ProjectID': project_id,
'Prefer': prefer
}
)
@ErrorHandler(err_msg='Error while creating the import for package holder with id: {id}')
def create_import(connection: Connection, id: str, project_id: Optional[str] = None,
generate_undo: bool = False,
error_msg: Optional[str] = None) -> requests.Response:
"""Create a package import process.
Args:
connection (Connection): Object representation of connection to
MSTR Server.
id (str): ID of the package for which import process will be
created.
generate_undo (bool, optional): Generate undo package or not. Defaults
to False.
project_id (Optional[str]): Optional ID of a project. Defaults to None.
error_msg (Optional[str]): Optional error message. Defaults to None.
Returns:
requests.Response: Response object containing all of the information
returned by the server.
"""
# TODO: Change to a parameter when any other values are supported
prefer = 'respond-async'
project_id = project_id if project_id is not None else connection.project_id
return connection.post(
url=f'{connection.base_url}/api/packages/imports',
headers={
'X-MSTR-ProjectID': project_id,
'Prefer': prefer
},
params={
'packageId': id,
'generateUndo': generate_undo
},
)
@ErrorHandler(err_msg='Error while getting the import with id: {id}')
def get_import(connection: Connection, id: str, project_id: Optional[str] = None,
error_msg: Optional[str] = None) -> requests.Response:
"""Get result of a package import process.
Args:
connection (Connection): Object representation of connection to
MSTR Server.
id (str): Import process ID.
project_id (Optional[str]): Optional ID of a project. Defaults to None.
error_msg (Optional[str]): Optional error message. Defaults to None.
Returns:
requests.Response: Response object containing all of the information
returned by the server.
"""
project_id = project_id if project_id is not None else connection.project_id
return connection.get(
url=f'{connection.base_url}/api/packages/imports/{id}',
headers={'X-MSTR-ProjectID': project_id}
)
@ErrorHandler(err_msg='Error while deleting the import with id: {id}')
def delete_import(connection: Connection, id: str, project_id: Optional[str] = None,
error_msg: Optional[str] = None) -> requests.Response:
"""Closes an existing import process previously created.
Args:
connection (Connection): Object representation of connection to
MSTR Server.
id (str): Import process ID.
project_id (Optional[str]): Optional ID of a project. Defaults to None.
error_msg (Optional[str]): Optional error message. Defaults to None.
Returns:
requests.Response: Response object containing all of the information
returned by the server.
"""
# TODO: Change to a parameter when any other values are supported
prefer = 'respond-async'
project_id = project_id if project_id is not None else connection.project_id
return connection.delete(
url=f'{connection.base_url}/api/packages/imports/{id}',
headers={
'X-MSTR-ProjectID': project_id,
'Prefer': prefer
}
)
@ErrorHandler(err_msg='Error while creating the undo for import with id: {id}')
def create_undo(connection: Connection, id: str, project_id: Optional[str] = None,
error_msg: Optional[str] = None) -> requests.Response:
"""Download undo package binary for this import process.
Args:
connection (Connection): Object representation of connection to
MSTR Server.
id (str): Import process ID.
project_id (Optional[str]): Optional ID of a project. Defaults to None.
error_msg (Optional[str]): Optional error message. Defaults to None.
Returns:
requests.Response: Response object containing all of the information
returned by the server.
"""
project_id = project_id if project_id is not None else connection.project_id
return connection.get(
url=f'{connection.base_url}/api/packages/imports/{id}/undoPackage/binary',
headers={'X-MSTR-ProjectID': project_id}
)
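# Sketch of a typical call sequence over the endpoints above; the Connection setup,
# the JSON field names, and the absence of status polling are assumptions for
# illustration only (a real caller would poll get_package_holder before downloading).
def export_package(connection: Connection, spec: dict) -> bytes:
    holder_id = create_package_holder(connection).json()["id"]
    update_package_holder(connection, body=spec, id=holder_id)
    binary = download_package(connection, id=holder_id).content
    delete_package_holder(connection, id=holder_id)
    return binary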
| 40.644366
| 98
| 0.664559
| 1,439
| 11,543
| 5.239055
| 0.109798
| 0.083565
| 0.045099
| 0.053057
| 0.867224
| 0.842287
| 0.820533
| 0.783924
| 0.768404
| 0.761507
| 0
| 0
| 0.248116
| 11,543
| 283
| 99
| 40.787986
| 0.868648
| 0.472927
| 0
| 0.522936
| 0
| 0
| 0.218806
| 0.080007
| 0
| 0
| 0
| 0.007067
| 0
| 1
| 0.091743
| false
| 0
| 0.137615
| 0
| 0.321101
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
4f3cddc3d48976524af037fa191a969e354eac83
| 193
|
py
|
Python
|
keymaster/__main__.py
|
shiroyuki/spymaster
|
1efee54427378394ab04d0e53247eb38c28bc97c
|
[
"Apache-2.0"
] | null | null | null |
keymaster/__main__.py
|
shiroyuki/spymaster
|
1efee54427378394ab04d0e53247eb38c28bc97c
|
[
"Apache-2.0"
] | null | null | null |
keymaster/__main__.py
|
shiroyuki/spymaster
|
1efee54427378394ab04d0e53247eb38c28bc97c
|
[
"Apache-2.0"
] | null | null | null |
import os, sys
sys.path.insert(0, os.path.join(os.getcwd(), '..', 'Imagination'))
sys.path.insert(0, os.path.join(os.getcwd(), '..', 'xmode'))
from keymaster.starter import activate
activate()
| 32.166667
| 66
| 0.689119
| 29
| 193
| 4.586207
| 0.482759
| 0.105263
| 0.195489
| 0.210526
| 0.481203
| 0.481203
| 0.481203
| 0.481203
| 0.481203
| 0
| 0
| 0.011299
| 0.082902
| 193
| 6
| 67
| 32.166667
| 0.740113
| 0
| 0
| 0
| 0
| 0
| 0.103093
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
4f9120612f328e8baf028521cfe58288d762c155
| 134
|
py
|
Python
|
hec_gnn/single_model/__init__.py
|
zlinaf/PowerGear
|
51ab67a7e2a2f4833de5196bb8aac57eaf77db69
|
[
"MIT"
] | 8
|
2022-03-11T03:29:15.000Z
|
2022-03-27T07:39:48.000Z
|
hec_gnn/single_model/__init__.py
|
zlinaf/PowerGear
|
51ab67a7e2a2f4833de5196bb8aac57eaf77db69
|
[
"MIT"
] | null | null | null |
hec_gnn/single_model/__init__.py
|
zlinaf/PowerGear
|
51ab67a7e2a2f4833de5196bb8aac57eaf77db69
|
[
"MIT"
] | 3
|
2022-03-11T02:30:24.000Z
|
2022-03-11T02:35:26.000Z
|
import sys
import os
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(ROOT_DIR)
| 33.5
| 87
| 0.791045
| 24
| 134
| 4.166667
| 0.416667
| 0.24
| 0.39
| 0.45
| 0.45
| 0.45
| 0.45
| 0.45
| 0
| 0
| 0
| 0
| 0.052239
| 134
| 4
| 88
| 33.5
| 0.787402
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
96d45139aca244273384a51c530c63b4656ed83e
| 72
|
py
|
Python
|
tests/test_main.py
|
joaogcs/python-project-template
|
079f1606a474e155449ccd29da5970f571bf97a8
|
[
"MIT"
] | null | null | null |
tests/test_main.py
|
joaogcs/python-project-template
|
079f1606a474e155449ccd29da5970f571bf97a8
|
[
"MIT"
] | null | null | null |
tests/test_main.py
|
joaogcs/python-project-template
|
079f1606a474e155449ccd29da5970f571bf97a8
|
[
"MIT"
] | null | null | null |
# This is a sample Python script.
def test_print_hi():
assert True
| 14.4
| 33
| 0.708333
| 12
| 72
| 4.083333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.222222
| 72
| 4
| 34
| 18
| 0.875
| 0.430556
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0.5
| true
| 0
| 0
| 0
| 0.5
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
96f32ed1cb808038241c0062fb82667d9d2ca2d6
| 188
|
py
|
Python
|
webs/douban/tasks/__init__.py
|
billvsme/videoSpider
|
e19111cc48d0a2a44c5245b0ddc9fad0c7a1824d
|
[
"MIT"
] | 216
|
2016-02-20T12:46:43.000Z
|
2022-02-23T07:07:00.000Z
|
webs/douban/tasks/__init__.py
|
billvsme/tvCrawlers
|
e19111cc48d0a2a44c5245b0ddc9fad0c7a1824d
|
[
"MIT"
] | 3
|
2016-05-06T05:04:17.000Z
|
2021-12-13T19:41:39.000Z
|
webs/douban/tasks/__init__.py
|
billvsme/tvCrawlers
|
e19111cc48d0a2a44c5245b0ddc9fad0c7a1824d
|
[
"MIT"
] | 99
|
2016-02-20T08:34:00.000Z
|
2022-02-10T20:52:01.000Z
|
from . import get_main_movies_base_data
from . import get_main_movies_full_data
from . import get_celebrities_full_data
from . import down_video_images
from . import down_celebrity_images
| 31.333333
| 39
| 0.867021
| 30
| 188
| 4.933333
| 0.433333
| 0.337838
| 0.263514
| 0.22973
| 0.310811
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.106383
| 188
| 5
| 40
| 37.6
| 0.880952
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
4fdf21ff4984773ad50f5c25c0c1fd4ddfd99a2a
| 37
|
py
|
Python
|
mmo_module/__init__.py
|
alentoghostflame/StupidAlentoBot
|
c024bfb79a9ecb0d9fda5ddc4e361a0cb878baba
|
[
"MIT"
] | 1
|
2021-12-12T02:50:20.000Z
|
2021-12-12T02:50:20.000Z
|
mmo_module/__init__.py
|
alentoghostflame/StupidAlentoBot
|
c024bfb79a9ecb0d9fda5ddc4e361a0cb878baba
|
[
"MIT"
] | 17
|
2020-02-07T23:40:36.000Z
|
2020-12-22T16:38:44.000Z
|
mmo_module/__init__.py
|
alentoghostflame/StupidAlentoBot
|
c024bfb79a9ecb0d9fda5ddc4e361a0cb878baba
|
[
"MIT"
] | null | null | null |
from mmo_module.mmo import MMOModule
| 18.5
| 36
| 0.864865
| 6
| 37
| 5.166667
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108108
| 37
| 1
| 37
| 37
| 0.939394
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4fe89fe9a8bdb2830db5052a284915883fc9077c
| 199
|
py
|
Python
|
bin/createLinkograph.py
|
mikiec84/linkshop
|
72959ceca0003be226edeca6496f915502831596
|
[
"Apache-2.0"
] | 6
|
2017-07-18T15:28:33.000Z
|
2020-03-03T14:45:45.000Z
|
bin/createLinkograph.py
|
mikiec84/linkshop
|
72959ceca0003be226edeca6496f915502831596
|
[
"Apache-2.0"
] | null | null | null |
bin/createLinkograph.py
|
mikiec84/linkshop
|
72959ceca0003be226edeca6496f915502831596
|
[
"Apache-2.0"
] | 3
|
2017-09-09T00:36:48.000Z
|
2020-03-03T14:45:49.000Z
|
#!/usr/bin/env python3
"""Command-line wrapper for linkoCreate.cli_createLinko."""
import loadPath # Adds the project path.
import linkograph.linkoCreate
linkograph.linkoCreate.cli_createLinko()
| 22.111111
| 59
| 0.79397
| 24
| 199
| 6.5
| 0.75
| 0.179487
| 0.320513
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005587
| 0.100503
| 199
| 8
| 60
| 24.875
| 0.865922
| 0.492462
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
8b2de91bfb8d50062a3db4c0b2f2027c54e1a627
| 49
|
py
|
Python
|
specviz/plugins/unit_change/__init__.py
|
ibusko/specviz
|
b8bcd495e5b43dc2b90f7bf2d5bad2d27c6990aa
|
[
"BSD-3-Clause"
] | null | null | null |
specviz/plugins/unit_change/__init__.py
|
ibusko/specviz
|
b8bcd495e5b43dc2b90f7bf2d5bad2d27c6990aa
|
[
"BSD-3-Clause"
] | null | null | null |
specviz/plugins/unit_change/__init__.py
|
ibusko/specviz
|
b8bcd495e5b43dc2b90f7bf2d5bad2d27c6990aa
|
[
"BSD-3-Clause"
] | null | null | null |
from .unit_change_dialog import UnitChangeDialog
| 24.5
| 48
| 0.897959
| 6
| 49
| 7
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.081633
| 49
| 1
| 49
| 49
| 0.933333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
8c761a408113359da777a62cae459343ebc003d1
| 187
|
py
|
Python
|
ros/genpy/src/genpy/msg/__init__.py
|
numberen/apollo-platform
|
8f359c8d00dd4a98f56ec2276c5663cb6c100e47
|
[
"Apache-2.0"
] | 742
|
2017-07-05T02:49:36.000Z
|
2022-03-30T12:55:43.000Z
|
ros/genpy/src/genpy/msg/__init__.py
|
numberen/apollo-platform
|
8f359c8d00dd4a98f56ec2276c5663cb6c100e47
|
[
"Apache-2.0"
] | 73
|
2017-07-06T12:50:51.000Z
|
2022-03-07T08:07:07.000Z
|
ros/genpy/src/genpy/msg/__init__.py
|
numberen/apollo-platform
|
8f359c8d00dd4a98f56ec2276c5663cb6c100e47
|
[
"Apache-2.0"
] | 425
|
2017-07-04T22:03:29.000Z
|
2022-03-29T06:59:06.000Z
|
from ._TestFillEmbedTime import *
from ._TestFillSimple import *
from ._TestManyFields import *
from ._TestMsgArray import *
from ._TestPrimitiveArray import *
from ._TestString import *
| 26.714286
| 34
| 0.807487
| 18
| 187
| 8.055556
| 0.444444
| 0.344828
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.128342
| 187
| 6
| 35
| 31.166667
| 0.889571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
8c76ebd85af1120266012ad23762034711833042
| 4,405
|
py
|
Python
|
gamestonk_terminal/etf/financedatabase_view.py
|
i2infinity/GamestonkTerminal
|
abf79a5249930e5a9f5d2a1c4ba64590888ecef5
|
[
"MIT"
] | 1
|
2021-12-31T04:10:42.000Z
|
2021-12-31T04:10:42.000Z
|
gamestonk_terminal/etf/financedatabase_view.py
|
greggorrell/GamestonkTerminal
|
caa2c88c1259967b55a7565c7ce5fb1014f39e68
|
[
"MIT"
] | 1
|
2022-03-29T13:45:05.000Z
|
2022-03-29T13:45:05.000Z
|
gamestonk_terminal/etf/financedatabase_view.py
|
greggorrell/GamestonkTerminal
|
caa2c88c1259967b55a7565c7ce5fb1014f39e68
|
[
"MIT"
] | 1
|
2021-06-20T02:42:40.000Z
|
2021-06-20T02:42:40.000Z
|
"""Finance Database view"""
__docformat__ = "numpy"
import os
import pandas as pd
from tabulate import tabulate
from gamestonk_terminal import feature_flags as gtff
from gamestonk_terminal.etf import financedatabase_model
from gamestonk_terminal.helper_funcs import export_data
def display_etf_by_name(
name: str,
limit: int,
export: str = "",
):
"""Display a selection of ETFs based on name filtered by total assets. [Source: Finance Database]
Parameters
----------
name: str
Search by name to find ETFs matching the criteria.
limit: int
Limit of ETFs to display
export: str
Type of format to export data
"""
data = financedatabase_model.get_etfs_by_name(name)
if not data:
print("No data was found with that name\n")
return
tabulate_data = pd.DataFrame(data).T[
["long_name", "family", "category", "total_assets"]
]
tabulate_data_sorted = tabulate_data.sort_values(by="total_assets", ascending=False)
tabulate_data_sorted["total_assets"] = tabulate_data_sorted["total_assets"] / 1e6
if gtff.USE_TABULATE_DF:
print(
tabulate(
tabulate_data_sorted.iloc[:limit],
showindex=True,
headers=["Name", "Family", "Category", "Total Assets [M]"],
floatfmt=".2f",
tablefmt="fancy_grid",
),
"\n",
)
else:
print(tabulate_data_sorted.iloc[:limit].to_string(), "\n")
export_data(export, os.path.dirname(os.path.abspath(__file__)), "ln_fd", data)
def display_etf_by_description(
description: str,
limit: int,
export: str = "",
):
"""Display a selection of ETFs based on description filtered by total assets. [Source: Finance Database]
Parameters
----------
description: str
Search by description to find ETFs matching the criteria.
limit: int
Limit of ETFs to display
export: str
Type of format to export data
"""
data = financedatabase_model.get_etfs_by_description(description)
if not data:
print("No data was found with that description\n")
return
tabulate_data = pd.DataFrame(data).T[
["long_name", "family", "category", "total_assets"]
]
tabulate_data_sorted = tabulate_data.sort_values(by="total_assets", ascending=False)
tabulate_data_sorted["total_assets"] = tabulate_data_sorted["total_assets"] / 1e6
if gtff.USE_TABULATE_DF:
print(
tabulate(
tabulate_data_sorted.iloc[:limit],
showindex=True,
headers=["Name", "Family", "Category", "Total Assets [M]"],
floatfmt=".2f",
tablefmt="fancy_grid",
),
"\n",
)
else:
print(tabulate_data_sorted.iloc[:limit].to_string(), "\n")
export_data(export, os.path.dirname(os.path.abspath(__file__)), "ld", data)
def display_etf_by_category(
category: str,
limit: int,
export: str = "",
):
"""Display a selection of ETFs based on a category filtered by total assets. [Source: Finance Database]
Parameters
----------
category: str
Search by category to find ETFs matching the criteria.
limit: int
Limit of ETFs to display
export: str
Type of format to export data
"""
data = financedatabase_model.get_etfs_by_category(category)
if not data:
print("No data was found on that category\n")
return
tabulate_data = pd.DataFrame(data).T[
["long_name", "family", "category", "total_assets"]
]
tabulate_data_sorted = tabulate_data.sort_values(by="total_assets", ascending=False)
tabulate_data_sorted["total_assets"] = tabulate_data_sorted["total_assets"] / 1e6
if gtff.USE_TABULATE_DF:
print(
tabulate(
tabulate_data_sorted.iloc[:limit],
showindex=True,
headers=["Name", "Family", "Category", "Total Assets [M]"],
floatfmt=".2f",
tablefmt="fancy_grid",
),
"\n",
)
else:
print(tabulate_data_sorted.iloc[:limit].to_string(), "\n")
export_data(
export,
os.path.join(os.path.dirname(os.path.abspath(__file__)), "screener"),
"sbc",
data,
)
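# Minimal usage sketch of the view functions above; the search terms are illustrative
# and the underlying financedatabase lookups require network access.
if __name__ == "__main__":
    display_etf_by_name("dividend", limit=10)
    display_etf_by_category("Technology", limit=10)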
| 29.965986
| 108
| 0.613167
| 518
| 4,405
| 4.994208
| 0.187259
| 0.09741
| 0.104368
| 0.053344
| 0.841129
| 0.819095
| 0.819095
| 0.807499
| 0.776575
| 0.776575
| 0
| 0.002818
| 0.274915
| 4,405
| 146
| 109
| 30.171233
| 0.807138
| 0.197276
| 0
| 0.65625
| 0
| 0
| 0.146886
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.03125
| false
| 0
| 0.0625
| 0
| 0.125
| 0.09375
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
506c907cacd0c8e8e93832785451ebe1451a8d30
| 175
|
py
|
Python
|
malpi/dkwm/gym_envs/__init__.py
|
Bleyddyn/malpi
|
9315f19366bd56da12c6dc7a84d830bbec530753
|
[
"MIT"
] | 5
|
2017-03-27T22:15:54.000Z
|
2022-01-19T23:46:46.000Z
|
malpi/dkwm/gym_envs/__init__.py
|
Bleyddyn/malpi
|
9315f19366bd56da12c6dc7a84d830bbec530753
|
[
"MIT"
] | 10
|
2017-01-19T19:22:06.000Z
|
2022-02-27T21:29:50.000Z
|
malpi/dkwm/gym_envs/__init__.py
|
Bleyddyn/malpi
|
9315f19366bd56da12c6dc7a84d830bbec530753
|
[
"MIT"
] | null | null | null |
from gym.envs.registration import register
from malpi.dkwm.gym_envs.dkwm_env import DKWMEnv
register(
id='dkwm-v0',
entry_point='malpi.dkwm.gym_envs:DKWMEnv',
)
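# Given the registration above, an environment could be instantiated through gym's
# registry (assuming the malpi package is importable); the id matches the register call.
import gym
env = gym.make('dkwm-v0')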
| 19.444444
| 48
| 0.742857
| 26
| 175
| 4.846154
| 0.538462
| 0.166667
| 0.190476
| 0.253968
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006711
| 0.148571
| 175
| 8
| 49
| 21.875
| 0.838926
| 0
| 0
| 0
| 0
| 0
| 0.194286
| 0.154286
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
507ab70263733fd377475d2677b804a7e98a6466
| 350
|
py
|
Python
|
RecoEgamma/EgammaPhotonProducers/python/propOppoMomentumWithMaterialForElectrons_cfi.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 852
|
2015-01-11T21:03:51.000Z
|
2022-03-25T21:14:00.000Z
|
RecoEgamma/EgammaPhotonProducers/python/propOppoMomentumWithMaterialForElectrons_cfi.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 30,371
|
2015-01-02T00:14:40.000Z
|
2022-03-31T23:26:05.000Z
|
RecoEgamma/EgammaPhotonProducers/python/propOppoMomentumWithMaterialForElectrons_cfi.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 3,240
|
2015-01-02T05:53:18.000Z
|
2022-03-31T17:24:21.000Z
|
import FWCore.ParameterSet.Config as cms
import TrackingTools.MaterialEffects.OppositeMaterialPropagator_cfi
#PropagatorWithMaterialESProducer
oppositeToMomElePropagator = TrackingTools.MaterialEffects.OppositeMaterialPropagator_cfi.OppositeMaterialPropagator.clone(
Mass = 0.000511,
ComponentName = 'oppositeToMomElePropagator'
)
| 38.888889
| 123
| 0.845714
| 24
| 350
| 12.25
| 0.708333
| 0.190476
| 0.367347
| 0.387755
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022293
| 0.102857
| 350
| 8
| 124
| 43.75
| 0.914013
| 0.091429
| 0
| 0
| 0
| 0
| 0.082278
| 0.082278
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 1
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
50f08891336f7e5545f07eea041b3eca967010af
| 42
|
py
|
Python
|
python/testData/requirement/generation/keepMatchingVersion/main.py
|
Sajaki/intellij-community
|
6748af2c40567839d11fd652ec77ba263c074aad
|
[
"Apache-2.0"
] | 2
|
2019-04-28T07:48:50.000Z
|
2020-12-11T14:18:08.000Z
|
python/testData/requirement/generation/keepMatchingVersion/main.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 2
|
2022-02-19T09:45:05.000Z
|
2022-02-27T20:32:55.000Z
|
python/testData/requirement/generation/keepMatchingVersion/main.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 1
|
2020-03-10T02:53:51.000Z
|
2020-03-10T02:53:51.000Z
|
import django
import numpy
import requests
| 14
| 15
| 0.880952
| 6
| 42
| 6.166667
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.119048
| 42
| 3
| 15
| 14
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0fed33d3830cf5a78951341d11b17322020b73f1
| 18,691
|
py
|
Python
|
datasets/ner_dataset.py
|
saiajaym/MetaLearningForNER
|
88009aa478645685e5bacef762e896c9ca1ecad9
|
[
"MIT"
] | 3
|
2021-05-12T15:16:02.000Z
|
2021-11-02T05:23:56.000Z
|
datasets/ner_dataset.py
|
saiajaym/MetaLearningForNER
|
88009aa478645685e5bacef762e896c9ca1ecad9
|
[
"MIT"
] | 2
|
2021-08-07T01:59:57.000Z
|
2022-03-23T09:46:30.000Z
|
datasets/ner_dataset.py
|
saiajaym/MetaLearningForNER
|
88009aa478645685e5bacef762e896c9ca1ecad9
|
[
"MIT"
] | 2
|
2020-12-27T22:31:14.000Z
|
2021-04-02T17:36:35.000Z
|
import itertools
import json
import os
import random
from collections import defaultdict, Counter
from tqdm.auto import tqdm, trange
from torch.nn import CrossEntropyLoss
from torch.utils import data
import numpy as np
from datasets import utils
class NERSampler:
def __init__(self, dataset, labels, label_map, n_cls, n_shot, n_query=5, n_batch=100):
print (f'Number of examples in NER dataset is {len(dataset)}')
self.labels = labels
self.classes = set()
for lab in labels:
if len(lab) > 2:
self.classes.add(lab[2:])
self.label_map = label_map
self.n_cls = n_cls
self.n_shot = n_shot
self.n_query = n_query
self.n_batch = n_batch
self.dataset = dataset
print ("{}-way {}-shot with {}-query and {} batchsize".format(self.n_cls, self.n_shot, self.n_query, self.n_batch))
self.sent_class_map, self.class_sent_map = self._get_sent_class_maps(dataset)
# stats on data
print ('## STATISTICS ##')
for cls in self.class_sent_map:
print (cls, len(self.class_sent_map[cls]))
self.data = self.make_episodes()
def make_episodes(self):
"""
Sample mini-batches for episode training
"""
tags_epi, sup_epi, query_epi = [], [], []
for _ in trange(self.n_batch):
classes = self._sample_classes()
# print ("sampled classes", classes)
tags = defaultdict(lambda:-1)
tags['O'] = 0
for cls in classes:
if cls not in tags:
tags[cls] = len(tags)
sup_sents, query_sents = self.sample_sentences(classes, tags)
# print ('sampled support labels', sup_sents.labels)
# print ('sampled query labels', query_sents.labels)
tags_epi.append(tags)
sup_epi.append(sup_sents)
query_epi.append(query_sents)
return tags_epi, sup_epi, query_epi
def __getitem__(self, index):
return self.data[0][index], self.data[1][index], self.data[2][index]
def __len__(self):
return self.n_batch
@staticmethod
def _get_sent_class_maps(dataset):
# map from a sentence Id to a list of pairs with
# B-Xs and the freqs of B-X in the sentence
sent_class_map = defaultdict(list)
# map from B-X to a list of pairs with
# sentence ids and the freqs of B-X in the sentence
class_sent_map = defaultdict(list)
for i, sent in enumerate(dataset):
_, tags = sent.words, sent.labels
class_freqs = Counter()
for tag in tags:
if tag.startswith('B-'):
# we only store the `X` part of `B-X`
class_freqs[tag[2:]] += 1
for cls, freq in class_freqs.items():
sent_class_map[i].append((cls, freq))
class_sent_map[cls].append((i, freq))
return sent_class_map, class_sent_map
def tagged_labels(self, labels, tags):
t_labels = []
for lab in labels:
if len(lab) > 2: lab = lab[2:]
if lab not in tags:
t_labels.append(-1)
else:
t_labels.append(tags[lab])
return t_labels
def sample_sentences(self, classes, tags):
"""
Sample support and query sentences. A greedy algorithm is implemented
that always samples less frequent classes first.
:param classes: the entity classes of interests
:param n_shot: the number of support points
:param n_query: the number of query points
:return: two lists of sentence Ids for support and query sets
respectively
"""
sup_sents, query_sents = [], []
# sample support set
sampled_cls_counters = {cls: 0 for cls in classes}
for cls in classes:
# not enough sentences for the class, so sample with replacement
replacement = (len(self.class_sent_map[cls]) < self.n_shot)
while sampled_cls_counters[cls] < self.n_shot:
sent, _ = random.choice(self.class_sent_map[cls])
if not replacement and sent in sup_sents:
continue
for inn_cls, freq in self.sent_class_map[sent]:
if inn_cls in sampled_cls_counters:
sampled_cls_counters[inn_cls] += freq
sup_sents.append(sent)
# sample query set
sampled_cls_counters = {cls: 0 for cls in classes}
for cls in classes:
# not enough sentences for the class, so sample with replacement
replacement = (len(self.class_sent_map[cls]) < self.n_shot + self.n_query)
while sampled_cls_counters[cls] < self.n_query:
sent, _ = random.choice(self.class_sent_map[cls])
if not replacement and (sent in sup_sents
or sent in query_sents):
continue
for inn_cls, freq in self.sent_class_map[sent]:
if inn_cls in sampled_cls_counters:
sampled_cls_counters[inn_cls] += freq
query_sents.append(sent)
return MetaNERDataset(
[self.dataset[d].words for d in sup_sents],
[self.tagged_labels(self.dataset[d].labels, tags) for d in sup_sents],
self.n_cls + 1
), MetaNERDataset(
[self.dataset[d].words for d in query_sents],
[self.tagged_labels(self.dataset[d].labels, tags) for d in query_sents],
self.n_cls + 1
)
def _sample_classes(self):
"""
Subsample entity classes, sorted by frequencies
:param targets: target classes to sample from
:param n_cls: num of entity classes to sample
:return: a list of classes
"""
sorted_list = []
for cls, val in self.class_sent_map.items():
if cls not in self.classes:
continue
sorted_list.append((cls, len(val)))
assert len(sorted_list) >= self.n_cls
random.shuffle(sorted_list)
sorted_list = sorted_list[:self.n_cls]
sorted_list = sorted(sorted_list, key=lambda p: p[1])
return [cls for cls, _ in sorted_list]
class InputExample(object):
"""A single training/test example for token classification."""
def __init__(self, guid, words, labels):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
words: list. The words of the sequence.
labels: (Optional) list. The labels for each word of the sequence. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.words = words
self.labels = labels
def read_examples_from_file(data_dir, valid_labels):
print (f'valid labels: {valid_labels}')
file_path = data_dir
guid_index = 1
examples = []
with open(file_path, encoding="utf-8") as f:
words = []
labels = []
for line in f:
if line.startswith("-DOCSTART-") or line.strip() == "":
if words:
for i, label in enumerate(labels):
if label not in valid_labels:
labels[i] = 'O'
examples.append(InputExample(guid="{}".format(guid_index), words=words, labels=labels))
guid_index += 1
words = []
labels = []
else:
splits = line.split()
words.append(splits[0])
if len(splits) > 1:
labels.append(splits[-1].replace("\n", ""))
else:
# Examples could have no label for mode = "test"
labels.append("O")
if words:
for i, label in enumerate(labels):
if label not in valid_labels:
labels[i] = 'O'
examples.append(InputExample(guid="{}".format(guid_index), words=words, labels=labels))
label_map = defaultdict(int)
for i, label in enumerate(valid_labels): # assumption that valid_labels[0] == 'O'
if label == 'O':
label_map[label] = i
else:
if label[2:] not in label_map:
label_map[label[2:]] = len(label_map)
return examples, label_map
def get_labels(path):
if path:
with open(path, "r") as f:
labels = f.read().splitlines()
if "O" not in labels:
labels = ["O"] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class MetaNERDataset(data.Dataset):
def __init__(self, sentences, labels, n_classes):
self.sentences = sentences
self.labels = labels
self.n_classes = n_classes
def __len__(self):
return len(self.sentences)
def __getitem__(self, index):
return self.sentences[index], self.labels[index]
# class MetaNERDataset(data.Dataset):
# def __init__(self, file_name):
# json_dict = utils.read_json(file_name)
# self.sentences, self.labels = [], []
# for entry in json_dict:
# self.sentences.append(entry['sentence'])
# self.labels.append(entry['label'])
# self.n_classes = np.max(list(itertools.chain(*self.labels))) + 1
# def __len__(self):
# return len(self.sentences)
# def __getitem__(self, index):
# return self.sentences[index], self.labels[index]
class SequentialSampler:
def __init__(self, dataset, labels, label_map, n_cls, n_shot, n_query=5, n_batch=100):
print (f'Number of examples in NER dataset is {len(dataset)}')
self.labels = labels
self.classes = set()
for lab in labels:
if len(lab) > 2:
self.classes.add(lab[2:])
self.label_map = label_map
self.n_cls = n_cls
self.n_shot = n_shot
self.n_query = n_query
self.n_batch = n_batch
self.dataset = dataset
print ("{}-way {}-shot with {}-query and {} batchsize".format(self.n_cls, self.n_shot, self.n_query, self.n_batch))
self.sent_class_map, self.class_sent_map = self._get_sent_class_maps(dataset)
# stats on data
print ('## STATISTICS ##')
for cls in self.class_sent_map:
print (cls, len(self.class_sent_map[cls]))
self.data = self.make_episodes()
def make_episodes(self):
"""
Sample mini-batches for episode training
"""
tags_epi, sup_epi, query_epi = [], [], []
for _ in trange(self.n_batch):
classes = self._sample_classes()
# print ("sampled classes", classes)
tags = defaultdict(lambda:-1)
tags['O'] = 0
for cls in classes:
if cls not in tags:
tags[cls] = len(tags)
sup_sents = self.sample_support_sentences(classes,tags)
for i in range(int(len(self.dataset)/(self.n_cls*self.n_shot))):
query_sents = self.sample_query_sentences(classes, tags, i)
tags_epi.append(tags)
sup_epi.append(sup_sents)
query_epi.append(query_sents)
return tags_epi, sup_epi, query_epi
def __getitem__(self, index):
return self.data[0][index], self.data[1][index], self.data[2][index]
def __len__(self):
return self.n_batch
@staticmethod
def _get_sent_class_maps(dataset):
# map from a sentence Id to a list of pairs with
# B-Xs and the freqs of B-X in the sentence
sent_class_map = defaultdict(list)
# map from B-X to a list of pairs with
# sentence ids and the freqs of B-X in the sentence
class_sent_map = defaultdict(list)
for i, sent in enumerate(dataset):
_, tags = sent.words, sent.labels
class_freqs = Counter()
for tag in tags:
if tag.startswith('B-'):
# we only store the `X` part of `B-X`
class_freqs[tag[2:]] += 1
for cls, freq in class_freqs.items():
sent_class_map[i].append((cls, freq))
class_sent_map[cls].append((i, freq))
return sent_class_map, class_sent_map
def tagged_labels(self, labels, tags):
return [
tags[lab[2:]] if len(lab) > 2 else tags[lab]
for lab in labels
]
def sample_support_sentences(self, classes, tags):
"""
Sample support and query sentences. A greedy algorithm is implemented
that always samples less frequent classes first.
:param classes: the entity classes of interests
:param n_shot: the number of support points
:param n_query: the number of query points
:return: two lists of sentence Ids for support and query sets
respectively
"""
sup_sents = []
# sample support set
sampled_cls_counters = {cls: 0 for cls in classes}
for cls in classes:
# not enough sentences for the class, so sample with replacement
replacement = (len(self.class_sent_map[cls]) < self.n_shot)
while sampled_cls_counters[cls] < self.n_shot:
sent, _ = random.choice(self.class_sent_map[cls])
if not replacement and sent in sup_sents:
continue
for inn_cls, freq in self.sent_class_map[sent]:
if inn_cls in sampled_cls_counters:
sampled_cls_counters[inn_cls] += freq
sup_sents.append(sent)
return MetaNERDataset(
[self.dataset[d].words for d in sup_sents],
[self.tagged_labels(self.dataset[d].labels, tags) for d in sup_sents],
self.n_cls+1
)
def sample_query_sentences(self, classes, tags, i):
"""
Sample support and query sentences. A greedy algorithm is implemented
that always sample less freqent classes first.
:param classes: the entity classes of interests
:param n_shot: the number of support points
:param n_query: the number of query points
:return: two lists of sentence Ids for support and query sets
respectively
"""
query_sents = [d for d in range(i*self.n_cls*self.n_shot,(i+1)*self.n_cls*self.n_shot)]
return MetaNERDataset(
[self.dataset[d].words for d in query_sents],
[self.tagged_labels(self.dataset[d].labels, tags) for d in query_sents],
self.n_cls+1
)
def _sample_classes(self):
"""
Subsample entity classes, sorted by frequencies
:param targets: target classes to sample from
:param n_cls: num of entity classes to sample
:return: a list of classes
"""
sorted_list = []
for cls, val in self.class_sent_map.items():
if cls not in self.classes:
continue
sorted_list.append((cls, len(val)))
assert len(sorted_list) >= self.n_cls
random.shuffle(sorted_list)
sorted_list = sorted_list[:self.n_cls]
sorted_list = sorted(sorted_list, key=lambda p: p[1])
return [cls for cls, _ in sorted_list]
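# Illustrative usage sketch (added note, not part of the original file). Assuming
# `train_sents` is a list of sentence objects exposing `.words` / `.labels`, and
# `label_set` / `label_map` describe the label inventory (all hypothetical names),
# the episodic sampler could be driven roughly like this:
#
#   sampler = SequentialSampler(train_sents, label_set, label_map,
#                               n_cls=5, n_shot=5, n_query=5, n_batch=100)
#   for idx in range(len(sampler)):
#       tags, support_set, query_set = sampler[idx]
#       ...  # build one 5-way 5-shot episode from support_set / query_set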
class SupervisedSampler:
def __init__(self, dataset, labels, batch_size=30):
print (f'Number of examples in NER dataset is {len(dataset)}')
self.labels = labels
self.classes = []
for lab in labels:
if len(lab) > 2:
self.classes.append(lab[2:])
self.batch_size = batch_size
self.n_batch = len(dataset) // self.batch_size
self.dataset = dataset
self.sent_class_map, self.class_sent_map = self._get_sent_class_maps(dataset)
# stats on data
print ('## STATISTICS ##')
for cls in self.class_sent_map:
print (cls, len(self.class_sent_map[cls]))
self.data = self.make_batches()
def make_batches(self):
"""
Sample mini-batches for episode training
"""
batches = []
tags = defaultdict(lambda:-1)
tags['O'] = 0
for cls in self.classes:
if cls not in tags:
tags[cls] = len(tags)
self.tags = tags
random.shuffle(self.dataset)
for i in trange(self.n_batch):
batch = self.sample_batch_sentences(i*self.batch_size, self.batch_size)
batches.append(batch)
return batches
def __getitem__(self, index):
return self.tags, self.data[index]
def __len__(self):
return self.n_batch
@staticmethod
def _get_sent_class_maps(dataset):
# map from a sentence Id to a list of pairs with
# B-Xs and the freqs of B-X in the sentence
sent_class_map = defaultdict(list)
# map from B-X to a list of pairs with
# sentence ids and the freqs of B-X in the sentence
class_sent_map = defaultdict(list)
for i, sent in enumerate(dataset):
_, tags = sent.words, sent.labels
class_freqs = Counter()
for tag in tags:
if tag.startswith('B-'):
# we only store the `X` part of `B-X`
class_freqs[tag[2:]] += 1
for cls, freq in class_freqs.items():
sent_class_map[i].append((cls, freq))
class_sent_map[cls].append((i, freq))
return sent_class_map, class_sent_map
def tagged_labels(self, labels, tags):
t_labels = []
for lab in labels:
if len(lab) > 2: lab = lab[2:]
if lab not in tags:
t_labels.append(-1)
else:
t_labels.append(tags[lab])
return t_labels
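# Added example: with classes ['PER', 'LOC'] the loop in make_batches builds
# tags == {'O': 0, 'PER': 1, 'LOC': 2}, so
#   tagged_labels(['B-PER', 'I-PER', 'O', 'B-MISC'], tags) -> [1, 1, 0, -1]
# (labels whose class is not in `tags` are mapped to the ignore index -1).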
def sample_batch_sentences(self, startIdx, batch_size):
sents = list(range(startIdx, startIdx + batch_size))
return MetaNERDataset(
[self.dataset[d].words for d in sents],
[self.tagged_labels(self.dataset[d].labels, self.tags) for d in sents],
len(self.classes)
)
| 37.307385
| 123
| 0.569311
| 2,399
| 18,691
| 4.249687
| 0.088787
| 0.02256
| 0.030603
| 0.02668
| 0.779892
| 0.773811
| 0.765964
| 0.749681
| 0.745856
| 0.74105
| 0
| 0.005004
| 0.337061
| 18,691
| 500
| 124
| 37.382
| 0.817771
| 0.189235
| 0
| 0.710448
| 0
| 0
| 0.027185
| 0
| 0
| 0
| 0
| 0
| 0.00597
| 1
| 0.089552
| false
| 0
| 0.029851
| 0.026866
| 0.21194
| 0.035821
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e84d4a5decc6c49e64831cb5acb6bf62262b9049
| 276
|
py
|
Python
|
bentoml/paddle.py
|
francoisserra/BentoML
|
213e9e9b39e055286f2649c733907df88e6d2503
|
[
"Apache-2.0"
] | 1
|
2021-06-12T17:04:07.000Z
|
2021-06-12T17:04:07.000Z
|
bentoml/paddle.py
|
francoisserra/BentoML
|
213e9e9b39e055286f2649c733907df88e6d2503
|
[
"Apache-2.0"
] | 4
|
2021-05-16T08:06:25.000Z
|
2021-11-13T08:46:36.000Z
|
bentoml/paddle.py
|
francoisserra/BentoML
|
213e9e9b39e055286f2649c733907df88e6d2503
|
[
"Apache-2.0"
] | null | null | null |
from ._internal.frameworks.paddle import load
from ._internal.frameworks.paddle import save
from ._internal.frameworks.paddle import load_runner
from ._internal.frameworks.paddle import import_from_paddlehub
__all__ = ["import_from_paddlehub", "load", "load_runner", "save"]
| 39.428571
| 66
| 0.822464
| 35
| 276
| 6.085714
| 0.285714
| 0.225352
| 0.413146
| 0.525822
| 0.676056
| 0.356808
| 0
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 276
| 6
| 67
| 46
| 0.841897
| 0
| 0
| 0
| 0
| 0
| 0.144928
| 0.076087
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e8507d06490cbd085cac36479e442057aa3863d2
| 28,595
|
py
|
Python
|
src/training/train.py
|
KinanZ/open_clip
|
6e76ee3b3a15ee6c4a187853fd123c967721c32b
|
[
"MIT"
] | null | null | null |
src/training/train.py
|
KinanZ/open_clip
|
6e76ee3b3a15ee6c4a187853fd123c967721c32b
|
[
"MIT"
] | null | null | null |
src/training/train.py
|
KinanZ/open_clip
|
6e76ee3b3a15ee6c4a187853fd123c967721c32b
|
[
"MIT"
] | null | null | null |
import os
import time
import json
import numpy as np
import torch
import torch.nn as nn
from sklearn import decomposition
from torch.cuda.amp import autocast
import torch.distributed as dist
import sys
sys.path.append('/misc/student/alzouabk/Thesis/self_supervised_pretraining/open_clip_thesis/src/')
from training.zero_shot import zero_shot_eval
import pdb
import wandb
import logging
def is_master(args):
return (not args.distributed) or args.gpu == 0
def get_weights(labels, class_weights):
weights = torch.ones(labels.shape[0])
for i in range(labels.shape[0]):
sample_label = torch.where(labels[i])[0]
sample_weights = []
for class_label in sample_label:
sample_weights.append(class_weights[class_label.item()])
weights[i] = max(sample_weights)
return weights
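# Added example: with multi-hot labels
#   labels = torch.tensor([[1., 0., 1.], [0., 1., 0.]])
# and class_weights = {0: 0.5, 1: 0.9, 2: 0.99}, each sample is weighted by the
# maximum weight over its active classes, giving weights == tensor([0.99, 0.90]).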
def get_loss(model, images, loss_img, loss_txt, class_weights, texts, labels, args):
image_features, text_features, logit_scale = model(images, texts)
logit_scale = logit_scale.mean()
if args.distributed and args.aggregate:
world_size = dist.get_world_size()
rank = dist.get_rank()
# We gather tensors from all gpus to get more negatives to contrast with.
gathered_image_features = [
torch.zeros_like(image_features) for _ in range(world_size)
]
gathered_text_features = [
torch.zeros_like(text_features) for _ in range(world_size)
]
gathered_labels = [
torch.zeros_like(labels) for _ in range(world_size)
]
dist.all_gather(gathered_image_features, image_features)
dist.all_gather(gathered_text_features, text_features)
dist.all_gather(gathered_labels, labels)
all_image_features = torch.cat(
[image_features]
+ gathered_image_features[:rank]
+ gathered_image_features[rank + 1:]
)
all_text_features = torch.cat(
[text_features]
+ gathered_text_features[:rank]
+ gathered_text_features[rank + 1:]
)
labels = torch.cat(
[labels]
+ gathered_labels[:rank]
+ gathered_labels[rank + 1:]
)
if args.new_model:
gathered_texts = [torch.zeros_like(texts['input_ids']) for _ in range(world_size)]
dist.all_gather(gathered_texts, texts['input_ids'])
texts = torch.cat(
[texts['input_ids']]
+ gathered_texts[:rank]
+ gathered_texts[rank + 1:]
)
else:
gathered_texts = [torch.zeros_like(texts) for _ in range(world_size)]
dist.all_gather(gathered_texts, texts)
texts = torch.cat(
[texts]
+ gathered_texts[:rank]
+ gathered_texts[rank + 1:]
)
# this is needed to send gradients back everywhere.
logits_per_image = logit_scale * all_image_features @ all_text_features.t()
logits_per_text = logits_per_image.t()
else:
logits_per_image = logit_scale * image_features @ text_features.t()
logits_per_text = logit_scale * text_features @ image_features.t()
if args.Label_grouped: # Basically supervised
ground_truth = torch.zeros(logits_per_image.shape).float()
for i in range(len(logits_per_image)):
mask_same = [j for j in range(len(logits_per_image)) if torch.equal(labels[i], labels[j])]
ground_truth[i][mask_same] = 1
elif args.Healthy_grouped:
ground_truth = torch.eye(len(logits_per_image)).float() # logits_per_image.shape = logits_per_text.shape = ground_truth.shape = batchsize x batchsize
for i in range(len(logits_per_image)):
# instead of an eye matrix we have 1 on the diagonal and 1 if the sample from this column belongs to the healthy class
if labels[i][0] == 1:
mask_same = [j for j in range(len(logits_per_image)) if torch.equal(labels[i], labels[j])]
ground_truth[i][mask_same] = 1
elif args.Healthy_Caption_grouped:
ground_truth = torch.eye(len(logits_per_image)).float() # logits_per_image.shape = logits_per_text.shape = ground_truth.shape = batchsize x batchsize
for i in range(len(logits_per_image)):
if labels[i][0] == 1:
# replace 0 with 1 if the sample from this column belongs to the healthy class
mask_same = [j for j in range(len(logits_per_image)) if torch.equal(labels[i], labels[j])]
ground_truth[i][mask_same] = 1
else:
# replace 0 with 1 if the sample from this column belongs to the same diseased class and has the same caption
mask_same = [j for j in range(len(logits_per_image)) if torch.equal(texts[i], texts[j])]
ground_truth[i][mask_same] = 1
elif args.Caption_grouped:
ground_truth = torch.eye(len(logits_per_image)).float() # logits_per_image.shape = logits_per_text.shape = ground_truth.shape = batchsize x batchsize
for i in range(len(logits_per_image)):
# replace 0 with 1 if the sample from this column belongs to the same class and has the same caption
mask_same = [j for j in range(len(logits_per_image)) if torch.equal(texts[i], texts[j])]
ground_truth[i][mask_same] = 1
else: # Default Clip loss
ground_truth = torch.arange(len(logits_per_image)).long()
weights = get_weights(labels, class_weights)
if args.gpu is not None:
ground_truth = ground_truth.cuda(args.gpu, non_blocking=True)
weights = weights.cuda(args.gpu, non_blocking=True)
loss_vision = loss_img(logits_per_image, ground_truth)
loss_vision = (loss_vision * weights).mean()
loss_text = loss_txt(logits_per_text, ground_truth)
loss_text = (loss_text * weights).mean()
total_loss = (loss_vision + loss_text) / 2
return total_loss
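# Added note: in the default branch above ground_truth is arange(batch_size), so the
# result is the standard symmetric CLIP objective
#   total_loss = 0.5 * (CE(logits_per_image, arange) + CE(logits_per_text, arange)),
# re-weighted per sample by `weights` (train() builds the criteria with
# reduction='none' precisely so this per-sample weighting is possible). The grouped
# variants replace arange with a multi-hot target matrix, which pairs with the
# BCEWithLogitsLoss branch rather than CrossEntropyLoss.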
def train(model, data, epoch, optimizer, scaler, scheduler, args, tb_writer=None):
os.environ["WDS_EPOCH"] = str(epoch)
model.train()
dataloader, sampler = data['train'].dataloader, data['train'].sampler
if args.default_loss:
loss_img = nn.CrossEntropyLoss(reduction='none')
loss_txt = nn.CrossEntropyLoss(reduction='none')
else:
loss_img = nn.BCEWithLogitsLoss(reduction='none')
loss_txt = nn.BCEWithLogitsLoss(reduction='none')
if args.use_weights_1:
# class weights where the weight of a class is: 1 - (class_count / total_count)
class_weights = {0: 0.5, 1: 0.995, 2: 0.927, 3: 0.964, 4: 0.989, 5: 0.994, 6: 0.993, 7: 0.997,
8: 0.856, 9: 0.903, 10: 0.998, 11: 0.879, 12: 0.9984, 13: 0.972, 14: 0.988}
elif args.use_weights_2:
# class weights where the weight of a class is: total_count / (num_of_classes * class_count)
class_weights = {0: 0.133, 1: 14.129, 2: 0.913, 3: 1.868, 4: 6.191, 5: 10.805, 6: 9.501, 7: 26.24,
8: 0.461, 9: 0.685, 10: 32.415, 11: 0.552, 12: 30.61, 13: 2.35, 14: 5.681}
else:
class_weights = {0: 1.0, 1: 1.0, 2: 1.0, 3: 1.0, 4: 1.0, 5: 1.0, 6: 1.0, 7: 1.0,
8: 1.0, 9: 1.0, 10: 1.0, 11: 1.0, 12: 1.0, 13: 1.0, 14: 1.0}
if args.gpu is not None:
loss_img = loss_img.cuda(args.gpu)
loss_txt = loss_txt.cuda(args.gpu)
if args.distributed and sampler is not None:
sampler.set_epoch(epoch)
num_batches_per_epoch = dataloader.num_batches
end = time.time()
for i, batch in enumerate(dataloader):
step = num_batches_per_epoch * epoch + i
scheduler(step)
optimizer.zero_grad()
images, texts, labels = batch
if args.gpu is not None:
images = images.cuda(args.gpu, non_blocking=True)
labels = labels.cuda(args.gpu, non_blocking=True)
if args.new_model:
for key in texts:
texts[key] = texts[key].cuda(args.gpu, non_blocking=True)
else:
texts = texts.cuda(args.gpu, non_blocking=True)
data_time = time.time() - end
m = model.module if args.distributed or args.dp else model
# with automatic mixed precision.
if args.precision == "amp":
with autocast():
total_loss = get_loss(model, images, loss_img, loss_txt, class_weights, texts, labels, args)
scaler.scale(total_loss).backward()
scaler.step(optimizer)
scaler.update()
else:
total_loss = get_loss(model, images, loss_img, loss_txt, class_weights, texts, labels, args)
total_loss.backward()
optimizer.step()
# Note: we clamp to 4.6052 = ln(100), as in the original paper.
m.logit_scale.data = torch.clamp(m.logit_scale.data, 0, 4.6052)
batch_time = time.time() - end
end = time.time()
if is_master(args) and (i % 100) == 0:
num_samples = i * len(images) * args.world_size
samples_per_epoch = dataloader.num_samples
percent_complete = 100.0 * i / num_batches_per_epoch
logging.info(
f"Train Epoch: {epoch} [{num_samples}/{samples_per_epoch} ({percent_complete:.0f}%)]\t"
f"Loss: {total_loss.item():.6f}\tData (t) {data_time:.3f}\tBatch (t) {batch_time:.3f}"
f"\tLR: {optimizer.param_groups[0]['lr']:5f}\tlogit_scale {m.logit_scale.data:.3f}"
)
# save train loss / etc.
timestep = epoch * num_batches_per_epoch + i
log_data = {
"loss": total_loss.item(),
"data_time": data_time,
"batch_time": batch_time,
"scale": m.logit_scale.data.item(),
"lr": optimizer.param_groups[0]["lr"]
}
for name, val in log_data.items():
name = "train/" + name
if tb_writer is not None:
tb_writer.add_scalar(name, val, timestep)
if args.wandb:
wandb.log({name: val, 'step': timestep})
def evaluate(model, data, epoch, args, tb_writer=None, steps=None):
if not is_master(args):
return
model.eval()
zero_shot_metrics = zero_shot_eval(model, data, epoch, args)
dataloader = data['val'].dataloader
if args.default_loss:
loss_img = nn.CrossEntropyLoss()
loss_txt = nn.CrossEntropyLoss()
else:
loss_img = nn.BCEWithLogitsLoss()
loss_txt = nn.BCEWithLogitsLoss()
if args.gpu is not None:
loss_img = loss_img.cuda(args.gpu)
loss_txt = loss_txt.cuda(args.gpu)
cumulative_loss = 0.0
num_elements = 0.0
all_image_features, all_text_features, all_labels, all_texts = [], [], [], []
with torch.no_grad():
for batch in dataloader:
images, texts, labels = batch
if args.gpu is not None:
images = images.cuda(args.gpu, non_blocking=True)
if args.new_model:
for key in texts:
texts[key] = texts[key].cuda(args.gpu, non_blocking=True)
else:
texts = texts.cuda(args.gpu, non_blocking=True)
image_features, text_features, logit_scale = model(images, texts)
if args.new_model:
texts = texts['input_ids']
all_image_features.append(image_features)
all_text_features.append(text_features)
all_labels.append(labels)
all_texts.append(texts)
logit_scale = logit_scale.mean()
logits_per_image = logit_scale * image_features @ text_features.t()
logits_per_text = logits_per_image.t()
if args.Label_grouped:
ground_truth = torch.zeros(logits_per_image.shape).float()
for i in range(len(logits_per_image)):
mask_same = [j for j in range(len(logits_per_image)) if torch.equal(labels[i], labels[j])]
ground_truth[i][mask_same] = 1
elif args.Healthy_grouped:
ground_truth = torch.eye(
len(logits_per_image)).float() # logits_per_image.shape = logits_per_text.shape = ground_truth.shape = batchsize x batchsize
for i in range(len(logits_per_image)):
# instead of an eye matrix we have 1 on the diagonal and 1 if the sample from this column belongs to the healthy class
if labels[i][0] == 1:
mask_same = [j for j in range(len(logits_per_image)) if torch.equal(labels[i], labels[j])]
ground_truth[i][mask_same] = 1
elif args.Healthy_Caption_grouped:
ground_truth = torch.eye(
len(logits_per_image)).float() # logits_per_image.shape = logits_per_text.shape = ground_truth.shape = batchsize x batchsize
for i in range(len(logits_per_image)):
if labels[i][0] == 1:
# replace 0 with 1 if the sample from this column belongs to the healthy class
mask_same = [j for j in range(len(logits_per_image)) if torch.equal(labels[i], labels[j])]
ground_truth[i][mask_same] = 1
else:
# replace 0 with 1 if the sample from this column belongs to the same diseased class and has the same caption
mask_same = [j for j in range(len(logits_per_image)) if torch.equal(texts[i], texts[j])]
ground_truth[i][mask_same] = 1
elif args.Caption_grouped:
ground_truth = torch.eye(
len(logits_per_image)).float() # logits_per_image.shape = logits_per_text.shape = ground_truth.shape = batchsize x batchsize
for i in range(len(logits_per_image)):
# replace 0 with 1 if the sample from this column belongs to the same class and has the same caption
mask_same = [j for j in range(len(logits_per_image)) if torch.equal(texts[i], texts[j])]
ground_truth[i][mask_same] = 1
else:
ground_truth = torch.arange(len(logits_per_image)).long()
if args.gpu is not None:
ground_truth = ground_truth.cuda(args.gpu, non_blocking=True)
total_loss = (
loss_img(logits_per_image, ground_truth)
+ loss_txt(logits_per_text, ground_truth)
) / 2
batch_size = len(images)
cumulative_loss += total_loss * batch_size
num_elements += batch_size
if args.custom_eval:
metrics = get_metrics_custom(torch.cat(all_image_features),
torch.cat(all_text_features), torch.cat(all_labels), torch.cat(all_texts))
elif args.custom_eval_no_healthy:
metrics = get_metrics_custom_no_healthy(torch.cat(all_image_features),torch.cat(all_text_features), torch.cat(all_labels), torch.cat(all_texts))
else:
metrics = get_metrics(torch.cat(all_image_features), torch.cat(all_text_features))
loss = cumulative_loss / num_elements
metrics.update(
**{"val_loss": loss.item(), "epoch": epoch, "num_elements": num_elements}
)
metrics.update(zero_shot_metrics)
logging.info(
f"Eval Epoch: {epoch} "
+ "\t".join([f"{k}: {v:.4f}" for k, v in metrics.items()])
)
if args.save_logs:
if tb_writer is not None:
for name, val in metrics.items():
tb_writer.add_scalar(f"val/{name}", val, epoch)
if args.t_sne and epoch % 10 == 0:
all_labels_onehot = torch.cat(all_labels)
all_labels_int = []
for index in range(all_labels_onehot.shape[0]):
all_labels_int.append(onehot_to_int(all_labels_onehot[index]))
all_image_features = torch.cat(all_image_features).cpu().detach().numpy()
all_text_features = torch.cat(all_text_features).cpu().detach().numpy()
pca = decomposition.PCA(n_components=36)
pca.fit(all_image_features)
all_image_features = pca.transform(all_image_features)
pca.fit(all_text_features)
all_text_features = pca.transform(all_text_features)
tb_writer.add_embedding(mat=all_image_features, metadata=all_labels_int,
global_step=epoch, tag='val_image_features')
tb_writer.add_embedding(mat=all_text_features, metadata=all_labels_int,
global_step=epoch, tag='val_text_features')
if args.wandb:
for name, val in metrics.items():
wandb.log({f"val/{name}": val, 'epoch': epoch})
if args.save_logs:
with open(os.path.join(args.checkpoint_path, "results.jsonl"), "a+") as f:
f.write(json.dumps(metrics))
f.write("\n")
return metrics
def evaluate_train(model, data, epoch, args, tb_writer=None, steps=None):
if not is_master(args):
return
model.eval()
zero_shot_metrics = zero_shot_eval(model, data, epoch, args)
dataloader = data['train'].dataloader
if args.default_loss:
loss_img = nn.CrossEntropyLoss()
loss_txt = nn.CrossEntropyLoss()
else:
loss_img = nn.BCEWithLogitsLoss()
loss_txt = nn.BCEWithLogitsLoss()
if args.gpu is not None:
loss_img = loss_img.cuda(args.gpu)
loss_txt = loss_txt.cuda(args.gpu)
cumulative_loss = 0.0
num_elements = 0.0
all_image_features, all_text_features, all_labels, all_texts = [], [], [], []
with torch.no_grad():
for batch in dataloader:
images, texts, labels = batch
if args.gpu is not None:
images = images.cuda(args.gpu, non_blocking=True)
if args.new_model:
for key in texts:
texts[key] = texts[key].cuda(args.gpu, non_blocking=True)
else:
texts = texts.cuda(args.gpu, non_blocking=True)
image_features, text_features, logit_scale = model(images, texts)
if args.new_model:
texts = texts['input_ids']
all_image_features.append(image_features)
all_text_features.append(text_features)
all_labels.append(labels)
all_texts.append(texts)
logit_scale = logit_scale.mean()
logits_per_image = logit_scale * image_features @ text_features.t()
logits_per_text = logits_per_image.t()
if args.Label_grouped:
ground_truth = torch.zeros(logits_per_image.shape).float()
for i in range(len(logits_per_image)):
mask_same = [j for j in range(len(logits_per_image)) if torch.equal(labels[i], labels[j])]
ground_truth[i][mask_same] = 1
elif args.Healthy_grouped:
ground_truth = torch.eye(len(logits_per_image)).float() # logits_per_image.shape = logits_per_text.shape = ground_truth.shape = batchsize x batchsize
for i in range(len(logits_per_image)):
# instead of an eye matrix we have 1 on the diagonal and 1 if the sample from this column belongs to the healthy class
if labels[i][0] == 1:
mask_same = [j for j in range(len(logits_per_image)) if torch.equal(labels[i], labels[j])]
ground_truth[i][mask_same] = 1
elif args.Healthy_Caption_grouped:
ground_truth = torch.eye(len(logits_per_image)).float() # logits_per_image.shape = logits_per_text.shape = ground_truth.shape = batchsize x batchsize
for i in range(len(logits_per_image)):
if labels[i][0] == 1:
# replace 0 with 1 if the sample from this column belongs to the healthy class
mask_same = [j for j in range(len(logits_per_image)) if torch.equal(labels[i], labels[j])]
ground_truth[i][mask_same] = 1
else:
# replace 0 with 1 if the sample from this column belongs to the same diseased class and has the same caption
mask_same = [j for j in range(len(logits_per_image)) if torch.equal(texts[i], texts[j])]
ground_truth[i][mask_same] = 1
elif args.Caption_grouped:
ground_truth = torch.eye(len(logits_per_image)).float() # logits_per_image.shape = logits_per_text.shape = ground_truth.shape = batchsize x batchsize
for i in range(len(logits_per_image)):
# replace 0 with 1 if the sample from this column belongs to the same class and has the same caption
mask_same = [j for j in range(len(logits_per_image)) if torch.equal(texts[i], texts[j])]
ground_truth[i][mask_same] = 1
else:
ground_truth = torch.arange(len(logits_per_image)).long()
if args.gpu is not None:
ground_truth = ground_truth.cuda(args.gpu, non_blocking=True)
total_loss = (
loss_img(logits_per_image, ground_truth)
+ loss_txt(logits_per_text, ground_truth)
) / 2
batch_size = len(images)
cumulative_loss += total_loss * batch_size
num_elements += batch_size
if args.custom_eval:
metrics = get_metrics_custom(torch.cat(all_image_features),
torch.cat(all_text_features), torch.cat(all_labels), torch.cat(all_texts))
elif args.custom_eval_no_healthy:
metrics = get_metrics_custom_no_healthy(torch.cat(all_image_features),torch.cat(all_text_features), torch.cat(all_labels), torch.cat(all_texts))
else:
metrics = get_metrics(torch.cat(all_image_features), torch.cat(all_text_features))
loss = cumulative_loss / num_elements
metrics.update(
**{"train_loss": loss.item(), "epoch": epoch, "num_elements": num_elements}
)
metrics.update(zero_shot_metrics)
logging.info(
f"Eval Train Epoch: {epoch} "
+ "\t".join([f"{k}: {v:.4f}" for k, v in metrics.items()])
)
if args.save_logs:
if tb_writer is not None:
for name, val in metrics.items():
tb_writer.add_scalar(f"train_eval/{name}", val, epoch)
if args.t_sne and epoch % 10 == 0:
all_labels_onehot = torch.cat(all_labels)
all_labels_int = []
for index in range(all_labels_onehot.shape[0]):
all_labels_int.append(onehot_to_int(all_labels_onehot[index]))
all_image_features = torch.cat(all_image_features).cpu().detach().numpy()
all_text_features = torch.cat(all_text_features).cpu().detach().numpy()
pca = decomposition.PCA(n_components=36)
pca.fit(all_image_features)
all_image_features = pca.transform(all_image_features)
pca.fit(all_text_features)
all_text_features = pca.transform(all_text_features)
tb_writer.add_embedding(mat=all_image_features, metadata=all_labels_int,
global_step=epoch, tag='train_image_features')
tb_writer.add_embedding(mat=all_text_features, metadata=all_labels_int,
global_step=epoch, tag='train_text_features')
if args.wandb:
for name, val in metrics.items():
wandb.log({f"train_eval/{name}": val, 'epoch': epoch})
if args.save_logs:
with open(os.path.join(args.checkpoint_path, "train_results.jsonl"), "a+") as f:
f.write(json.dumps(metrics))
f.write("\n")
return metrics
def get_metrics(image_features, text_features):
metrics = {}
logits_per_image = image_features @ text_features.t()
logits_per_text = logits_per_image.t()
logits = {"image_to_text": logits_per_image, "text_to_image": logits_per_text}
ground_truth = (
torch.arange(len(text_features)).view(-1, 1).to(logits_per_image.device)
)
for name, logit in logits.items():
ranking = torch.argsort(logit, descending=True)
preds = torch.where(ranking == ground_truth)[1]
preds = preds.detach().cpu().numpy()
metrics[f"{name}_mean_rank"] = preds.mean() + 1
metrics[f"{name}_median_rank"] = np.floor(np.median(preds)) + 1
for k in [1, 5, 10]:
metrics[f"{name}_R@{k}"] = np.mean(preds < k)
return metrics
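# Added note: preds[i] is the 0-based rank at which the matching pair appears for
# sample i, so a correct text ranked 3rd for its image gives preds[i] == 2, counting
# towards R@5 and R@10 but not R@1; mean/median ranks are reported 1-based.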
def get_metrics_custom(image_features, text_features, labels, texts):
metrics = {}
logits_per_image = image_features @ text_features.t()
logits_per_text = logits_per_image.t()
logits = {"image_to_text": logits_per_image, "text_to_image": logits_per_text}
ground_truth = torch.eye(
len(logits_per_text)).float().to(logits_per_image.device) # logits_per_image.shape = logits_per_text.shape = ground_truth.shape = batchsize x batchsize
for i in range(len(logits_per_text)):
if labels[i][0] == 1:
# replace 0 with 1 if the sample from this column belongs to the healthy class
mask_same = [j for j in range(len(logits_per_image)) if torch.equal(labels[i], labels[j])]
ground_truth[i][mask_same] = 1
else:
# replace 0 with 1 if the sample from this column belongs to the same diseased class and has the same caption
mask_same = [j for j in range(len(logits_per_image)) if torch.equal(texts[i], texts[j])]
ground_truth[i][mask_same] = 1
for name, logit in logits.items():
ranking = torch.argsort(logit, descending=True).to(logits_per_image.device)
preds = torch.zeros(len(logits_per_text)).to(logits_per_image.device)
for j in range(len(logits_per_text)):
ground_truth_sample = torch.where(ground_truth[j])[0].view(-1, 1).to(logits_per_image.device)
preds[j] = torch.min(torch.where(ranking[j] == ground_truth_sample)[1])
preds = preds.detach().cpu().numpy()
metrics[f"{name}_mean_rank"] = preds.mean() + 1
metrics[f"{name}_median_rank"] = np.floor(np.median(preds)) + 1
for k in [1, 5, 10]:
metrics[f"{name}_R@{k}"] = np.mean(preds < k)
return metrics
def get_metrics_custom_no_healthy(image_features, text_features, labels, texts):
metrics = {}
logits_per_image = image_features @ text_features.t()
logits_per_text = logits_per_image.t()
logits = {"image_to_text": logits_per_image, "text_to_image": logits_per_text}
ground_truth = torch.eye(
len(logits_per_text)).float().to(logits_per_image.device) # logits_per_image.shape = logits_per_text.shape = ground_truth.shape = batchsize x batchsize
for i in range(len(logits_per_text)):
mask_same = [j for j in range(len(logits_per_image)) if torch.equal(texts[i], texts[j])]
ground_truth[i][mask_same] = 1
for name, logit in logits.items():
ranking = torch.argsort(logit, descending=True).to(logits_per_image.device)
preds = torch.zeros(len(logits_per_text)).to(logits_per_image.device)
for j in range(len(logits_per_text)):
ground_truth_sample = torch.where(ground_truth[j])[0].view(-1, 1).to(logits_per_image.device)
preds[j] = torch.min(torch.where(ranking[j] == ground_truth_sample)[1])
preds = preds.detach().cpu().numpy()
metrics[f"{name}_mean_rank"] = preds.mean() + 1
metrics[f"{name}_median_rank"] = np.floor(np.median(preds)) + 1
for k in [1, 5, 10]:
metrics[f"{name}_R@{k}"] = np.mean(preds < k)
return metrics
def onehot_to_int(lst):
return [i for i, x in enumerate(lst) if x > 0]
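# Added example: onehot_to_int([0, 1, 0, 1]) == [1, 3], i.e. the indices of the
# active classes in a multi-hot label vector.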
| 45.606061
| 166
| 0.602168
| 3,841
| 28,595
| 4.245249
| 0.079146
| 0.064026
| 0.072121
| 0.043788
| 0.815344
| 0.785784
| 0.775359
| 0.764872
| 0.758371
| 0.745738
| 0
| 0.018241
| 0.294492
| 28,595
| 626
| 167
| 45.678914
| 0.790027
| 0.098584
| 0
| 0.676
| 0
| 0.004
| 0.038033
| 0.010295
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02
| false
| 0
| 0.028
| 0.004
| 0.07
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e854eac02ff984d86165518b112aa60249a5b42e
| 18
|
py
|
Python
|
allennlp/tests/fixtures/plugins/project_c/allennlp_plugins/c/__init__.py
|
justindujardin/allennlp
|
c4559f3751775aa8bc018db417edc119d29d8051
|
[
"Apache-2.0"
] | 1
|
2020-03-30T14:07:02.000Z
|
2020-03-30T14:07:02.000Z
|
allennlp/tests/fixtures/plugins/project_c/allennlp_plugins/c/__init__.py
|
justindujardin/allennlp
|
c4559f3751775aa8bc018db417edc119d29d8051
|
[
"Apache-2.0"
] | 123
|
2020-04-26T02:41:30.000Z
|
2021-08-02T21:18:00.000Z
|
allennlp/tests/fixtures/plugins/project_c/allennlp_plugins/c/__init__.py
|
justindujardin/allennlp
|
c4559f3751775aa8bc018db417edc119d29d8051
|
[
"Apache-2.0"
] | 2
|
2019-12-21T05:58:44.000Z
|
2021-08-16T07:41:21.000Z
|
from c.c import C
| 9
| 17
| 0.722222
| 5
| 18
| 2.6
| 0.6
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.222222
| 18
| 1
| 18
| 18
| 0.928571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e85a582b4835c961353024f23cb7838de54c38e5
| 24
|
py
|
Python
|
torchvision/prototype/utils/__init__.py
|
yoshitomo-matsubara/vision
|
03d11338f3faf94a0749549912593ddb8b70be17
|
[
"BSD-3-Clause"
] | 12,063
|
2017-01-18T19:58:38.000Z
|
2022-03-31T23:08:44.000Z
|
torchvision/prototype/utils/__init__.py
|
yoshitomo-matsubara/vision
|
03d11338f3faf94a0749549912593ddb8b70be17
|
[
"BSD-3-Clause"
] | 4,673
|
2017-01-18T21:30:03.000Z
|
2022-03-31T20:58:33.000Z
|
torchvision/prototype/utils/__init__.py
|
yoshitomo-matsubara/vision
|
03d11338f3faf94a0749549912593ddb8b70be17
|
[
"BSD-3-Clause"
] | 7,132
|
2017-01-18T18:12:23.000Z
|
2022-03-31T21:19:10.000Z
|
from . import _internal
| 12
| 23
| 0.791667
| 3
| 24
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 24
| 1
| 24
| 24
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e8a9c1a7e5fb15f5604bf50940522a213e5cd010
| 131
|
py
|
Python
|
app/api/__init__.py
|
gladuo/VideoShow
|
544c6ccd98ee4da5950d914289f30b5e918aa1a6
|
[
"MIT"
] | null | null | null |
app/api/__init__.py
|
gladuo/VideoShow
|
544c6ccd98ee4da5950d914289f30b5e918aa1a6
|
[
"MIT"
] | null | null | null |
app/api/__init__.py
|
gladuo/VideoShow
|
544c6ccd98ee4da5950d914289f30b5e918aa1a6
|
[
"MIT"
] | null | null | null |
from flask import Blueprint
api = Blueprint('api', __name__)
from . import authentication, videos, shows, users, comments, errors
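# Illustrative sketch (added; not part of the original file): the blueprint defined
# above is normally registered on the application object elsewhere. The factory name
# and URL prefix below are assumptions made only for illustration:
#
#   from flask import Flask
#
#   def create_app():
#       app = Flask(__name__)
#       from app.api import api
#       app.register_blueprint(api, url_prefix='/api/v1')
#       return app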
| 26.2
| 68
| 0.770992
| 16
| 131
| 6.0625
| 0.75
| 0.247423
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137405
| 131
| 5
| 68
| 26.2
| 0.858407
| 0
| 0
| 0
| 0
| 0
| 0.022727
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0.666667
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
|
0
| 6
|
2cda9fc4c3b7e52ae6618733b4ca5902d28c7ffa
| 59
|
py
|
Python
|
src/models/exif_sc/__init__.py
|
lemonwaffle/nisemono
|
f2b32dbff63ea6de47460713aac8a768ff59f126
|
[
"MIT"
] | 7
|
2021-07-08T05:17:19.000Z
|
2021-12-29T05:45:24.000Z
|
src/models/exif_sc/__init__.py
|
yizhe-ang/fake-detection-lab
|
f2b32dbff63ea6de47460713aac8a768ff59f126
|
[
"MIT"
] | null | null | null |
src/models/exif_sc/__init__.py
|
yizhe-ang/fake-detection-lab
|
f2b32dbff63ea6de47460713aac8a768ff59f126
|
[
"MIT"
] | null | null | null |
from .exif_sc import EXIF_SC
from .networks import EXIF_Net
| 29.5
| 30
| 0.847458
| 11
| 59
| 4.272727
| 0.545455
| 0.255319
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.118644
| 59
| 2
| 30
| 29.5
| 0.903846
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
fa44f06aa862871b09d8f10126a6cb038bef569f
| 47
|
py
|
Python
|
manabi/apps/manabi_auth/tests.py
|
aehlke/manabi
|
1dfdd4ecb9c1214b6a70268be0dcfeda9da8754b
|
[
"MIT"
] | 14
|
2015-10-03T07:34:28.000Z
|
2021-09-20T07:10:29.000Z
|
manabi/apps/manabi_auth/tests.py
|
aehlke/manabi
|
1dfdd4ecb9c1214b6a70268be0dcfeda9da8754b
|
[
"MIT"
] | 23
|
2019-10-25T08:47:23.000Z
|
2022-01-30T02:00:45.000Z
|
manabi/apps/manabi_auth/tests.py
|
aehlke/manabi
|
1dfdd4ecb9c1214b6a70268be0dcfeda9da8754b
|
[
"MIT"
] | 7
|
2016-10-04T08:10:36.000Z
|
2021-09-20T07:10:33.000Z
|
from manabi.test_helpers import ManabiTestCase
| 23.5
| 46
| 0.893617
| 6
| 47
| 6.833333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.085106
| 47
| 1
| 47
| 47
| 0.953488
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
fa5fa0507088e412dac0381129f186b5aaf9c2d7
| 34
|
py
|
Python
|
masteronly.py
|
mbs5mz/cs3240-labdemo
|
bc6f04f136686394248e6629aeba0cd3bed7770f
|
[
"MIT"
] | null | null | null |
masteronly.py
|
mbs5mz/cs3240-labdemo
|
bc6f04f136686394248e6629aeba0cd3bed7770f
|
[
"MIT"
] | null | null | null |
masteronly.py
|
mbs5mz/cs3240-labdemo
|
bc6f04f136686394248e6629aeba0cd3bed7770f
|
[
"MIT"
] | null | null | null |
print("This is the master branch")
| 34
| 34
| 0.764706
| 6
| 34
| 4.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.117647
| 34
| 1
| 34
| 34
| 0.866667
| 0
| 0
| 0
| 0
| 0
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
fa6414e49146863505731712674d2bc44a0b263b
| 113
|
py
|
Python
|
pony/orm/tests/test_f_strings.py
|
luckydonald/pony
|
e733f14ef4e21514b49248b7b72aae0728029852
|
[
"Apache-2.0"
] | 2,628
|
2015-01-02T17:55:28.000Z
|
2022-03-31T10:36:42.000Z
|
pony/orm/tests/test_f_strings.py
|
luckydonald/pony
|
e733f14ef4e21514b49248b7b72aae0728029852
|
[
"Apache-2.0"
] | 525
|
2015-01-03T20:30:08.000Z
|
2022-03-23T12:30:01.000Z
|
pony/orm/tests/test_f_strings.py
|
luckydonald/pony
|
e733f14ef4e21514b49248b7b72aae0728029852
|
[
"Apache-2.0"
] | 256
|
2015-01-02T17:55:31.000Z
|
2022-03-20T17:01:37.000Z
|
from sys import version_info
if version_info[:2] >= (3, 6):
from pony.orm.tests.py36_test_f_strings import *
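# Added note: f-string literals are a syntax error on Python < 3.6, so the actual
# test cases live in py36_test_f_strings and are only imported once the interpreter
# can parse them.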
| 28.25
| 52
| 0.734513
| 20
| 113
| 3.9
| 0.8
| 0.282051
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.052083
| 0.150442
| 113
| 4
| 52
| 28.25
| 0.760417
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d75da4931865b017feed0648dddd5ffd50baa642
| 120
|
py
|
Python
|
src/Bank.py
|
tokuma09/PyTDD
|
ae76cd7d6af13c383d4d860500c6291d924a56fd
|
[
"MIT"
] | null | null | null |
src/Bank.py
|
tokuma09/PyTDD
|
ae76cd7d6af13c383d4d860500c6291d924a56fd
|
[
"MIT"
] | 15
|
2021-05-10T13:29:25.000Z
|
2021-05-23T07:15:09.000Z
|
src/Bank.py
|
tokuma09/PyTDD
|
ae76cd7d6af13c383d4d860500c6291d924a56fd
|
[
"MIT"
] | null | null | null |
class Bank():
def __init__(self):
pass
def reduce(self, source, to):
return source.reduce(to)
| 15
| 33
| 0.575
| 15
| 120
| 4.333333
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.308333
| 120
| 7
| 34
| 17.142857
| 0.783133
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0.2
| 0
| 0.2
| 0.8
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 6
|
d7611de84d0c09241612fe2faace88345d97ec3c
| 18,916
|
py
|
Python
|
src/tests/presale/test_customer.py
|
n0emis/pretix
|
57d68eaddb01ec4adc0837a915631871cae4d91a
|
[
"Apache-2.0"
] | null | null | null |
src/tests/presale/test_customer.py
|
n0emis/pretix
|
57d68eaddb01ec4adc0837a915631871cae4d91a
|
[
"Apache-2.0"
] | 8
|
2015-01-06T10:50:27.000Z
|
2015-01-18T18:38:18.000Z
|
src/tests/presale/test_customer.py
|
n0emis/pretix
|
57d68eaddb01ec4adc0837a915631871cae4d91a
|
[
"Apache-2.0"
] | null | null | null |
#
# This file is part of pretix (Community Edition).
#
# Copyright (C) 2014-2020 Raphael Michel and contributors
# Copyright (C) 2020-2021 rami.io GmbH and contributors
#
# This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General
# Public License as published by the Free Software Foundation in version 3 of the License.
#
# ADDITIONAL TERMS APPLY: Pursuant to Section 7 of the GNU Affero General Public License, additional terms are
# applicable granting you additional permissions and placing additional restrictions on your usage of this software.
# Please refer to the pretix LICENSE file to obtain the full terms applicable to this work. If you did not receive
# this file, see <https://pretix.eu/about/en/license>.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License along with this program. If not, see
# <https://www.gnu.org/licenses/>.
#
import datetime
from datetime import timedelta
from decimal import Decimal
from urllib.parse import parse_qs, urlparse
import pytest
from django.core import mail as djmail, signing
from django.core.signing import dumps
from django.test import Client
from django.utils.timezone import now
from django_scopes import scopes_disabled
from pretix.base.models import Event, Item, Order, OrderPosition, Organizer
from pretix.multidomain.models import KnownDomain
from pretix.presale.forms.customer import TokenGenerator
@pytest.fixture
def env():
o = Organizer.objects.create(name='Big Events LLC', slug='bigevents')
o.settings.customer_accounts = True
event = Event.objects.create(
organizer=o, name='Conference', slug='conf',
date_from=now() + timedelta(days=10),
live=True, is_public=False
)
return o, event
@pytest.mark.django_db
def test_disabled(env, client):
env[0].settings.customer_accounts = False
r = client.get('/bigevents/account/register')
assert r.status_code == 404
r = client.get('/bigevents/account/login')
assert r.status_code == 404
r = client.get('/bigevents/account/pwreset')
assert r.status_code == 404
r = client.get('/bigevents/account/pwrecover')
assert r.status_code == 404
r = client.get('/bigevents/account/activate')
assert r.status_code == 404
r = client.get('/bigevents/account/change')
assert r.status_code == 404
r = client.get('/bigevents/account/confirmchange')
assert r.status_code == 404
r = client.get('/bigevents/account/')
assert r.status_code == 404
@pytest.mark.django_db
def test_org_register(env, client):
signer = signing.TimestampSigner(salt='customer-registration-captcha-127.0.0.1')
r = client.post('/bigevents/account/register', {
'email': 'john@example.org',
'name_parts_0': 'John Doe',
'challenge': signer.sign('1+2'),
'response': '3',
}, REMOTE_ADDR='127.0.0.1')
assert r.status_code == 302
assert len(djmail.outbox) == 1
with scopes_disabled():
customer = env[0].customers.get(email='john@example.org')
assert not customer.is_verified
assert customer.is_active
r = client.post(
f'/bigevents/account/activate?id={customer.identifier}&token={TokenGenerator().make_token(customer)}', {
'password': 'PANioMR62',
'password_repeat': 'PANioMR62',
})
assert r.status_code == 302
customer.refresh_from_db()
assert customer.check_password('PANioMR62')
assert customer.is_verified
@pytest.mark.django_db
def test_org_register_duplicate_email(env, client):
with scopes_disabled():
env[0].customers.create(email='john@example.org')
r = client.post('/bigevents/account/register', {
'email': 'john@example.org',
'name_parts_0': 'John Doe',
})
assert b'already registered' in r.content
assert r.status_code == 200
@pytest.mark.django_db
def test_org_resetpw(env, client):
with scopes_disabled():
customer = env[0].customers.create(email='john@example.org', is_verified=False)
r = client.post('/bigevents/account/pwreset', {
'email': 'john@example.org',
})
assert r.status_code == 302
assert len(djmail.outbox) == 1
r = client.post(
f'/bigevents/account/pwrecover?id={customer.identifier}&token={TokenGenerator().make_token(customer)}', {
'password': 'PANioMR62',
'password_repeat': 'PANioMR62',
})
assert r.status_code == 302
customer.refresh_from_db()
assert customer.check_password('PANioMR62')
assert customer.is_verified
@pytest.mark.django_db
def test_org_activate_invalid_token(env, client):
with scopes_disabled():
customer = env[0].customers.create(email='john@example.org', is_verified=False)
r = client.get(
f'/bigevents/account/activate?id={customer.identifier}&token=.invalid.{TokenGenerator().make_token(customer)}')
assert r.status_code == 302
@pytest.mark.django_db
def test_org_login_logout(env, client):
with scopes_disabled():
customer = env[0].customers.create(email='john@example.org', is_verified=True)
customer.set_password('foo')
customer.save()
r = client.post('/bigevents/account/login', {
'email': 'john@example.org',
'password': 'foo',
})
assert r.status_code == 302
r = client.get('/bigevents/account/')
assert r.status_code == 200
r = client.get('/bigevents/account/logout')
assert r.status_code == 302
r = client.get('/bigevents/account/')
assert r.status_code == 302
@pytest.mark.django_db
def test_org_login_invalid_password(env, client):
with scopes_disabled():
customer = env[0].customers.create(email='john@example.org', is_verified=True)
customer.set_password('foo')
customer.save()
r = client.post('/bigevents/account/login', {
'email': 'john@example.org',
'password': 'invalid',
})
assert r.status_code == 200
assert b'alert-danger' in r.content
@pytest.mark.django_db
def test_org_login_not_verified(env, client):
with scopes_disabled():
customer = env[0].customers.create(email='john@example.org', is_verified=False)
customer.set_password('foo')
customer.save()
r = client.post('/bigevents/account/login', {
'email': 'john@example.org',
'password': 'foo',
})
assert r.status_code == 200
assert b'alert-danger' in r.content
@pytest.mark.django_db
def test_org_login_not_active(env, client):
with scopes_disabled():
customer = env[0].customers.create(email='john@example.org', is_verified=True, is_active=False)
customer.set_password('foo')
customer.save()
r = client.post('/bigevents/account/login', {
'email': 'john@example.org',
'password': 'foo',
})
assert r.status_code == 200
assert b'alert-danger' in r.content
@pytest.mark.django_db
@pytest.mark.parametrize("url", [
"account/change",
"account/membership/1/",
"account/",
])
def test_login_required(client, env, url):
with scopes_disabled():
customer = env[0].customers.create(email='john@example.org', is_verified=True)
customer.set_password('foo')
customer.save()
assert client.get('/bigevents/' + url).status_code == 302
client.post('/bigevents/account/login', {
'email': 'john@example.org',
'password': 'foo',
})
assert client.get('/bigevents/' + url).status_code in (200, 404)
@pytest.mark.django_db
def test_org_order_list(env, client):
with scopes_disabled():
customer = env[0].customers.create(email='john@example.org', is_verified=True)
customer.set_password('foo')
customer.save()
event = env[1]
ticket = Item.objects.create(event=event, name='Early-bird ticket', default_price=23, admission=True)
o1 = Order.objects.create(
status=Order.STATUS_PENDING,
event=event,
email='admin@localhost',
datetime=now() - datetime.timedelta(days=3),
expires=now() + datetime.timedelta(days=11),
total=Decimal("23"),
)
OrderPosition.objects.create(
order=o1,
item=ticket,
variation=None,
price=Decimal("23"),
attendee_name_parts={'full_name': "Peter"}
)
o2 = Order.objects.create(
status=Order.STATUS_PENDING,
event=event,
email='john@example.org',
datetime=now() - datetime.timedelta(days=3),
expires=now() + datetime.timedelta(days=11),
total=Decimal("23"),
)
OrderPosition.objects.create(
order=o2,
item=ticket,
variation=None,
price=Decimal("23"),
attendee_name_parts={'full_name': "Peter"}
)
o3 = Order.objects.create(
status=Order.STATUS_PENDING,
event=event,
email='admin@localhost',
customer=customer,
datetime=now() - datetime.timedelta(days=3),
expires=now() + datetime.timedelta(days=11),
total=Decimal("23"),
)
OrderPosition.objects.create(
order=o3,
item=ticket,
variation=None,
price=Decimal("23"),
attendee_name_parts={'full_name': "Peter"}
)
r = client.post('/bigevents/account/login', {
'email': 'john@example.org',
'password': 'foo',
})
assert r.status_code == 302
r = client.get('/bigevents/account/')
assert r.status_code == 200
content = r.content.decode()
assert o1.code not in content
assert o2.code not in content
assert o3.code in content
env[0].settings.customer_accounts_link_by_email = True
r = client.get('/bigevents/account/')
assert r.status_code == 200
content = r.content.decode()
assert o1.code not in content
assert o2.code in content
assert o3.code in content
@pytest.mark.django_db
def test_change_name(env, client):
with scopes_disabled():
customer = env[0].customers.create(email='john@example.org', is_verified=True)
customer.set_password('foo')
customer.save()
r = client.post('/bigevents/account/login', {
'email': 'john@example.org',
'password': 'foo',
})
assert r.status_code == 302
r = client.post('/bigevents/account/change', {
'name_parts_0': 'John Doe',
'email': 'john@example.org',
})
assert r.status_code == 302
customer.refresh_from_db()
assert customer.name == 'John Doe'
@pytest.mark.django_db
def test_change_email(env, client):
with scopes_disabled():
customer = env[0].customers.create(email='john@example.org', is_verified=True)
customer.set_password('foo')
customer.save()
r = client.post('/bigevents/account/login', {
'email': 'john@example.org',
'password': 'foo',
})
assert r.status_code == 302
r = client.post('/bigevents/account/change', {
'name_parts_0': 'John Doe',
'email': 'john@example.com'
})
assert r.status_code == 200
customer.refresh_from_db()
assert customer.email == 'john@example.org'
r = client.post('/bigevents/account/change', {
'name_parts_0': 'John Doe',
'email': 'john@example.com',
'password_current': 'foo',
})
assert r.status_code == 302
customer.refresh_from_db()
assert customer.email == 'john@example.org'
assert len(djmail.outbox) == 1
token = dumps({
'customer': customer.pk,
'email': 'john@example.com'
}, salt='pretix.presale.views.customer.ChangeInformationView')
r = client.get(f'/bigevents/account/confirmchange?token={token}')
assert r.status_code == 302
customer.refresh_from_db()
assert customer.email == 'john@example.com'
@pytest.mark.django_db
def test_change_pw(env, client):
with scopes_disabled():
customer = env[0].customers.create(email='john@example.org', is_verified=True)
customer.set_password('foo')
customer.save()
r = client.post('/bigevents/account/login', {
'email': 'john@example.org',
'password': 'foo',
})
assert r.status_code == 302
r = client.post('/bigevents/account/password', {
'password_current': 'invalid',
'password': 'aYLBRNg4',
'password_repeat': 'aYLBRNg4',
})
assert r.status_code == 200
customer.refresh_from_db()
assert customer.check_password('foo')
r = client.post('/bigevents/account/password', {
'password_current': 'foo',
'password': 'aYLBRNg4',
'password_repeat': 'aYLBRNg4',
})
assert r.status_code == 302
customer.refresh_from_db()
assert customer.check_password('aYLBRNg4')
@pytest.mark.django_db
def test_login_per_org(env, client):
with scopes_disabled():
o2 = Organizer.objects.create(name='Demo', slug='demo')
o2.settings.customer_accounts = True
customer = env[0].customers.create(email='john@example.org', is_verified=True)
customer.set_password('foo')
customer.save()
client.post('/bigevents/account/login', {
'email': 'john@example.org',
'password': 'foo',
})
assert client.get('/bigevents/account/').status_code == 200
assert client.get('/demo/account/').status_code == 302
@pytest.fixture
def client2():
# We need a second test client instance for cross domain stuff since the test client
# does not isolate sessions per-domain like browsers do
return Client()
def _cross_domain_login(env, client, client2):
with scopes_disabled():
customer = env[0].customers.create(email='john@example.org', is_verified=True)
customer.set_password('foo')
customer.save()
KnownDomain.objects.create(domainname='org.test', organizer=env[0])
KnownDomain.objects.create(domainname='event.test', organizer=env[0], event=env[1])
# Log in on org domain
r = client.post('/account/login?next=https://event.test/redeem&request_cross_domain_customer_auth=true', {
'email': 'john@example.org',
'password': 'foo',
}, HTTP_HOST='org.test')
assert r.status_code == 302
u = urlparse(r.headers['Location'])
assert u.netloc == 'event.test'
assert u.path == '/redeem'
q = parse_qs(u.query)
assert 'cross_domain_customer_auth' in q
# Take session over to event domain
r = client2.get(f'/?{u.query}', HTTP_HOST='event.test')
assert r.status_code == 200
assert b'john@example.org' in r.content
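# Added note: the helper above exercises the cross-domain flow in which the login on
# the organizer domain redirects to the event domain with a one-time
# `cross_domain_customer_auth` token in the query string; the event domain exchanges
# it for its own session, and the token cannot be reused (see the single-use test below).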
@pytest.mark.django_db
def test_cross_domain_login(env, client, client2):
_cross_domain_login(env, client, client2)
# Logged in on org domain
r = client.get('/', HTTP_HOST='event.test')
assert r.status_code == 200
assert b'john@example.org' in r.content
# Logged in on event domain
r = client2.get('/', HTTP_HOST='org.test')
assert r.status_code == 200
assert b'john@example.org' in r.content
@pytest.mark.django_db
def test_cross_domain_logout_on_org_domain(env, client, client2):
_cross_domain_login(env, client, client2)
r = client.get('/account/logout', HTTP_HOST='org.test')
assert r.status_code == 302
# Logged out on org domain
r = client.get('/', HTTP_HOST='event.test')
assert r.status_code == 200
assert b'john@example.org' not in r.content
# Logged out on event domain
r = client2.get('/', HTTP_HOST='org.test')
assert r.status_code == 200
assert b'john@example.org' not in r.content
@pytest.mark.django_db
def test_cross_domain_logout_on_event_domain(env, client, client2):
_cross_domain_login(env, client, client2)
r = client2.get('/account/logout?next=/redeem', HTTP_HOST='event.test')
assert r.status_code == 302
u = urlparse(r.headers['Location'])
assert u.netloc == 'org.test'
assert u.path == '/account/logout'
r = client.get(f'{u.path}?{u.query}', HTTP_HOST='org.test')
assert r.status_code == 302
assert r.headers['Location'] == 'http://event.test/redeem'
# Logged out on org domain
r = client.get('/', HTTP_HOST='event.test')
assert r.status_code == 200
assert b'john@example.org' not in r.content
# Logged out on event domain
r = client2.get('/', HTTP_HOST='org.test')
assert r.status_code == 200
assert b'john@example.org' not in r.content
@pytest.mark.django_db
def test_cross_domain_login_otp_only_valid_once(env, client, client2):
with scopes_disabled():
customer = env[0].customers.create(email='john@example.org', is_verified=True)
customer.set_password('foo')
customer.save()
KnownDomain.objects.create(domainname='org.test', organizer=env[0])
KnownDomain.objects.create(domainname='event.test', organizer=env[0], event=env[1])
# Log in on org domain
r = client.post('/account/login?next=https://event.test/redeem&request_cross_domain_customer_auth=true', {
'email': 'john@example.org',
'password': 'foo',
}, HTTP_HOST='org.test')
assert r.status_code == 302
u = urlparse(r.headers['Location'])
assert u.netloc == 'event.test'
assert u.path == '/redeem'
q = parse_qs(u.query)
assert 'cross_domain_customer_auth' in q
# Take session over to event domain
r = client.get(f'/?{u.query}', HTTP_HOST='event.test')
assert r.status_code == 200
assert b'john@example.org' in r.content
# Try to use again
r = client2.get(f'/?{u.query}', HTTP_HOST='event.test')
assert r.status_code == 200
assert b'john@example.org' not in r.content
@pytest.mark.django_db
def test_cross_domain_login_validate_redirect_url(env, client, client2):
with scopes_disabled():
customer = env[0].customers.create(email='john@example.org', is_verified=True)
customer.set_password('foo')
customer.save()
KnownDomain.objects.create(domainname='org.test', organizer=env[0])
KnownDomain.objects.create(domainname='event.test', organizer=env[0], event=env[1])
# Log in on org domain
r = client.post('/account/login?next=https://evilcorp.test/redeem&request_cross_domain_customer_auth=true', {
'email': 'john@example.org',
'password': 'foo',
}, HTTP_HOST='org.test')
assert r.status_code == 302
u = urlparse(r.headers['Location'])
assert u.netloc == 'org.test'
assert u.path == '/account/'
q = parse_qs(u.query)
assert 'cross_domain_customer_auth' not in q
| 33.185965
| 119
| 0.656164
| 2,466
| 18,916
| 4.907948
| 0.124899
| 0.042965
| 0.051557
| 0.067421
| 0.771214
| 0.751136
| 0.733372
| 0.702636
| 0.680245
| 0.659423
| 0
| 0.020162
| 0.208131
| 18,916
| 569
| 120
| 33.244288
| 0.787836
| 0.083263
| 0
| 0.728929
| 0
| 0.006834
| 0.216296
| 0.071078
| 0
| 0
| 0
| 0
| 0.223235
| 1
| 0.052392
| false
| 0.100228
| 0.029613
| 0.002278
| 0.08656
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
d76d863e3b44be2672e1cb5cd88dd7bc048a830a
| 446
|
py
|
Python
|
test3/routes/MainRoute.py
|
Ca11MeE/dophon
|
6737b0f0dc9ec2c2229865940c3c6d6ee326fc28
|
[
"Apache-2.0"
] | 1
|
2018-08-13T09:57:34.000Z
|
2018-08-13T09:57:34.000Z
|
test3/routes/MainRoute.py
|
Ca11MeE/dophon
|
6737b0f0dc9ec2c2229865940c3c6d6ee326fc28
|
[
"Apache-2.0"
] | null | null | null |
test3/routes/MainRoute.py
|
Ca11MeE/dophon
|
6737b0f0dc9ec2c2229865940c3c6d6ee326fc28
|
[
"Apache-2.0"
] | null | null | null |
from dophon import *
from dophon.annotation import *
app = blue_print('main', __name__,url_prefix='/main')
@RequestMapping('/', ['get'])
@ResponseTemplate(['index.html'])
def index():
return {}
@GetRoute('/get')
@ResponseTemplate(['index.html'])
def get_index():
return {}
@PostRoute('/post')
@ResponseTemplate(['index.html'])
def post_index():
return {}
@Get
@ResponseTemplate(['index.html'])
def test_get():
return {}
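# Added note (interpretation only): each handler returns an empty context dict, which
# @ResponseTemplate(['index.html']) presumably renders into index.html; @RequestMapping,
# @GetRoute, @PostRoute and @Get bind the handlers to HTTP methods and paths on the
# 'main' blue_print declared above.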
| 14.866667
| 53
| 0.656951
| 49
| 446
| 5.795918
| 0.44898
| 0.295775
| 0.352113
| 0.394366
| 0.327465
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.141256
| 446
| 29
| 54
| 15.37931
| 0.741514
| 0
| 0
| 0.421053
| 0
| 0
| 0.139013
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.210526
| false
| 0
| 0.105263
| 0.210526
| 0.526316
| 0.052632
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
ad099a3f7a3f39b1c81dfbd2b6b67a25e14da906
| 25
|
py
|
Python
|
eqparse/spaceloads/__init__.py
|
TfedUD/eqparse
|
ab1fba5b4995bed3f5fa2f77cdf505bb613c7e71
|
[
"MIT"
] | 3
|
2021-01-26T18:48:39.000Z
|
2021-07-14T23:22:09.000Z
|
eqparse/spaceloads/__init__.py
|
TfedUD/eqparse
|
ab1fba5b4995bed3f5fa2f77cdf505bb613c7e71
|
[
"MIT"
] | null | null | null |
eqparse/spaceloads/__init__.py
|
TfedUD/eqparse
|
ab1fba5b4995bed3f5fa2f77cdf505bb613c7e71
|
[
"MIT"
] | 3
|
2020-11-18T20:22:00.000Z
|
2021-07-14T18:55:31.000Z
|
from .spaceloads import *
| 25
| 25
| 0.8
| 3
| 25
| 6.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12
| 25
| 1
| 25
| 25
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ad142cceab8899fa59076896998cb49029523f11
| 2,793
|
py
|
Python
|
sendmail.py
|
jvadair/simpleforum
|
d1e602841e64130c0059c7390ac2fbe7950feb89
|
[
"MIT"
] | null | null | null |
sendmail.py
|
jvadair/simpleforum
|
d1e602841e64130c0059c7390ac2fbe7950feb89
|
[
"MIT"
] | null | null | null |
sendmail.py
|
jvadair/simpleforum
|
d1e602841e64130c0059c7390ac2fbe7950feb89
|
[
"MIT"
] | null | null | null |
import smtplib, ssl
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
SMTP_URL = "example.com"
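# NOTE: "example.com" is a placeholder host; point SMTP_URL at the real SMTP server before use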
def send_verification_code(recipient, recipient_name, verification_code):
    sender_email = "simpleforum@jvadair.com"
    with open('.smtp_passwd') as password_file:
        password = password_file.read()
    message = MIMEMultipart("alternative")
    message["Subject"] = "Email Verification"
    message["From"] = sender_email
    message["To"] = recipient
    # Create the plain-text and HTML version of your message
    with open('verification_template.html', 'r') as templateobj:
        html = templateobj.read()
    html = html.replace('$$name', recipient_name)
    html = html.replace('$$verification_code', verification_code)
    # Turn these into plain/html MIMEText objects
    # part1 = MIMEText(text, "plain")
    part2 = MIMEText(html, "html")
    # Add HTML/plain-text parts to MIMEMultipart message
    # The email client will try to render the last part first
    # message.attach(part1)
    message.attach(part2)
    # Create secure connection with server and send email
    context = ssl.create_default_context()
    with smtplib.SMTP_SSL(SMTP_URL, 465, context=context) as server:
        server.login(sender_email, password)
        server.sendmail(
            sender_email, recipient, message.as_string()
        )
def send_thread_notif(recipient, recipient_name, forum, author, content):
    sender_email = "simpleforum@jvadair.com"
    with open('.smtp_passwd') as password_file:
        password = password_file.read()
    message = MIMEMultipart("alternative")
    message["Subject"] = f"New message on {forum}"
    message["From"] = sender_email
    message["To"] = recipient
    # Create the plain-text and HTML version of your message
    with open('forum_notif_template.html', 'r') as templateobj:
        html = templateobj.read()
    html = html.replace('$$name', recipient_name)
    html = html.replace('$$forum', forum)
    html = html.replace('$$author', author)
    html = html.replace('$$content', content)
    # Turn these into plain/html MIMEText objects
    # part1 = MIMEText(text, "plain")
    part2 = MIMEText(html, "html")
    # Add HTML/plain-text parts to MIMEMultipart message
    # The email client will try to render the last part first
    # message.attach(part1)
    message.attach(part2)
    # Create secure connection with server and send email
    context = ssl.create_default_context()
    with smtplib.SMTP_SSL(SMTP_URL, 465, context=context) as server:
        server.login(sender_email, password)
        server.sendmail(
            sender_email, recipient, message.as_string()
        )
| 36.75
| 74
| 0.668815
| 332
| 2,793
| 5.509036
| 0.237952
| 0.048114
| 0.049207
| 0.031711
| 0.792783
| 0.792783
| 0.792783
| 0.792783
| 0.792783
| 0.792783
| 0
| 0.006515
| 0.230576
| 2,793
| 75
| 75
| 37.24
| 0.844579
| 0.223058
| 0
| 0.652174
| 0
| 0
| 0.137019
| 0.046635
| 0
| 0
| 0
| 0
| 0
| 1
| 0.043478
| false
| 0.130435
| 0.065217
| 0
| 0.108696
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
ad322b052b2b88031cf1b45b1db093b00f0d7cf1
| 8,137
|
py
|
Python
|
tests/test_position_stk_short.py
|
nwillemse/nctrader
|
4754ccdeae465ef4674a829f35fc3f78cf1d3ea4
|
[
"MIT"
] | 1
|
2019-11-13T06:38:12.000Z
|
2019-11-13T06:38:12.000Z
|
tests/test_position_stk_short.py
|
nwillemse/nctrader
|
4754ccdeae465ef4674a829f35fc3f78cf1d3ea4
|
[
"MIT"
] | null | null | null |
tests/test_position_stk_short.py
|
nwillemse/nctrader
|
4754ccdeae465ef4674a829f35fc3f78cf1d3ea4
|
[
"MIT"
] | 1
|
2021-05-11T11:24:08.000Z
|
2021-05-11T11:24:08.000Z
|
import unittest
from datetime import datetime
from nctrader.position2 import Position
from nctrader.price_parser import PriceParser
class TestShortRoundTripSPYPosition(unittest.TestCase):
"""
Test a round-trip trade in SPY ETF where the initial
trade is a buy/long of 100 shares of SPY, at a price of
$220.45, with $1.00 commission.
"""
    def setUp(self):
        """
        Set up the Position object that will store the PnL.
        """
        self.position = Position(
            "SLD", "SPY", 400,
            PriceParser.parse(244.15), PriceParser.parse(4.18),
            PriceParser.parse(244.05), PriceParser.parse(244.06),
            datetime(2016, 1, 1)
        )
        print(self.position, '\n')
    def test_calculate_round_trip(self):
        """
        After the initial short sale, carry out two more sells/shorts
        and then close the position out with two additional buys to cover.
        """
print("Sell 400 SPY at 244.15 with $4.18 commission. Update market value with bid/ask of 244.05/244.06:")
self.position.update_market_value(
PriceParser.parse(244.05), PriceParser.parse(244.06),
datetime(2016, 1, 2)
)
print(self.position, '\n')
self.assertEqual(self.position.action, "SLD")
self.assertEqual(self.position.ticker, "SPY")
self.assertEqual(self.position.quantity, 400)
self.assertEqual(self.position.open_quantity, 400)
self.assertEqual(PriceParser.display(self.position.entry_price, 5), (244.15*400 - 4.18) / 400)
self.assertEqual(PriceParser.display(self.position.exit_price, 5), 0)
self.assertEqual(PriceParser.display(self.position.total_commission), 4.18)
self.assertEqual(PriceParser.display(self.position.cost_basis), -1*244.15*400 + 4.18)
self.assertEqual(PriceParser.display(self.position.market_value), -1*244.06*400, 2)
self.assertEqual(PriceParser.display(self.position.unrealised_pnl), round((-1*244.06*400) - (-1*244.15*400 + 4.18),2) , 2)
self.assertEqual(PriceParser.display(self.position.realised_pnl), 0.00)
print("Sell 250 SPY at 243.88 with $2.61 commission. Update market value with bid/ask of 243.47/243.48:")
self.position.transact_shares(
"SLD", 250, PriceParser.parse(243.88), PriceParser.parse(2.61)
)
self.position.update_market_value(
PriceParser.parse(243.47), PriceParser.parse(243.48),
datetime(2016, 1, 3)
)
print(self.position, '\n')
self.assertEqual(self.position.action, "SLD")
self.assertEqual(self.position.ticker, "SPY")
self.assertEqual(self.position.quantity, 400+250)
self.assertEqual(self.position.open_quantity, 400+250)
self.assertEqual(PriceParser.display(self.position.entry_price, 5), round((244.15*400+4.18 + 243.88*250+2.61) / 650, 5))
self.assertEqual(PriceParser.display(self.position.exit_price, 5), 0)
self.assertEqual(PriceParser.display(self.position.total_commission), round(4.18+2.61, 2))
self.assertEqual(PriceParser.display(self.position.cost_basis), round(-1*244.15*400 + 4.18 -1*243.88*250 + 2.61, 2))
self.assertEqual(PriceParser.display(self.position.market_value), -1*243.48*650, 2)
self.assertEqual(PriceParser.display(self.position.unrealised_pnl), round((-1*243.48*650) - (-1*244.15*400 + 4.18 -1*243.88*250 + 2.61),2) , 2)
self.assertEqual(PriceParser.display(self.position.realised_pnl), 0.00)
print("Sell 150 SPY at 243.50 with $1.81 commission. Update market value with bid/ask of 243.50/243.51:")
self.position.transact_shares(
"SLD", 150, PriceParser.parse(243.50), PriceParser.parse(1.81)
)
self.position.update_market_value(
PriceParser.parse(243.50), PriceParser.parse(243.51),
datetime(2016, 1, 4)
)
print(self.position, '\n')
print("bots:", self.position.bots)
print("solds:", self.position.solds)
self.assertEqual(self.position.action, "SLD")
self.assertEqual(self.position.ticker, "SPY")
self.assertEqual(self.position.quantity, 400+250+150)
self.assertEqual(self.position.open_quantity, 400+250+150)
self.assertEqual(PriceParser.display(self.position.entry_price, 5), round((244.15*400+4.18 + 243.88*250+2.61 + 243.50*150+1.81) / 800, 5))
self.assertEqual(PriceParser.display(self.position.exit_price, 5), 0)
self.assertEqual(PriceParser.display(self.position.total_commission), round(4.18+2.61+1.81, 2))
self.assertEqual(PriceParser.display(self.position.cost_basis), round(-1*244.15*400 + 4.18 -1*243.88*250 + 2.61 -1*243.50*150 + 1.81, 2))
self.assertEqual(PriceParser.display(self.position.market_value), -1*243.51*800, 2)
self.assertEqual(PriceParser.display(self.position.unrealised_pnl), round((-1*243.51*800) - (-1*244.15*400 + 4.18 -1*243.88*250 + 2.61 -1*243.50*150 + 1.81),2) , 2)
self.assertEqual(PriceParser.display(self.position.realised_pnl), 0.00)
print("Buy 50 SPY at 243.77 with $1.00 commission. Update market value with bid/ask of 243.84/243.86:")
self.position.transact_shares(
"BOT", 50, PriceParser.parse(243.77), PriceParser.parse(1.00)
)
self.position.update_market_value(
PriceParser.parse(243.84), PriceParser.parse(243.86),
datetime(2016, 1, 5)
)
print(self.position, '\n')
self.assertEqual(self.position.action, "SLD")
self.assertEqual(self.position.ticker, "SPY")
self.assertEqual(self.position.quantity, 400+250+150)
self.assertEqual(self.position.open_quantity, 400+250+150-50)
self.assertEqual(PriceParser.display(self.position.entry_price, 5), round((244.15*400+4.18 + 243.88*250+2.61 + 243.50*150+1.81) / 800, 5))
self.assertEqual(PriceParser.display(self.position.exit_price, 5), (243.77*50+1)/50)
self.assertEqual(PriceParser.display(self.position.total_commission), round(4.18+2.61+1.81+1, 2))
self.assertEqual(PriceParser.display(self.position.cost_basis), round(-1*244.15*350 + 350/400*4.18 -1*243.88*250 + 2.61 -1*243.50*150 + 1.81, 4))
self.assertEqual(PriceParser.display(self.position.market_value), -1*243.86*750, 2)
self.assertEqual(PriceParser.display(self.position.unrealised_pnl), round((-1*243.86*750) - (-1*244.15*350 + 350/400*4.18 -1*243.88*250 + 2.61 -1*243.50*150 + 1.81), 4))
self.assertEqual(PriceParser.display(self.position.realised_pnl), 17.4775)
print("Buy 750 SPY at 244.29 with $3.75 commission. Update market value with bid/ask of 243.84/243.86:")
self.position.transact_shares(
"BOT", 750, PriceParser.parse(244.29), PriceParser.parse(3.75)
)
self.position.update_market_value(
PriceParser.parse(243.29), PriceParser.parse(243.29),
datetime(2016, 1, 6)
)
print(self.position, '\n')
print("bots:", self.position.bots)
print("solds:", self.position.solds)
self.assertEqual(self.position.action, "SLD")
self.assertEqual(self.position.ticker, "SPY")
self.assertEqual(self.position.quantity, 400+250+150)
self.assertEqual(self.position.open_quantity, 400+250+150-50-750)
self.assertEqual(PriceParser.display(self.position.entry_price, 5), round((244.15*400+4.18 + 243.88*250+2.61 + 243.50*150+1.81) / 800, 5))
self.assertEqual(PriceParser.display(self.position.exit_price, 5), round((243.77*50+1 + 244.29*750+3.75)/800, 5))
self.assertEqual(PriceParser.display(self.position.total_commission), round(4.18+2.61+1.81+1+3.75, 2))
self.assertEqual(PriceParser.display(self.position.cost_basis), 0)
self.assertEqual(PriceParser.display(self.position.market_value), 0)
self.assertEqual(PriceParser.display(self.position.unrealised_pnl), 0)
self.assertEqual(PriceParser.display(self.position.realised_pnl), -264.35)
if __name__ == "__main__":
unittest.main()
| 54.610738
| 177
| 0.665233
| 1,157
| 8,137
| 4.621435
| 0.111495
| 0.168319
| 0.170189
| 0.216009
| 0.799514
| 0.786609
| 0.772396
| 0.757621
| 0.660183
| 0.613054
| 0
| 0.139704
| 0.18717
| 8,137
| 148
| 178
| 54.97973
| 0.668733
| 0.039204
| 0
| 0.367521
| 0
| 0.042735
| 0.073256
| 0
| 0
| 0
| 0
| 0
| 0.470085
| 1
| 0.017094
| false
| 0
| 0.034188
| 0
| 0.059829
| 0.128205
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ad51ca380d95e2a4ab5344077a584650b02823ba
| 160
|
py
|
Python
|
info/modules/passport/__init__.py
|
xnzgt/git_flask_news
|
2511927efd2ecd05f2e4312a896cbdfaf69da790
|
[
"MIT"
] | null | null | null |
info/modules/passport/__init__.py
|
xnzgt/git_flask_news
|
2511927efd2ecd05f2e4312a896cbdfaf69da790
|
[
"MIT"
] | null | null | null |
info/modules/passport/__init__.py
|
xnzgt/git_flask_news
|
2511927efd2ecd05f2e4312a896cbdfaf69da790
|
[
"MIT"
] | null | null | null |
# Create a blueprint that receives data sent from the front end
from flask import Blueprint
# Set url_prefix to distinguish this blueprint from the others
passport_blu = Blueprint("passport", __name__, url_prefix="/passport")
from .views import *
| 22.857143
| 68
| 0.80625
| 18
| 160
| 6.777778
| 0.722222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 160
| 6
| 69
| 26.666667
| 0.847222
| 0.225
| 0
| 0
| 0
| 0
| 0.140496
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.333333
| 0.666667
| 0
| 0.666667
| 0.666667
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
|
0
| 6
|